// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if the total number
 * of insn is less than 4K, but there are too many branches that change
 * stack/regs. The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
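
/* Illustrative sketch (not taken from a real program; argument setup for the
 * lookup helper is elided) of the reference-tracking pattern described above:
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *                                    // R0 is PTR_TO_SOCKET_OR_NULL, ref id acquired
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *                                    // NULL check; in the fall-through branch
 *                                    // R0 becomes PTR_TO_SOCKET
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *                                    // reference released
 *    BPF_EXIT_INSN(),
 *
 * Dropping the bpf_sk_release() call on any reachable non-NULL path would
 * leave an unreleased reference at exit and the program would be rejected.
 */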

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_state = (unsigned long)map |
			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
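
/* Illustrative note (the address below is hypothetical): for a map pointer
 * map == 0xffff888012345680, an unpriv store packs
 * aux->map_state = 0xffff888012345681, i.e. the pointer with
 * BPF_MAP_PTR_UNPRIV set in bit 0 (map pointers are at least word aligned,
 * so bit 0 is free), and BPF_MAP_PTR() masks the flag off again.
 */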

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
	int ref_obj_id;
	int func_id;
};

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
	       map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release;
}

static bool is_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
	       func_id == BPF_FUNC_sk_lookup_udp ||
	       func_id == BPF_FUNC_skc_lookup_tcp;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
	       func_id == BPF_FUNC_sk_fullsock;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}

#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN

#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN
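
/* To keep the generated names greppable: the instantiations above expand to
 * copy_reference_state()/copy_stack_state() and realloc_reference_state()/
 * realloc_stack_state(). E.g. COPY_STATE_FN(reference, acquired_refs, refs, 1)
 * produces, roughly:
 *
 *   static int copy_reference_state(struct bpf_func_state *dst,
 *                                   const struct bpf_func_state *src)
 *   {
 *           ...
 *           memcpy(dst->refs, src->refs,
 *                  sizeof(*src->refs) * (src->acquired_refs / 1));
 *           return 0;
 *   }
 */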

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() accesses from
 * the program call into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to the previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
	int i, err;

	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
		kfree(dst_state->jmp_history);
		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
		if (!dst_state->jmp_history)
			return -ENOMEM;
	}
	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src frame, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 * instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 * a new state for a sequence of branches and all such current
		 * and cloned states will be pointing to a single parent state
		 * which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}
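
/* Usage sketch (illustrative): when a conditional jump such as
 * 'if r1 == 0 goto +5' is verified, the branch handling code calls
 *
 *   other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false);
 *
 * to queue the taken branch and keeps walking the fall-through path; a later
 * pop_stack() restores the queued state and resumes at its insn_idx.
 */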

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}
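
/* Worked example (illustrative, not from the original source): suppose a
 * register is known to be var_off=(0x0; 0xC), i.e. only bits 2 and 3 may be
 * set. __update_reg_bounds() then tightens umin_value to 0x0 and umax_value
 * to 0xC. If a later conditional establishes umax_value = 0x7,
 * __reg_bound_offset() intersects (0x0; 0xC) with tnum_range(0, 7) = (0x0; 0x7)
 * and narrows var_off to (0x0; 0x4), i.e. bit 3 is now known to be clear.
 */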

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;

	/* constant backtracking is enabled for root only for now */
	reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_unknown(regs + regno);
}

static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
	__mark_reg_unknown(reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_not_init(regs + regno);
}

#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
	mark_reg_known_zero(env, regs, BPF_REG_1);
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->allow_ptr_leaks) {
			verbose(env, "function calls to other bpf functions are allowed for root only\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level & BPF_LOG_LEVEL2)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}
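
/* Example layout (illustrative): for the program
 *
 *   0: r6 = r1
 *   1: call pc+1        // BPF_PSEUDO_CALL, imm=1, target = insn 3
 *   2: exit             // last insn of subprog 0
 *   3: r0 = 0           // start of subprog 1
 *   4: exit             // last insn of subprog 1
 *
 * check_subprogs() records starts {0, 3} plus the fake 'exit' subprog at
 * insn_cnt == 5, and verifies that no jump crosses the 2/3 boundary.
 */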

/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent, u8 flag)
{
	bool writes = parent == state->parent; /* Observe write marks */
	int cnt = 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->live & REG_LIVE_WRITTEN)
			break;
		if (parent->live & REG_LIVE_DONE) {
			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
				reg_type_str[parent->type],
				parent->var_off.value, parent->off);
			return -EFAULT;
		}
		/* The first condition is more likely to be true than the
		 * second, so check it first.
		 */
		if ((parent->live & REG_LIVE_READ) == flag ||
		    parent->live & REG_LIVE_READ64)
			/* The parentage chain never changes and
			 * this parent was already marked as LIVE_READ.
			 * There is no need to keep walking the chain again and
			 * keep re-marking all parents as LIVE_READ.
			 * This case happens when the same register is read
			 * multiple times without writes into it in-between.
			 * Also, if parent has the stronger REG_LIVE_READ64 set,
			 * then no need to set the weak REG_LIVE_READ32.
			 */
			break;
		/* ... then we depend on parent's value */
		parent->live |= flag;
		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
		if (flag == REG_LIVE_READ64)
			parent->live &= ~REG_LIVE_READ32;
		state = parent;
		parent = state->parent;
		writes = true;
		cnt++;
	}

	if (env->longest_mark_read_walk < cnt)
		env->longest_mark_read_walk = cnt;
	return 0;
}

/* This function is supposed to be used by the following 32-bit optimization
 * code only. It returns TRUE if the source or destination register operates
 * on 64-bit, otherwise return FALSE.
 */
static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
{
	u8 code, class, op;

	code = insn->code;
	class = BPF_CLASS(code);
	op = BPF_OP(code);
	if (class == BPF_JMP) {
		/* BPF_EXIT for "main" will reach here. Return TRUE
		 * conservatively.
		 */
		if (op == BPF_EXIT)
			return true;
		if (op == BPF_CALL) {
			/* BPF to BPF call will reach here because of marking
			 * caller saved clobber with DST_OP_NO_MARK for which we
			 * don't care about the register def because they are
			 * anyway marked as NOT_INIT already.
			 */
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return false;
			/* Helper call will reach here because of arg type
			 * check, conservatively return TRUE.
			 */
			if (t == SRC_OP)
				return true;

			return false;
		}
	}

	if (class == BPF_ALU64 || class == BPF_JMP ||
	    /* BPF_END always uses BPF_ALU class. */
	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
		return true;

	if (class == BPF_ALU || class == BPF_JMP32)
		return false;

	if (class == BPF_LDX) {
		if (t != SRC_OP)
			return BPF_SIZE(code) == BPF_DW;
		/* LDX source must be ptr. */
		return true;
	}

	if (class == BPF_STX) {
		if (reg->type != SCALAR_VALUE)
			return true;
		return BPF_SIZE(code) == BPF_DW;
	}

	if (class == BPF_LD) {
		u8 mode = BPF_MODE(code);

		/* LD_IMM64 */
		if (mode == BPF_IMM)
			return true;

		/* Both LD_IND and LD_ABS return 32-bit data. */
		if (t != SRC_OP)
			return false;

		/* Implicit ctx ptr. */
		if (regno == BPF_REG_6)
			return true;

		/* Explicit source could be any width. */
		return true;
	}

	if (class == BPF_ST)
		/* The only source register for BPF_ST is a ptr. */
		return true;

	/* Conservatively return true at default. */
	return true;
}

/* Return TRUE if INSN doesn't have an explicit value definition. */
static bool insn_no_def(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	return (class == BPF_JMP || class == BPF_JMP32 ||
		class == BPF_STX || class == BPF_ST);
}

/* Return TRUE if INSN has defined any 32-bit value explicitly. */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	if (insn_no_def(insn))
		return false;

	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{
	s32 def_idx = reg->subreg_def;

	if (def_idx == DEF_NOT_SUBREG)
		return;

	env->insn_aux_data[def_idx - 1].zext_dst = true;
	/* The dst will be zero extended, so won't be sub-register anymore. */
	reg->subreg_def = DEF_NOT_SUBREG;
}
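
/* Illustrative sequence (not from the original source):
 *
 *   w1 = w2      // 32-bit ALU def: r1's subreg_def records this insn
 *   ...
 *   r3 = r1      // 64-bit read of r1: mark_insn_zext() flags the defining
 *                // insn's zext_dst, so an explicit zero-extension can be
 *                // inserted there if the JIT lacks hardware zext
 *
 * If r1 were only ever read through 32-bit operations, the explicit
 * zero-extension could be omitted.
 */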

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
	struct bpf_reg_state *reg, *regs = state->regs;
	bool rw64;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	reg = &regs[regno];
	rw64 = is_reg64(env, insn, regno, reg, t);
	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (reg->type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno == BPF_REG_FP)
			return 0;

		if (rw64)
			mark_insn_zext(env, reg);

		return mark_reg_read(env, reg, reg->parent,
				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *cur)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_idx_pair *p;

	cnt++;
	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
	if (!p)
		return -ENOMEM;
	p[cnt - 1].idx = env->insn_idx;
	p[cnt - 1].prev_idx = env->prev_insn_idx;
	cur->jmp_history = p;
	cur->jmp_history_cnt = cnt;
	return 0;
}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}
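
/* Example (illustrative): with jmp_history [(idx=3, prev_idx=1),
 * (idx=10, prev_idx=6)] and i == 10, get_prev_insn_idx() pops the top
 * pair and returns 6 (insn 10 was reached by the jump at insn 6); for
 * i == 8 it simply returns 7, i.e. straight-line execution.
 */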

/* For given verifier state backtrack_insn() is called from the last insn to
 * the first insn. Its purpose is to compute a bitmask of registers and
 * stack slots that need precision in the parent verifier state.
 */
static int backtrack_insn(struct bpf_verifier_env *env, int idx,
			  u32 *reg_mask, u64 *stack_mask)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print	= verbose,
		.private_data	= env,
	};
	struct bpf_insn *insn = env->prog->insnsi + idx;
	u8 class = BPF_CLASS(insn->code);
	u8 opcode = BPF_OP(insn->code);
	u8 mode = BPF_MODE(insn->code);
	u32 dreg = 1u << insn->dst_reg;
	u32 sreg = 1u << insn->src_reg;
	u32 spi;

	if (insn->code == 0)
		return 0;
	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
		verbose(env, "%d: ", idx);
		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
	}

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (!(*reg_mask & dreg))
			return 0;
		if (opcode == BPF_MOV) {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg = sreg
				 * dreg needs precision after this insn
				 * sreg needs precision before this insn
				 */
				*reg_mask &= ~dreg;
				*reg_mask |= sreg;
			} else {
				/* dreg = K
				 * dreg needs precision after this insn.
				 * Corresponding register is already marked
				 * as precise=true in this verifier state.
				 * No further markings in parent are necessary
				 */
				*reg_mask &= ~dreg;
			}
		} else {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg += sreg
				 * both dreg and sreg need precision
				 * before this insn
				 */
				*reg_mask |= sreg;
			} /* else dreg += K
			   * dreg still needs precision before this insn
			   */
		}
	} else if (class == BPF_LDX) {
		if (!(*reg_mask & dreg))
			return 0;
		*reg_mask &= ~dreg;

		/* scalars can only be spilled into stack w/o losing precision.
		 * Load from any other memory can be zero extended.
		 * The desire to keep that precision is already indicated
		 * by 'precise' mark in corresponding register of this state.
		 * No further tracking necessary.
		 */
		if (insn->src_reg != BPF_REG_FP)
			return 0;
		if (BPF_SIZE(insn->code) != BPF_DW)
			return 0;

		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
		 * that [fp - off] slot contains scalar that needs to be
		 * tracked with precision
		 */
		spi = (-insn->off - 1) / BPF_REG_SIZE;
		if (spi >= 64) {
			verbose(env, "BUG spi %d\n", spi);
			WARN_ONCE(1, "verifier backtracking bug");
			return -EFAULT;
		}
		*stack_mask |= 1ull << spi;
	} else if (class == BPF_STX || class == BPF_ST) {
		if (*reg_mask & dreg)
			/* stx & st shouldn't be using _scalar_ dst_reg
			 * to access memory. It means backtracking
			 * encountered a case of pointer subtraction.
			 */
			return -ENOTSUPP;
		/* scalars can only be spilled into stack */
		if (insn->dst_reg != BPF_REG_FP)
			return 0;
		if (BPF_SIZE(insn->code) != BPF_DW)
			return 0;
		spi = (-insn->off - 1) / BPF_REG_SIZE;
		if (spi >= 64) {
			verbose(env, "BUG spi %d\n", spi);
			WARN_ONCE(1, "verifier backtracking bug");
			return -EFAULT;
		}
		if (!(*stack_mask & (1ull << spi)))
			return 0;
		*stack_mask &= ~(1ull << spi);
		if (class == BPF_STX)
			*reg_mask |= sreg;
	} else if (class == BPF_JMP || class == BPF_JMP32) {
		if (opcode == BPF_CALL) {
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return -ENOTSUPP;
			/* regular helper call sets R0 */
			*reg_mask &= ~1;
			if (*reg_mask & 0x3f) {
				/* if backtracing was looking for registers R1-R5
				 * they should have been found already.
				 */
				verbose(env, "BUG regs %x\n", *reg_mask);
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
		} else if (opcode == BPF_EXIT) {
			return -ENOTSUPP;
		}
	} else if (class == BPF_LD) {
		if (!(*reg_mask & dreg))
			return 0;
		*reg_mask &= ~dreg;
		/* It's ld_imm64 or ld_abs or ld_ind.
		 * For ld_imm64 no further tracking of precision
		 * into parent is necessary
		 */
		if (mode == BPF_IND || mode == BPF_ABS)
			/* to be analyzed */
			return -ENOTSUPP;
	}
	return 0;
}

/* the scalar precision tracking algorithm:
 * . at the start all registers have precise=false.
 * . scalar ranges are tracked as normal through alu and jmp insns.
 * . once precise value of the scalar register is used in:
 *   .  ptr + scalar alu
 *   .  if (scalar cond K|scalar)
 *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
 *   backtrack through the verifier states and mark all registers and
 *   stack slots with spilled constants that these scalar registers
 *   should be precise.
 * . during state pruning two registers (or spilled stack slots)
 *   are equivalent if both are not precise.
 *
 * Note the verifier cannot simply walk register parentage chain,
 * since many different registers and stack slots could have been
 * used to compute single precise scalar.
 *
 * The approach of starting with precise=true for all registers and then
 * backtrack to mark a register as not precise when the verifier detects
 * that program doesn't care about specific value (e.g., when helper
 * takes register as ARG_ANYTHING parameter) is not safe.
 *
 * It's ok to walk single parentage chain of the verifier states.
 * It's possible that this backtracking will go all the way till 1st insn.
 * All other branches will be explored for needing precision later.
 *
 * The backtracking needs to deal with cases like:
 *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
 *   r9 -= r8
 *   r5 = r9
 *   if r5 > 0x79f goto pc+7
 *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
 *   r5 += 1
 *   ...
 *   call bpf_perf_event_output#25
 *     where .arg5_type = ARG_CONST_SIZE_OR_ZERO
 *
 * and this case:
 *   r6 = 1
 *   call foo // uses callee's r6 inside to compute r0
 *   r0 += r6
 *   if r0 == 0 goto
 *
 * to track above reg_mask/stack_mask needs to be independent for each frame.
 *
 * Also if parent's curframe > frame where backtracking started,
 * the verifier needs to mark registers in both frames, otherwise callees
 * may incorrectly prune callers. This is similar to
 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
 *
 * For now backtracking falls back into conservative marking.
 */
static void mark_all_scalars_precise(struct bpf_verifier_env *env,
				     struct bpf_verifier_state *st)
{
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	int i, j;

	/* big hammer: mark all scalars precise in this path.
	 * pop_stack may still get !precise scalars.
	 */
	for (; st; st = st->parent)
		for (i = 0; i <= st->curframe; i++) {
			func = st->frame[i];
			for (j = 0; j < BPF_REG_FP; j++) {
				reg = &func->regs[j];
				if (reg->type != SCALAR_VALUE)
					continue;
				reg->precise = true;
			}
			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
				if (func->stack[j].slot_type[0] != STACK_SPILL)
					continue;
				reg = &func->stack[j].spilled_ptr;
				if (reg->type != SCALAR_VALUE)
					continue;
				reg->precise = true;
			}
		}
}

static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
				  int spi)
{
	struct bpf_verifier_state *st = env->cur_state;
	int first_idx = st->first_insn_idx;
	int last_idx = env->insn_idx;
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
	bool skip_first = true;
	bool new_marks = false;
	int i, err;

	if (!env->allow_ptr_leaks)
		/* backtracking is root only for now */
		return 0;

	func = st->frame[st->curframe];
	if (regno >= 0) {
		reg = &func->regs[regno];
		if (reg->type != SCALAR_VALUE) {
			WARN_ONCE(1, "backtracing misuse");
			return -EFAULT;
		}
		if (!reg->precise)
			new_marks = true;
		else
			reg_mask = 0;
		reg->precise = true;
	}

	while (spi >= 0) {
		if (func->stack[spi].slot_type[0] != STACK_SPILL) {
			stack_mask = 0;
			break;
		}
		reg = &func->stack[spi].spilled_ptr;
		if (reg->type != SCALAR_VALUE) {
			stack_mask = 0;
			break;
		}
		if (!reg->precise)
			new_marks = true;
		else
			stack_mask = 0;
		reg->precise = true;
		break;
	}

	if (!new_marks)
		return 0;
	if (!reg_mask && !stack_mask)
		return 0;
	for (;;) {
		DECLARE_BITMAP(mask, 64);
		u32 history = st->jmp_history_cnt;

		if (env->log.level & BPF_LOG_LEVEL)
			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
		for (i = last_idx;;) {
			if (skip_first) {
				err = 0;
				skip_first = false;
			} else {
				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
			}
			if (err == -ENOTSUPP) {
				mark_all_scalars_precise(env, st);
				return 0;
			} else if (err) {
				return err;
			}
			if (!reg_mask && !stack_mask)
				/* Found assignment(s) into tracked register in this state.
				 * Since this state is already marked, just return.
				 * Nothing to be tracked further in the parent state.
				 */
				return 0;
			if (i == first_idx)
				break;
			i = get_prev_insn_idx(st, i, &history);
			if (i >= env->prog->len) {
				/* This can happen if backtracking reached insn 0
				 * and there are still reg_mask or stack_mask
				 * to backtrack.
				 * It means the backtracking missed the spot where
				 * particular register was initialized with a constant.
				 */
				verbose(env, "BUG backtracking idx %d\n", i);
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
		}
		st = st->parent;
		if (!st)
			break;

		new_marks = false;
		func = st->frame[st->curframe];
		bitmap_from_u64(mask, reg_mask);
		for_each_set_bit(i, mask, 32) {
			reg = &func->regs[i];
			if (reg->type != SCALAR_VALUE) {
				reg_mask &= ~(1u << i);
				continue;
			}
			if (!reg->precise)
				new_marks = true;
			reg->precise = true;
		}

		bitmap_from_u64(mask, stack_mask);
		for_each_set_bit(i, mask, 64) {
			if (i >= func->allocated_stack / BPF_REG_SIZE) {
				/* This can happen if backtracking
				 * is propagating stack precision where
				 * caller has larger stack frame
				 * than callee, but backtrack_insn() should
				 * have returned -ENOTSUPP.
				 */
				verbose(env, "BUG spi %d stack_size %d\n",
					i, func->allocated_stack);
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}

			if (func->stack[i].slot_type[0] != STACK_SPILL) {
				stack_mask &= ~(1ull << i);
				continue;
			}
			reg = &func->stack[i].spilled_ptr;
			if (reg->type != SCALAR_VALUE) {
				stack_mask &= ~(1ull << i);
				continue;
			}
			if (!reg->precise)
				new_marks = true;
			reg->precise = true;
		}
		if (env->log.level & BPF_LOG_LEVEL) {
			print_verifier_state(env, func);
			verbose(env, "parent %s regs=%x stack=%llx marks\n",
				new_marks ? "didn't have" : "already had",
				reg_mask, stack_mask);
		}

		if (!reg_mask && !stack_mask)
			break;
		if (!new_marks)
			break;

		last_idx = st->last_insn_idx;
		first_idx = st->first_insn_idx;
	}
	return 0;
}

static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
	return __mark_chain_precision(env, regno, -1);
}

static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
{
	return __mark_chain_precision(env, -1, spi);
}

static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
	case PTR_TO_FLOW_KEYS:
	case CONST_PTR_TO_MAP:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCKET_OR_NULL:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_SOCK_COMMON_OR_NULL:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_TCP_SOCK_OR_NULL:
	case PTR_TO_XDP_SOCK:
		return true;
	default:
		return false;
	}
}

/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}

static bool register_is_const(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
}

static void save_register_state(struct bpf_func_state *state,
				int spi, struct bpf_reg_state *reg)
{
	int i;

	state->stack[spi].spilled_ptr = *reg;
	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

	for (i = 0; i < BPF_REG_SIZE; i++)
		state->stack[spi].slot_type[i] = STACK_SPILL;
}
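
/* Illustrative spill/fill pattern (not from the original source):
 *
 *   *(u64 *)(r10 - 8) = r6   // spill: save_register_state() copies r6's
 *                            // state and marks all 8 slot bytes STACK_SPILL
 *   ...
 *   r6 = *(u64 *)(r10 - 8)   // fill: r6 gets the spilled state back
 *
 * A narrower store such as *(u32 *)(r10 - 8) = r6 would not preserve a
 * spilled pointer; for unprivileged programs it is rejected outright.
 */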
1736 */ 1737 return 0; 1738 if (i == first_idx) 1739 break; 1740 i = get_prev_insn_idx(st, i, &history); 1741 if (i >= env->prog->len) { 1742 /* This can happen if backtracking reached insn 0 1743 * and there are still reg_mask or stack_mask 1744 * to backtrack. 1745 * It means the backtracking missed the spot where 1746 * particular register was initialized with a constant. 1747 */ 1748 verbose(env, "BUG backtracking idx %d\n", i); 1749 WARN_ONCE(1, "verifier backtracking bug"); 1750 return -EFAULT; 1751 } 1752 } 1753 st = st->parent; 1754 if (!st) 1755 break; 1756 1757 new_marks = false; 1758 func = st->frame[st->curframe]; 1759 bitmap_from_u64(mask, reg_mask); 1760 for_each_set_bit(i, mask, 32) { 1761 reg = &func->regs[i]; 1762 if (reg->type != SCALAR_VALUE) { 1763 reg_mask &= ~(1u << i); 1764 continue; 1765 } 1766 if (!reg->precise) 1767 new_marks = true; 1768 reg->precise = true; 1769 } 1770 1771 bitmap_from_u64(mask, stack_mask); 1772 for_each_set_bit(i, mask, 64) { 1773 if (i >= func->allocated_stack / BPF_REG_SIZE) { 1774 /* This can happen if backtracking 1775 * is propagating stack precision where 1776 * caller has larger stack frame 1777 * than callee, but backtrack_insn() should 1778 * have returned -ENOTSUPP. 1779 */ 1780 verbose(env, "BUG spi %d stack_size %d\n", 1781 i, func->allocated_stack); 1782 WARN_ONCE(1, "verifier backtracking bug"); 1783 return -EFAULT; 1784 } 1785 1786 if (func->stack[i].slot_type[0] != STACK_SPILL) { 1787 stack_mask &= ~(1ull << i); 1788 continue; 1789 } 1790 reg = &func->stack[i].spilled_ptr; 1791 if (reg->type != SCALAR_VALUE) { 1792 stack_mask &= ~(1ull << i); 1793 continue; 1794 } 1795 if (!reg->precise) 1796 new_marks = true; 1797 reg->precise = true; 1798 } 1799 if (env->log.level & BPF_LOG_LEVEL) { 1800 print_verifier_state(env, func); 1801 verbose(env, "parent %s regs=%x stack=%llx marks\n", 1802 new_marks ? "didn't have" : "already had", 1803 reg_mask, stack_mask); 1804 } 1805 1806 if (!reg_mask && !stack_mask) 1807 break; 1808 if (!new_marks) 1809 break; 1810 1811 last_idx = st->last_insn_idx; 1812 first_idx = st->first_insn_idx; 1813 } 1814 return 0; 1815 } 1816 1817 static int mark_chain_precision(struct bpf_verifier_env *env, int regno) 1818 { 1819 return __mark_chain_precision(env, regno, -1); 1820 } 1821 1822 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) 1823 { 1824 return __mark_chain_precision(env, -1, spi); 1825 } 1826 1827 static bool is_spillable_regtype(enum bpf_reg_type type) 1828 { 1829 switch (type) { 1830 case PTR_TO_MAP_VALUE: 1831 case PTR_TO_MAP_VALUE_OR_NULL: 1832 case PTR_TO_STACK: 1833 case PTR_TO_CTX: 1834 case PTR_TO_PACKET: 1835 case PTR_TO_PACKET_META: 1836 case PTR_TO_PACKET_END: 1837 case PTR_TO_FLOW_KEYS: 1838 case CONST_PTR_TO_MAP: 1839 case PTR_TO_SOCKET: 1840 case PTR_TO_SOCKET_OR_NULL: 1841 case PTR_TO_SOCK_COMMON: 1842 case PTR_TO_SOCK_COMMON_OR_NULL: 1843 case PTR_TO_TCP_SOCK: 1844 case PTR_TO_TCP_SOCK_OR_NULL: 1845 case PTR_TO_XDP_SOCK: 1846 return true; 1847 default: 1848 return false; 1849 } 1850 } 1851 1852 /* Does this register contain a constant zero? 
*/
1853 static bool register_is_null(struct bpf_reg_state *reg)
1854 {
1855 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1856 }
1857
1858 static bool register_is_const(struct bpf_reg_state *reg)
1859 {
1860 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
1861 }
1862
1863 static void save_register_state(struct bpf_func_state *state,
1864 int spi, struct bpf_reg_state *reg)
1865 {
1866 int i;
1867
1868 state->stack[spi].spilled_ptr = *reg;
1869 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1870
1871 for (i = 0; i < BPF_REG_SIZE; i++)
1872 state->stack[spi].slot_type[i] = STACK_SPILL;
1873 }
1874
1875 /* check_stack_read/write functions track spill/fill of registers,
1876 * stack boundary and alignment are checked in check_mem_access()
1877 */
1878 static int check_stack_write(struct bpf_verifier_env *env,
1879 struct bpf_func_state *state, /* func where register points to */
1880 int off, int size, int value_regno, int insn_idx)
1881 {
1882 struct bpf_func_state *cur; /* state of the current function */
1883 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
1884 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
1885 struct bpf_reg_state *reg = NULL;
1886
1887 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
1888 state->acquired_refs, true);
1889 if (err)
1890 return err;
1891 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1892 * so it's aligned access and [off, off + size) are within stack limits
1893 */
1894 if (!env->allow_ptr_leaks &&
1895 state->stack[spi].slot_type[0] == STACK_SPILL &&
1896 size != BPF_REG_SIZE) {
1897 verbose(env, "attempt to corrupt spilled pointer on stack\n");
1898 return -EACCES;
1899 }
1900
1901 cur = env->cur_state->frame[env->cur_state->curframe];
1902 if (value_regno >= 0)
1903 reg = &cur->regs[value_regno];
1904
1905 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
1906 !register_is_null(reg) && env->allow_ptr_leaks) {
1907 if (dst_reg != BPF_REG_FP) {
1908 /* The backtracking logic can only recognize explicit
1909 * stack slot address like [fp - 8]. Other spill of
1910 * scalar via different register has to be conservative.
1911 * Backtrack from here and mark all registers as precise
1912 * that contributed into 'reg' being a constant.
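 * For instance (a sketch in eBPF pseudo-asm): the spill
 *   *(u64 *)(r10 - 8) = r1      // dst_reg == BPF_REG_FP, trackable
 * can be backtracked later, while the equivalent sequence
 *   r2 = r10; r2 += -8; *(u64 *)(r2 + 0) = r1
 * goes through r2, so the precision marks must be applied eagerly here.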
1913 */ 1914 err = mark_chain_precision(env, value_regno); 1915 if (err) 1916 return err; 1917 } 1918 save_register_state(state, spi, reg); 1919 } else if (reg && is_spillable_regtype(reg->type)) { 1920 /* register containing pointer is being spilled into stack */ 1921 if (size != BPF_REG_SIZE) { 1922 verbose_linfo(env, insn_idx, "; "); 1923 verbose(env, "invalid size of register spill\n"); 1924 return -EACCES; 1925 } 1926 1927 if (state != cur && reg->type == PTR_TO_STACK) { 1928 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 1929 return -EINVAL; 1930 } 1931 1932 if (!env->allow_ptr_leaks) { 1933 bool sanitize = false; 1934 1935 if (state->stack[spi].slot_type[0] == STACK_SPILL && 1936 register_is_const(&state->stack[spi].spilled_ptr)) 1937 sanitize = true; 1938 for (i = 0; i < BPF_REG_SIZE; i++) 1939 if (state->stack[spi].slot_type[i] == STACK_MISC) { 1940 sanitize = true; 1941 break; 1942 } 1943 if (sanitize) { 1944 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; 1945 int soff = (-spi - 1) * BPF_REG_SIZE; 1946 1947 /* detected reuse of integer stack slot with a pointer 1948 * which means either llvm is reusing stack slot or 1949 * an attacker is trying to exploit CVE-2018-3639 1950 * (speculative store bypass) 1951 * Have to sanitize that slot with preemptive 1952 * store of zero. 1953 */ 1954 if (*poff && *poff != soff) { 1955 /* disallow programs where single insn stores 1956 * into two different stack slots, since verifier 1957 * cannot sanitize them 1958 */ 1959 verbose(env, 1960 "insn %d cannot access two stack slots fp%d and fp%d", 1961 insn_idx, *poff, soff); 1962 return -EINVAL; 1963 } 1964 *poff = soff; 1965 } 1966 } 1967 save_register_state(state, spi, reg); 1968 } else { 1969 u8 type = STACK_MISC; 1970 1971 /* regular write of data into stack destroys any spilled ptr */ 1972 state->stack[spi].spilled_ptr.type = NOT_INIT; 1973 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ 1974 if (state->stack[spi].slot_type[0] == STACK_SPILL) 1975 for (i = 0; i < BPF_REG_SIZE; i++) 1976 state->stack[spi].slot_type[i] = STACK_MISC; 1977 1978 /* only mark the slot as written if all 8 bytes were written 1979 * otherwise read propagation may incorrectly stop too soon 1980 * when stack slots are partially written. 1981 * This heuristic means that read propagation will be 1982 * conservative, since it will add reg_live_read marks 1983 * to stack slots all the way to first state when programs 1984 * writes+reads less than 8 bytes 1985 */ 1986 if (size == BPF_REG_SIZE) 1987 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 1988 1989 /* when we zero initialize stack slots mark them as such */ 1990 if (reg && register_is_null(reg)) { 1991 /* backtracking doesn't work for STACK_ZERO yet. */ 1992 err = mark_chain_precision(env, value_regno); 1993 if (err) 1994 return err; 1995 type = STACK_ZERO; 1996 } 1997 1998 /* Mark slots affected by this stack write. 
*/
1999 for (i = 0; i < size; i++)
2000 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
2001 type;
2002 }
2003 return 0;
2004 }
2005
2006 static int check_stack_read(struct bpf_verifier_env *env,
2007 struct bpf_func_state *reg_state /* func where register points to */,
2008 int off, int size, int value_regno)
2009 {
2010 struct bpf_verifier_state *vstate = env->cur_state;
2011 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2012 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
2013 struct bpf_reg_state *reg;
2014 u8 *stype;
2015
2016 if (reg_state->allocated_stack <= slot) {
2017 verbose(env, "invalid read from stack off %d+0 size %d\n",
2018 off, size);
2019 return -EACCES;
2020 }
2021 stype = reg_state->stack[spi].slot_type;
2022 reg = &reg_state->stack[spi].spilled_ptr;
2023
2024 if (stype[0] == STACK_SPILL) {
2025 if (size != BPF_REG_SIZE) {
2026 if (reg->type != SCALAR_VALUE) {
2027 verbose_linfo(env, env->insn_idx, "; ");
2028 verbose(env, "invalid size of register fill\n");
2029 return -EACCES;
2030 }
2031 if (value_regno >= 0) {
2032 mark_reg_unknown(env, state->regs, value_regno);
2033 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2034 }
2035 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2036 return 0;
2037 }
2038 for (i = 1; i < BPF_REG_SIZE; i++) {
2039 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
2040 verbose(env, "corrupted spill memory\n");
2041 return -EACCES;
2042 }
2043 }
2044
2045 if (value_regno >= 0) {
2046 /* restore register state from stack */
2047 state->regs[value_regno] = *reg;
2048 /* mark reg as written since spilled pointer state likely
2049 * has its liveness marks cleared by is_state_visited()
2050 * which resets stack/reg liveness for state transitions
2051 */
2052 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2053 }
2054 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2055 } else {
2056 int zeros = 0;
2057
2058 for (i = 0; i < size; i++) {
2059 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2060 continue;
2061 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2062 zeros++;
2063 continue;
2064 }
2065 verbose(env, "invalid read from stack off %d+%d size %d\n",
2066 off, i, size);
2067 return -EACCES;
2068 }
2069 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2070 if (value_regno >= 0) {
2071 if (zeros == size) {
2072 /* any size read into register is zero extended,
2073 * so the whole register == const_zero
2074 */
2075 __mark_reg_const_zero(&state->regs[value_regno]);
2076 /* backtracking doesn't support STACK_ZERO yet,
2077 * so mark it precise here, so that later
2078 * backtracking can stop here.
2079 * Backtracking may not need this if this register
2080 * doesn't participate in pointer adjustment.
2081 * Forward propagation of precise flag is not
2082 * necessary either. This mark is only to stop
2083 * backtracking. Any register that contributed
2084 * to const 0 was marked precise before spill.
2085 */
2086 state->regs[value_regno].precise = true;
2087 } else {
2088 /* have read misc data from the stack */
2089 mark_reg_unknown(env, state->regs, value_regno);
2090 }
2091 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2092 }
2093 }
2094 return 0;
2095 }
2096
2097 static int check_stack_access(struct bpf_verifier_env *env,
2098 const struct bpf_reg_state *reg,
2099 int off, int size)
2100 {
2101 /* Stack accesses must be at a fixed offset, so that we
2102 * can determine what type of data was returned. See
2103 * check_stack_read().
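 * A rejected shape would be 'r2 = r10; r2 += r3' with unknown r3,
 * followed by '*(u64 *)(r2 - 8) = r1': var_off is not constant, so
 * the check below cannot tell which slot (spilled pointer vs. misc
 * data) would be hit. (Illustrative sequence.)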
2104 */ 2105 if (!tnum_is_const(reg->var_off)) { 2106 char tn_buf[48]; 2107 2108 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2109 verbose(env, "variable stack access var_off=%s off=%d size=%d\n", 2110 tn_buf, off, size); 2111 return -EACCES; 2112 } 2113 2114 if (off >= 0 || off < -MAX_BPF_STACK) { 2115 verbose(env, "invalid stack off=%d size=%d\n", off, size); 2116 return -EACCES; 2117 } 2118 2119 return 0; 2120 } 2121 2122 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 2123 int off, int size, enum bpf_access_type type) 2124 { 2125 struct bpf_reg_state *regs = cur_regs(env); 2126 struct bpf_map *map = regs[regno].map_ptr; 2127 u32 cap = bpf_map_flags_to_cap(map); 2128 2129 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 2130 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 2131 map->value_size, off, size); 2132 return -EACCES; 2133 } 2134 2135 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 2136 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 2137 map->value_size, off, size); 2138 return -EACCES; 2139 } 2140 2141 return 0; 2142 } 2143 2144 /* check read/write into map element returned by bpf_map_lookup_elem() */ 2145 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 2146 int size, bool zero_size_allowed) 2147 { 2148 struct bpf_reg_state *regs = cur_regs(env); 2149 struct bpf_map *map = regs[regno].map_ptr; 2150 2151 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 2152 off + size > map->value_size) { 2153 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 2154 map->value_size, off, size); 2155 return -EACCES; 2156 } 2157 return 0; 2158 } 2159 2160 /* check read/write into a map element with possible variable offset */ 2161 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 2162 int off, int size, bool zero_size_allowed) 2163 { 2164 struct bpf_verifier_state *vstate = env->cur_state; 2165 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2166 struct bpf_reg_state *reg = &state->regs[regno]; 2167 int err; 2168 2169 /* We may have adjusted the register to this map value, so we 2170 * need to try adding each of min_value and max_value to off 2171 * to make sure our theoretical access will be safe. 2172 */ 2173 if (env->log.level & BPF_LOG_LEVEL) 2174 print_verifier_state(env, state); 2175 2176 /* The minimum value is only important with signed 2177 * comparisons where we can't assume the floor of a 2178 * value is 0. If we are using signed variables for our 2179 * index'es we need to make sure that whatever we use 2180 * will have a set floor within our range. 2181 */ 2182 if (reg->smin_value < 0 && 2183 (reg->smin_value == S64_MIN || 2184 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 2185 reg->smin_value + off < 0)) { 2186 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2187 regno); 2188 return -EACCES; 2189 } 2190 err = __check_map_access(env, regno, reg->smin_value + off, size, 2191 zero_size_allowed); 2192 if (err) { 2193 verbose(env, "R%d min value is outside of the array range\n", 2194 regno); 2195 return err; 2196 } 2197 2198 /* If we haven't set a max value then we need to bail since we can't be 2199 * sure we won't do bad things. 2200 * If reg->umax_value + off could overflow, treat that as unbounded too. 
2201 */ 2202 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 2203 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", 2204 regno); 2205 return -EACCES; 2206 } 2207 err = __check_map_access(env, regno, reg->umax_value + off, size, 2208 zero_size_allowed); 2209 if (err) 2210 verbose(env, "R%d max value is outside of the array range\n", 2211 regno); 2212 2213 if (map_value_has_spin_lock(reg->map_ptr)) { 2214 u32 lock = reg->map_ptr->spin_lock_off; 2215 2216 /* if any part of struct bpf_spin_lock can be touched by 2217 * load/store reject this program. 2218 * To check that [x1, x2) overlaps with [y1, y2) 2219 * it is sufficient to check x1 < y2 && y1 < x2. 2220 */ 2221 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && 2222 lock < reg->umax_value + off + size) { 2223 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); 2224 return -EACCES; 2225 } 2226 } 2227 return err; 2228 } 2229 2230 #define MAX_PACKET_OFF 0xffff 2231 2232 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 2233 const struct bpf_call_arg_meta *meta, 2234 enum bpf_access_type t) 2235 { 2236 switch (env->prog->type) { 2237 /* Program types only with direct read access go here! */ 2238 case BPF_PROG_TYPE_LWT_IN: 2239 case BPF_PROG_TYPE_LWT_OUT: 2240 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2241 case BPF_PROG_TYPE_SK_REUSEPORT: 2242 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2243 case BPF_PROG_TYPE_CGROUP_SKB: 2244 if (t == BPF_WRITE) 2245 return false; 2246 /* fallthrough */ 2247 2248 /* Program types with direct read + write access go here! */ 2249 case BPF_PROG_TYPE_SCHED_CLS: 2250 case BPF_PROG_TYPE_SCHED_ACT: 2251 case BPF_PROG_TYPE_XDP: 2252 case BPF_PROG_TYPE_LWT_XMIT: 2253 case BPF_PROG_TYPE_SK_SKB: 2254 case BPF_PROG_TYPE_SK_MSG: 2255 if (meta) 2256 return meta->pkt_access; 2257 2258 env->seen_direct_write = true; 2259 return true; 2260 2261 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2262 if (t == BPF_WRITE) 2263 env->seen_direct_write = true; 2264 2265 return true; 2266 2267 default: 2268 return false; 2269 } 2270 } 2271 2272 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, 2273 int off, int size, bool zero_size_allowed) 2274 { 2275 struct bpf_reg_state *regs = cur_regs(env); 2276 struct bpf_reg_state *reg = ®s[regno]; 2277 2278 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 2279 (u64)off + size > reg->range) { 2280 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 2281 off, size, regno, reg->id, reg->off, reg->range); 2282 return -EACCES; 2283 } 2284 return 0; 2285 } 2286 2287 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 2288 int size, bool zero_size_allowed) 2289 { 2290 struct bpf_reg_state *regs = cur_regs(env); 2291 struct bpf_reg_state *reg = ®s[regno]; 2292 int err; 2293 2294 /* We may have added a variable offset to the packet pointer; but any 2295 * reg->range we have comes after that. We are only checking the fixed 2296 * offset. 2297 */ 2298 2299 /* We don't allow negative numbers, because we aren't tracking enough 2300 * detail to prove they're safe. 
2301 */ 2302 if (reg->smin_value < 0) { 2303 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2304 regno); 2305 return -EACCES; 2306 } 2307 err = __check_packet_access(env, regno, off, size, zero_size_allowed); 2308 if (err) { 2309 verbose(env, "R%d offset is outside of the packet\n", regno); 2310 return err; 2311 } 2312 2313 /* __check_packet_access has made sure "off + size - 1" is within u16. 2314 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, 2315 * otherwise find_good_pkt_pointers would have refused to set range info 2316 * that __check_packet_access would have rejected this pkt access. 2317 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. 2318 */ 2319 env->prog->aux->max_pkt_offset = 2320 max_t(u32, env->prog->aux->max_pkt_offset, 2321 off + reg->umax_value + size - 1); 2322 2323 return err; 2324 } 2325 2326 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ 2327 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, 2328 enum bpf_access_type t, enum bpf_reg_type *reg_type) 2329 { 2330 struct bpf_insn_access_aux info = { 2331 .reg_type = *reg_type, 2332 }; 2333 2334 if (env->ops->is_valid_access && 2335 env->ops->is_valid_access(off, size, t, env->prog, &info)) { 2336 /* A non zero info.ctx_field_size indicates that this field is a 2337 * candidate for later verifier transformation to load the whole 2338 * field and then apply a mask when accessed with a narrower 2339 * access than actual ctx access size. A zero info.ctx_field_size 2340 * will only allow for whole field access and rejects any other 2341 * type of narrower access. 2342 */ 2343 *reg_type = info.reg_type; 2344 2345 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 2346 /* remember the offset of last byte accessed in ctx */ 2347 if (env->prog->aux->max_ctx_offset < off + size) 2348 env->prog->aux->max_ctx_offset = off + size; 2349 return 0; 2350 } 2351 2352 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 2353 return -EACCES; 2354 } 2355 2356 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, 2357 int size) 2358 { 2359 if (size < 0 || off < 0 || 2360 (u64)off + size > sizeof(struct bpf_flow_keys)) { 2361 verbose(env, "invalid access to flow keys off=%d size=%d\n", 2362 off, size); 2363 return -EACCES; 2364 } 2365 return 0; 2366 } 2367 2368 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, 2369 u32 regno, int off, int size, 2370 enum bpf_access_type t) 2371 { 2372 struct bpf_reg_state *regs = cur_regs(env); 2373 struct bpf_reg_state *reg = ®s[regno]; 2374 struct bpf_insn_access_aux info = {}; 2375 bool valid; 2376 2377 if (reg->smin_value < 0) { 2378 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2379 regno); 2380 return -EACCES; 2381 } 2382 2383 switch (reg->type) { 2384 case PTR_TO_SOCK_COMMON: 2385 valid = bpf_sock_common_is_valid_access(off, size, t, &info); 2386 break; 2387 case PTR_TO_SOCKET: 2388 valid = bpf_sock_is_valid_access(off, size, t, &info); 2389 break; 2390 case PTR_TO_TCP_SOCK: 2391 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); 2392 break; 2393 case PTR_TO_XDP_SOCK: 2394 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); 2395 break; 2396 default: 2397 valid = false; 2398 } 2399 2400 2401 if (valid) { 2402 env->insn_aux_data[insn_idx].ctx_field_size = 2403 info.ctx_field_size; 2404 return 0; 
2405 } 2406 2407 verbose(env, "R%d invalid %s access off=%d size=%d\n", 2408 regno, reg_type_str[reg->type], off, size); 2409 2410 return -EACCES; 2411 } 2412 2413 static bool __is_pointer_value(bool allow_ptr_leaks, 2414 const struct bpf_reg_state *reg) 2415 { 2416 if (allow_ptr_leaks) 2417 return false; 2418 2419 return reg->type != SCALAR_VALUE; 2420 } 2421 2422 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 2423 { 2424 return cur_regs(env) + regno; 2425 } 2426 2427 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 2428 { 2429 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); 2430 } 2431 2432 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 2433 { 2434 const struct bpf_reg_state *reg = reg_state(env, regno); 2435 2436 return reg->type == PTR_TO_CTX; 2437 } 2438 2439 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) 2440 { 2441 const struct bpf_reg_state *reg = reg_state(env, regno); 2442 2443 return type_is_sk_pointer(reg->type); 2444 } 2445 2446 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 2447 { 2448 const struct bpf_reg_state *reg = reg_state(env, regno); 2449 2450 return type_is_pkt_pointer(reg->type); 2451 } 2452 2453 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) 2454 { 2455 const struct bpf_reg_state *reg = reg_state(env, regno); 2456 2457 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ 2458 return reg->type == PTR_TO_FLOW_KEYS; 2459 } 2460 2461 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 2462 const struct bpf_reg_state *reg, 2463 int off, int size, bool strict) 2464 { 2465 struct tnum reg_off; 2466 int ip_align; 2467 2468 /* Byte size accesses are always allowed. */ 2469 if (!strict || size == 1) 2470 return 0; 2471 2472 /* For platforms that do not have a Kconfig enabling 2473 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 2474 * NET_IP_ALIGN is universally set to '2'. And on platforms 2475 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 2476 * to this code only in strict mode where we want to emulate 2477 * the NET_IP_ALIGN==2 checking. Therefore use an 2478 * unconditional IP align value of '2'. 2479 */ 2480 ip_align = 2; 2481 2482 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 2483 if (!tnum_is_aligned(reg_off, size)) { 2484 char tn_buf[48]; 2485 2486 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2487 verbose(env, 2488 "misaligned packet access off %d+%s+%d+%d size %d\n", 2489 ip_align, tn_buf, reg->off, off, size); 2490 return -EACCES; 2491 } 2492 2493 return 0; 2494 } 2495 2496 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 2497 const struct bpf_reg_state *reg, 2498 const char *pointer_desc, 2499 int off, int size, bool strict) 2500 { 2501 struct tnum reg_off; 2502 2503 /* Byte size accesses are always allowed. 
*/ 2504 if (!strict || size == 1) 2505 return 0; 2506 2507 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 2508 if (!tnum_is_aligned(reg_off, size)) { 2509 char tn_buf[48]; 2510 2511 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2512 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 2513 pointer_desc, tn_buf, reg->off, off, size); 2514 return -EACCES; 2515 } 2516 2517 return 0; 2518 } 2519 2520 static int check_ptr_alignment(struct bpf_verifier_env *env, 2521 const struct bpf_reg_state *reg, int off, 2522 int size, bool strict_alignment_once) 2523 { 2524 bool strict = env->strict_alignment || strict_alignment_once; 2525 const char *pointer_desc = ""; 2526 2527 switch (reg->type) { 2528 case PTR_TO_PACKET: 2529 case PTR_TO_PACKET_META: 2530 /* Special case, because of NET_IP_ALIGN. Given metadata sits 2531 * right in front, treat it the very same way. 2532 */ 2533 return check_pkt_ptr_alignment(env, reg, off, size, strict); 2534 case PTR_TO_FLOW_KEYS: 2535 pointer_desc = "flow keys "; 2536 break; 2537 case PTR_TO_MAP_VALUE: 2538 pointer_desc = "value "; 2539 break; 2540 case PTR_TO_CTX: 2541 pointer_desc = "context "; 2542 break; 2543 case PTR_TO_STACK: 2544 pointer_desc = "stack "; 2545 /* The stack spill tracking logic in check_stack_write() 2546 * and check_stack_read() relies on stack accesses being 2547 * aligned. 2548 */ 2549 strict = true; 2550 break; 2551 case PTR_TO_SOCKET: 2552 pointer_desc = "sock "; 2553 break; 2554 case PTR_TO_SOCK_COMMON: 2555 pointer_desc = "sock_common "; 2556 break; 2557 case PTR_TO_TCP_SOCK: 2558 pointer_desc = "tcp_sock "; 2559 break; 2560 case PTR_TO_XDP_SOCK: 2561 pointer_desc = "xdp_sock "; 2562 break; 2563 default: 2564 break; 2565 } 2566 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 2567 strict); 2568 } 2569 2570 static int update_stack_depth(struct bpf_verifier_env *env, 2571 const struct bpf_func_state *func, 2572 int off) 2573 { 2574 u16 stack = env->subprog_info[func->subprogno].stack_depth; 2575 2576 if (stack >= -off) 2577 return 0; 2578 2579 /* update known max for given subprogram */ 2580 env->subprog_info[func->subprogno].stack_depth = -off; 2581 return 0; 2582 } 2583 2584 /* starting from main bpf function walk all instructions of the function 2585 * and recursively walk all callees that given function can call. 2586 * Ignore jump and exit insns. 2587 * Since recursion is prevented by check_cfg() this algorithm 2588 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 2589 */ 2590 static int check_max_stack_depth(struct bpf_verifier_env *env) 2591 { 2592 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; 2593 struct bpf_subprog_info *subprog = env->subprog_info; 2594 struct bpf_insn *insn = env->prog->insnsi; 2595 int ret_insn[MAX_CALL_FRAMES]; 2596 int ret_prog[MAX_CALL_FRAMES]; 2597 2598 process_func: 2599 /* round up to 32-bytes, since this is granularity 2600 * of interpreter stack size 2601 */ 2602 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 2603 if (depth > MAX_BPF_STACK) { 2604 verbose(env, "combined stack size of %d calls is %d. 
Too large\n", 2605 frame + 1, depth); 2606 return -EACCES; 2607 } 2608 continue_func: 2609 subprog_end = subprog[idx + 1].start; 2610 for (; i < subprog_end; i++) { 2611 if (insn[i].code != (BPF_JMP | BPF_CALL)) 2612 continue; 2613 if (insn[i].src_reg != BPF_PSEUDO_CALL) 2614 continue; 2615 /* remember insn and function to return to */ 2616 ret_insn[frame] = i + 1; 2617 ret_prog[frame] = idx; 2618 2619 /* find the callee */ 2620 i = i + insn[i].imm + 1; 2621 idx = find_subprog(env, i); 2622 if (idx < 0) { 2623 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 2624 i); 2625 return -EFAULT; 2626 } 2627 frame++; 2628 if (frame >= MAX_CALL_FRAMES) { 2629 verbose(env, "the call stack of %d frames is too deep !\n", 2630 frame); 2631 return -E2BIG; 2632 } 2633 goto process_func; 2634 } 2635 /* end of for() loop means the last insn of the 'subprog' 2636 * was reached. Doesn't matter whether it was JA or EXIT 2637 */ 2638 if (frame == 0) 2639 return 0; 2640 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 2641 frame--; 2642 i = ret_insn[frame]; 2643 idx = ret_prog[frame]; 2644 goto continue_func; 2645 } 2646 2647 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 2648 static int get_callee_stack_depth(struct bpf_verifier_env *env, 2649 const struct bpf_insn *insn, int idx) 2650 { 2651 int start = idx + insn->imm + 1, subprog; 2652 2653 subprog = find_subprog(env, start); 2654 if (subprog < 0) { 2655 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 2656 start); 2657 return -EFAULT; 2658 } 2659 return env->subprog_info[subprog].stack_depth; 2660 } 2661 #endif 2662 2663 static int check_ctx_reg(struct bpf_verifier_env *env, 2664 const struct bpf_reg_state *reg, int regno) 2665 { 2666 /* Access to ctx or passing it to a helper is only allowed in 2667 * its original, unmodified form. 
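 * E.g. (pseudo-asm, illustrative only):
 *   r1 += 8; r0 = *(u32 *)(r1 + 0)   // modified ctx ptr, rejected
 *   r0 = *(u32 *)(r1 + 8)            // offset in the insn, fine
 * since ctx offsets must stay in the load/store insn itself, where
 * convert_ctx_accesses() can rewrite them.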
2668 */ 2669 2670 if (reg->off) { 2671 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", 2672 regno, reg->off); 2673 return -EACCES; 2674 } 2675 2676 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 2677 char tn_buf[48]; 2678 2679 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2680 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); 2681 return -EACCES; 2682 } 2683 2684 return 0; 2685 } 2686 2687 static int check_tp_buffer_access(struct bpf_verifier_env *env, 2688 const struct bpf_reg_state *reg, 2689 int regno, int off, int size) 2690 { 2691 if (off < 0) { 2692 verbose(env, 2693 "R%d invalid tracepoint buffer access: off=%d, size=%d", 2694 regno, off, size); 2695 return -EACCES; 2696 } 2697 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 2698 char tn_buf[48]; 2699 2700 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2701 verbose(env, 2702 "R%d invalid variable buffer offset: off=%d, var_off=%s", 2703 regno, off, tn_buf); 2704 return -EACCES; 2705 } 2706 if (off + size > env->prog->aux->max_tp_access) 2707 env->prog->aux->max_tp_access = off + size; 2708 2709 return 0; 2710 } 2711 2712 2713 /* truncate register to smaller size (in bytes) 2714 * must be called with size < BPF_REG_SIZE 2715 */ 2716 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 2717 { 2718 u64 mask; 2719 2720 /* clear high bits in bit representation */ 2721 reg->var_off = tnum_cast(reg->var_off, size); 2722 2723 /* fix arithmetic bounds */ 2724 mask = ((u64)1 << (size * 8)) - 1; 2725 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 2726 reg->umin_value &= mask; 2727 reg->umax_value &= mask; 2728 } else { 2729 reg->umin_value = 0; 2730 reg->umax_value = mask; 2731 } 2732 reg->smin_value = reg->umin_value; 2733 reg->smax_value = reg->umax_value; 2734 } 2735 2736 /* check whether memory at (regno + off) is accessible for t = (read | write) 2737 * if t==write, value_regno is a register which value is stored into memory 2738 * if t==read, value_regno is a register which will receive the value from memory 2739 * if t==write && value_regno==-1, some unknown value is stored into memory 2740 * if t==read && value_regno==-1, don't care what we read from memory 2741 */ 2742 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 2743 int off, int bpf_size, enum bpf_access_type t, 2744 int value_regno, bool strict_alignment_once) 2745 { 2746 struct bpf_reg_state *regs = cur_regs(env); 2747 struct bpf_reg_state *reg = regs + regno; 2748 struct bpf_func_state *state; 2749 int size, err = 0; 2750 2751 size = bpf_size_to_bytes(bpf_size); 2752 if (size < 0) 2753 return size; 2754 2755 /* alignment checks will add in reg->off themselves */ 2756 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 2757 if (err) 2758 return err; 2759 2760 /* for access checks, reg->off is just part of off */ 2761 off += reg->off; 2762 2763 if (reg->type == PTR_TO_MAP_VALUE) { 2764 if (t == BPF_WRITE && value_regno >= 0 && 2765 is_pointer_value(env, value_regno)) { 2766 verbose(env, "R%d leaks addr into map\n", value_regno); 2767 return -EACCES; 2768 } 2769 err = check_map_access_type(env, regno, off, size, t); 2770 if (err) 2771 return err; 2772 err = check_map_access(env, regno, off, size, false); 2773 if (!err && t == BPF_READ && value_regno >= 0) 2774 mark_reg_unknown(env, regs, value_regno); 2775 2776 } else if (reg->type == PTR_TO_CTX) { 2777 enum bpf_reg_type reg_type = SCALAR_VALUE; 2778 2779 if (t == BPF_WRITE && 
value_regno >= 0 &&
2780 is_pointer_value(env, value_regno)) {
2781 verbose(env, "R%d leaks addr into ctx\n", value_regno);
2782 return -EACCES;
2783 }
2784
2785 err = check_ctx_reg(env, reg, regno);
2786 if (err < 0)
2787 return err;
2788
2789 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
2790 if (!err && t == BPF_READ && value_regno >= 0) {
2791 /* ctx access returns either a scalar, or a
2792 * PTR_TO_PACKET[_META,_END]. In the latter
2793 * case, we know the offset is zero.
2794 */
2795 if (reg_type == SCALAR_VALUE) {
2796 mark_reg_unknown(env, regs, value_regno);
2797 } else {
2798 mark_reg_known_zero(env, regs,
2799 value_regno);
2800 if (reg_type_may_be_null(reg_type))
2801 regs[value_regno].id = ++env->id_gen;
2802 /* A load of ctx field could have different
2803 * actual load size from the one encoded in the
2804 * insn. When the dst is PTR, it is for sure not
2805 * a sub-register.
2806 */
2807 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
2808 }
2809 regs[value_regno].type = reg_type;
2810 }
2811
2812 } else if (reg->type == PTR_TO_STACK) {
2813 off += reg->var_off.value;
2814 err = check_stack_access(env, reg, off, size);
2815 if (err)
2816 return err;
2817
2818 state = func(env, reg);
2819 err = update_stack_depth(env, state, off);
2820 if (err)
2821 return err;
2822
2823 if (t == BPF_WRITE)
2824 err = check_stack_write(env, state, off, size,
2825 value_regno, insn_idx);
2826 else
2827 err = check_stack_read(env, state, off, size,
2828 value_regno);
2829 } else if (reg_is_pkt_pointer(reg)) {
2830 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
2831 verbose(env, "cannot write into packet\n");
2832 return -EACCES;
2833 }
2834 if (t == BPF_WRITE && value_regno >= 0 &&
2835 is_pointer_value(env, value_regno)) {
2836 verbose(env, "R%d leaks addr into packet\n",
2837 value_regno);
2838 return -EACCES;
2839 }
2840 err = check_packet_access(env, regno, off, size, false);
2841 if (!err && t == BPF_READ && value_regno >= 0)
2842 mark_reg_unknown(env, regs, value_regno);
2843 } else if (reg->type == PTR_TO_FLOW_KEYS) {
2844 if (t == BPF_WRITE && value_regno >= 0 &&
2845 is_pointer_value(env, value_regno)) {
2846 verbose(env, "R%d leaks addr into flow keys\n",
2847 value_regno);
2848 return -EACCES;
2849 }
2850
2851 err = check_flow_keys_access(env, off, size);
2852 if (!err && t == BPF_READ && value_regno >= 0)
2853 mark_reg_unknown(env, regs, value_regno);
2854 } else if (type_is_sk_pointer(reg->type)) {
2855 if (t == BPF_WRITE) {
2856 verbose(env, "R%d cannot write into %s\n",
2857 regno, reg_type_str[reg->type]);
2858 return -EACCES;
2859 }
2860 err = check_sock_access(env, insn_idx, regno, off, size, t);
2861 if (!err && value_regno >= 0)
2862 mark_reg_unknown(env, regs, value_regno);
2863 } else if (reg->type == PTR_TO_TP_BUFFER) {
2864 err = check_tp_buffer_access(env, reg, regno, off, size);
2865 if (!err && t == BPF_READ && value_regno >= 0)
2866 mark_reg_unknown(env, regs, value_regno);
2867 } else {
2868 verbose(env, "R%d invalid mem access '%s'\n", regno,
2869 reg_type_str[reg->type]);
2870 return -EACCES;
2871 }
2872
2873 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
2874 regs[value_regno].type == SCALAR_VALUE) {
2875 /* b/h/w load zero-extends, mark upper bits as known 0 */
2876 coerce_reg_to_size(&regs[value_regno], size);
2877 }
2878 return err;
2879 }
2880
2881 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
2882 {
2883 int err;
2884
2885 if ((BPF_SIZE(insn->code) != BPF_W
&& BPF_SIZE(insn->code) != BPF_DW) || 2886 insn->imm != 0) { 2887 verbose(env, "BPF_XADD uses reserved fields\n"); 2888 return -EINVAL; 2889 } 2890 2891 /* check src1 operand */ 2892 err = check_reg_arg(env, insn->src_reg, SRC_OP); 2893 if (err) 2894 return err; 2895 2896 /* check src2 operand */ 2897 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 2898 if (err) 2899 return err; 2900 2901 if (is_pointer_value(env, insn->src_reg)) { 2902 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 2903 return -EACCES; 2904 } 2905 2906 if (is_ctx_reg(env, insn->dst_reg) || 2907 is_pkt_reg(env, insn->dst_reg) || 2908 is_flow_key_reg(env, insn->dst_reg) || 2909 is_sk_reg(env, insn->dst_reg)) { 2910 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", 2911 insn->dst_reg, 2912 reg_type_str[reg_state(env, insn->dst_reg)->type]); 2913 return -EACCES; 2914 } 2915 2916 /* check whether atomic_add can read the memory */ 2917 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 2918 BPF_SIZE(insn->code), BPF_READ, -1, true); 2919 if (err) 2920 return err; 2921 2922 /* check whether atomic_add can write into the same memory */ 2923 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 2924 BPF_SIZE(insn->code), BPF_WRITE, -1, true); 2925 } 2926 2927 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno, 2928 int off, int access_size, 2929 bool zero_size_allowed) 2930 { 2931 struct bpf_reg_state *reg = reg_state(env, regno); 2932 2933 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 2934 access_size < 0 || (access_size == 0 && !zero_size_allowed)) { 2935 if (tnum_is_const(reg->var_off)) { 2936 verbose(env, "invalid stack type R%d off=%d access_size=%d\n", 2937 regno, off, access_size); 2938 } else { 2939 char tn_buf[48]; 2940 2941 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2942 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n", 2943 regno, tn_buf, access_size); 2944 } 2945 return -EACCES; 2946 } 2947 return 0; 2948 } 2949 2950 /* when register 'regno' is passed into function that will read 'access_size' 2951 * bytes from that pointer, make sure that it's within stack boundary 2952 * and all elements of stack are initialized. 2953 * Unlike most pointer bounds-checking functions, this one doesn't take an 2954 * 'off' argument, so it has to add in reg->off itself. 2955 */ 2956 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 2957 int access_size, bool zero_size_allowed, 2958 struct bpf_call_arg_meta *meta) 2959 { 2960 struct bpf_reg_state *reg = reg_state(env, regno); 2961 struct bpf_func_state *state = func(env, reg); 2962 int err, min_off, max_off, i, j, slot, spi; 2963 2964 if (reg->type != PTR_TO_STACK) { 2965 /* Allow zero-byte read from NULL, regardless of pointer type */ 2966 if (zero_size_allowed && access_size == 0 && 2967 register_is_null(reg)) 2968 return 0; 2969 2970 verbose(env, "R%d type=%s expected=%s\n", regno, 2971 reg_type_str[reg->type], 2972 reg_type_str[PTR_TO_STACK]); 2973 return -EACCES; 2974 } 2975 2976 if (tnum_is_const(reg->var_off)) { 2977 min_off = max_off = reg->var_off.value + reg->off; 2978 err = __check_stack_boundary(env, regno, min_off, access_size, 2979 zero_size_allowed); 2980 if (err) 2981 return err; 2982 } else { 2983 /* Variable offset is prohibited for unprivileged mode for 2984 * simplicity since it requires corresponding support in 2985 * Spectre masking for stack ALU. 2986 * See also retrieve_ptr_limit(). 
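 * Concretely (a sketch): passing 'r2 = r10 + r3' with r3 known to be
 * in [-16, -8] as a helper's buffer argument is acceptable for
 * privileged programs, but is rejected below for !root even though
 * the range itself is bounded.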
2987 */ 2988 if (!env->allow_ptr_leaks) { 2989 char tn_buf[48]; 2990 2991 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2992 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", 2993 regno, tn_buf); 2994 return -EACCES; 2995 } 2996 /* Only initialized buffer on stack is allowed to be accessed 2997 * with variable offset. With uninitialized buffer it's hard to 2998 * guarantee that whole memory is marked as initialized on 2999 * helper return since specific bounds are unknown what may 3000 * cause uninitialized stack leaking. 3001 */ 3002 if (meta && meta->raw_mode) 3003 meta = NULL; 3004 3005 if (reg->smax_value >= BPF_MAX_VAR_OFF || 3006 reg->smax_value <= -BPF_MAX_VAR_OFF) { 3007 verbose(env, "R%d unbounded indirect variable offset stack access\n", 3008 regno); 3009 return -EACCES; 3010 } 3011 min_off = reg->smin_value + reg->off; 3012 max_off = reg->smax_value + reg->off; 3013 err = __check_stack_boundary(env, regno, min_off, access_size, 3014 zero_size_allowed); 3015 if (err) { 3016 verbose(env, "R%d min value is outside of stack bound\n", 3017 regno); 3018 return err; 3019 } 3020 err = __check_stack_boundary(env, regno, max_off, access_size, 3021 zero_size_allowed); 3022 if (err) { 3023 verbose(env, "R%d max value is outside of stack bound\n", 3024 regno); 3025 return err; 3026 } 3027 } 3028 3029 if (meta && meta->raw_mode) { 3030 meta->access_size = access_size; 3031 meta->regno = regno; 3032 return 0; 3033 } 3034 3035 for (i = min_off; i < max_off + access_size; i++) { 3036 u8 *stype; 3037 3038 slot = -i - 1; 3039 spi = slot / BPF_REG_SIZE; 3040 if (state->allocated_stack <= slot) 3041 goto err; 3042 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 3043 if (*stype == STACK_MISC) 3044 goto mark; 3045 if (*stype == STACK_ZERO) { 3046 /* helper can write anything into the stack */ 3047 *stype = STACK_MISC; 3048 goto mark; 3049 } 3050 if (state->stack[spi].slot_type[0] == STACK_SPILL && 3051 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { 3052 __mark_reg_unknown(&state->stack[spi].spilled_ptr); 3053 for (j = 0; j < BPF_REG_SIZE; j++) 3054 state->stack[spi].slot_type[j] = STACK_MISC; 3055 goto mark; 3056 } 3057 3058 err: 3059 if (tnum_is_const(reg->var_off)) { 3060 verbose(env, "invalid indirect read from stack off %d+%d size %d\n", 3061 min_off, i - min_off, access_size); 3062 } else { 3063 char tn_buf[48]; 3064 3065 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3066 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n", 3067 tn_buf, i - min_off, access_size); 3068 } 3069 return -EACCES; 3070 mark: 3071 /* reading any byte out of 8-byte 'spill_slot' will cause 3072 * the whole slot to be marked as 'read' 3073 */ 3074 mark_reg_read(env, &state->stack[spi].spilled_ptr, 3075 state->stack[spi].spilled_ptr.parent, 3076 REG_LIVE_READ64); 3077 } 3078 return update_stack_depth(env, state, min_off); 3079 } 3080 3081 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 3082 int access_size, bool zero_size_allowed, 3083 struct bpf_call_arg_meta *meta) 3084 { 3085 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 3086 3087 switch (reg->type) { 3088 case PTR_TO_PACKET: 3089 case PTR_TO_PACKET_META: 3090 return check_packet_access(env, regno, reg->off, access_size, 3091 zero_size_allowed); 3092 case PTR_TO_MAP_VALUE: 3093 if (check_map_access_type(env, regno, reg->off, access_size, 3094 meta && meta->raw_mode ? 
BPF_WRITE :
3095 BPF_READ))
3096 return -EACCES;
3097 return check_map_access(env, regno, reg->off, access_size,
3098 zero_size_allowed);
3099 default: /* scalar_value|ptr_to_stack or invalid ptr */
3100 return check_stack_boundary(env, regno, access_size,
3101 zero_size_allowed, meta);
3102 }
3103 }
3104
3105 /* Implementation details:
3106 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3107 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3108 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3109 * value_or_null->value transition, since the verifier only cares about
3110 * the range of access to valid map value pointer and doesn't care about actual
3111 * address of the map element.
3112 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3113 * reg->id > 0 after value_or_null->value transition. By doing so
3114 * two bpf_map_lookups will be considered two different pointers that
3115 * point to different bpf_spin_locks.
3116 * The verifier allows taking only one bpf_spin_lock at a time to avoid
3117 * dead-locks.
3118 * Since only one bpf_spin_lock is allowed the checks are simpler than
3119 * reg_is_refcounted() logic. The verifier needs to remember only
3120 * one spin_lock instead of array of acquired_refs.
3121 * cur_state->active_spin_lock remembers which map value element got locked
3122 * and clears it after bpf_spin_unlock.
3123 */
3124 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3125 bool is_lock)
3126 {
3127 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3128 struct bpf_verifier_state *cur = env->cur_state;
3129 bool is_const = tnum_is_const(reg->var_off);
3130 struct bpf_map *map = reg->map_ptr;
3131 u64 val = reg->var_off.value;
3132
3133 if (reg->type != PTR_TO_MAP_VALUE) {
3134 verbose(env, "R%d is not a pointer to map_value\n", regno);
3135 return -EINVAL;
3136 }
3137 if (!is_const) {
3138 verbose(env,
3139 "R%d doesn't have constant offset.
bpf_spin_lock has to be at the constant offset\n",
3140 regno);
3141 return -EINVAL;
3142 }
3143 if (!map->btf) {
3144 verbose(env,
3145 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3146 map->name);
3147 return -EINVAL;
3148 }
3149 if (!map_value_has_spin_lock(map)) {
3150 if (map->spin_lock_off == -E2BIG)
3151 verbose(env,
3152 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3153 map->name);
3154 else if (map->spin_lock_off == -ENOENT)
3155 verbose(env,
3156 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3157 map->name);
3158 else
3159 verbose(env,
3160 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3161 map->name);
3162 return -EINVAL;
3163 }
3164 if (map->spin_lock_off != val + reg->off) {
3165 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3166 val + reg->off);
3167 return -EINVAL;
3168 }
3169 if (is_lock) {
3170 if (cur->active_spin_lock) {
3171 verbose(env,
3172 "Locking two bpf_spin_locks are not allowed\n");
3173 return -EINVAL;
3174 }
3175 cur->active_spin_lock = reg->id;
3176 } else {
3177 if (!cur->active_spin_lock) {
3178 verbose(env, "bpf_spin_unlock without taking a lock\n");
3179 return -EINVAL;
3180 }
3181 if (cur->active_spin_lock != reg->id) {
3182 verbose(env, "bpf_spin_unlock of different lock\n");
3183 return -EINVAL;
3184 }
3185 cur->active_spin_lock = 0;
3186 }
3187 return 0;
3188 }
3189
3190 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3191 {
3192 return type == ARG_PTR_TO_MEM ||
3193 type == ARG_PTR_TO_MEM_OR_NULL ||
3194 type == ARG_PTR_TO_UNINIT_MEM;
3195 }
3196
3197 static bool arg_type_is_mem_size(enum bpf_arg_type type)
3198 {
3199 return type == ARG_CONST_SIZE ||
3200 type == ARG_CONST_SIZE_OR_ZERO;
3201 }
3202
3203 static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3204 {
3205 return type == ARG_PTR_TO_INT ||
3206 type == ARG_PTR_TO_LONG;
3207 }
3208
3209 static int int_ptr_type_to_size(enum bpf_arg_type type)
3210 {
3211 if (type == ARG_PTR_TO_INT)
3212 return sizeof(u32);
3213 else if (type == ARG_PTR_TO_LONG)
3214 return sizeof(u64);
3215
3216 return -EINVAL;
3217 }
3218
3219 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
3220 enum bpf_arg_type arg_type,
3221 struct bpf_call_arg_meta *meta)
3222 {
3223 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3224 enum bpf_reg_type expected_type, type = reg->type;
3225 int err = 0;
3226
3227 if (arg_type == ARG_DONTCARE)
3228 return 0;
3229
3230 err = check_reg_arg(env, regno, SRC_OP);
3231 if (err)
3232 return err;
3233
3234 if (arg_type == ARG_ANYTHING) {
3235 if (is_pointer_value(env, regno)) {
3236 verbose(env, "R%d leaks addr into helper function\n",
3237 regno);
3238 return -EACCES;
3239 }
3240 return 0;
3241 }
3242
3243 if (type_is_pkt_pointer(type) &&
3244 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
3245 verbose(env, "helper access to the packet is not allowed\n");
3246 return -EACCES;
3247 }
3248
3249 if (arg_type == ARG_PTR_TO_MAP_KEY ||
3250 arg_type == ARG_PTR_TO_MAP_VALUE ||
3251 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3252 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
3253 expected_type = PTR_TO_STACK;
3254 if (register_is_null(reg) &&
3255 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3256 /* final test in check_stack_boundary() */;
3257 else if (!type_is_pkt_pointer(type) &&
3258 type != PTR_TO_MAP_VALUE &&
3259 type != expected_type)
3260 goto err_type;
3261 } else if (arg_type == ARG_CONST_SIZE ||
3262 arg_type == ARG_CONST_SIZE_OR_ZERO) {
3263 expected_type = SCALAR_VALUE;
3264 if
(type != expected_type) 3265 goto err_type; 3266 } else if (arg_type == ARG_CONST_MAP_PTR) { 3267 expected_type = CONST_PTR_TO_MAP; 3268 if (type != expected_type) 3269 goto err_type; 3270 } else if (arg_type == ARG_PTR_TO_CTX) { 3271 expected_type = PTR_TO_CTX; 3272 if (type != expected_type) 3273 goto err_type; 3274 err = check_ctx_reg(env, reg, regno); 3275 if (err < 0) 3276 return err; 3277 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) { 3278 expected_type = PTR_TO_SOCK_COMMON; 3279 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ 3280 if (!type_is_sk_pointer(type)) 3281 goto err_type; 3282 if (reg->ref_obj_id) { 3283 if (meta->ref_obj_id) { 3284 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 3285 regno, reg->ref_obj_id, 3286 meta->ref_obj_id); 3287 return -EFAULT; 3288 } 3289 meta->ref_obj_id = reg->ref_obj_id; 3290 } 3291 } else if (arg_type == ARG_PTR_TO_SOCKET) { 3292 expected_type = PTR_TO_SOCKET; 3293 if (type != expected_type) 3294 goto err_type; 3295 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { 3296 if (meta->func_id == BPF_FUNC_spin_lock) { 3297 if (process_spin_lock(env, regno, true)) 3298 return -EACCES; 3299 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 3300 if (process_spin_lock(env, regno, false)) 3301 return -EACCES; 3302 } else { 3303 verbose(env, "verifier internal error\n"); 3304 return -EFAULT; 3305 } 3306 } else if (arg_type_is_mem_ptr(arg_type)) { 3307 expected_type = PTR_TO_STACK; 3308 /* One exception here. In case function allows for NULL to be 3309 * passed in as argument, it's a SCALAR_VALUE type. Final test 3310 * happens during stack boundary checking. 3311 */ 3312 if (register_is_null(reg) && 3313 arg_type == ARG_PTR_TO_MEM_OR_NULL) 3314 /* final test in check_stack_boundary() */; 3315 else if (!type_is_pkt_pointer(type) && 3316 type != PTR_TO_MAP_VALUE && 3317 type != expected_type) 3318 goto err_type; 3319 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; 3320 } else if (arg_type_is_int_ptr(arg_type)) { 3321 expected_type = PTR_TO_STACK; 3322 if (!type_is_pkt_pointer(type) && 3323 type != PTR_TO_MAP_VALUE && 3324 type != expected_type) 3325 goto err_type; 3326 } else { 3327 verbose(env, "unsupported arg_type %d\n", arg_type); 3328 return -EFAULT; 3329 } 3330 3331 if (arg_type == ARG_CONST_MAP_PTR) { 3332 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 3333 meta->map_ptr = reg->map_ptr; 3334 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 3335 /* bpf_map_xxx(..., map_ptr, ..., key) call: 3336 * check that [key, key + map->key_size) are within 3337 * stack limits and initialized 3338 */ 3339 if (!meta->map_ptr) { 3340 /* in function declaration map_ptr must come before 3341 * map_key, so that it's verified and known before 3342 * we have to check map_key here. 
Otherwise it means 3343 * that kernel subsystem misconfigured verifier 3344 */ 3345 verbose(env, "invalid map_ptr to access map->key\n"); 3346 return -EACCES; 3347 } 3348 err = check_helper_mem_access(env, regno, 3349 meta->map_ptr->key_size, false, 3350 NULL); 3351 } else if (arg_type == ARG_PTR_TO_MAP_VALUE || 3352 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && 3353 !register_is_null(reg)) || 3354 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { 3355 /* bpf_map_xxx(..., map_ptr, ..., value) call: 3356 * check [value, value + map->value_size) validity 3357 */ 3358 if (!meta->map_ptr) { 3359 /* kernel subsystem misconfigured verifier */ 3360 verbose(env, "invalid map_ptr to access map->value\n"); 3361 return -EACCES; 3362 } 3363 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); 3364 err = check_helper_mem_access(env, regno, 3365 meta->map_ptr->value_size, false, 3366 meta); 3367 } else if (arg_type_is_mem_size(arg_type)) { 3368 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 3369 3370 /* remember the mem_size which may be used later 3371 * to refine return values. 3372 */ 3373 meta->msize_smax_value = reg->smax_value; 3374 meta->msize_umax_value = reg->umax_value; 3375 3376 /* The register is SCALAR_VALUE; the access check 3377 * happens using its boundaries. 3378 */ 3379 if (!tnum_is_const(reg->var_off)) 3380 /* For unprivileged variable accesses, disable raw 3381 * mode so that the program is required to 3382 * initialize all the memory that the helper could 3383 * just partially fill up. 3384 */ 3385 meta = NULL; 3386 3387 if (reg->smin_value < 0) { 3388 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 3389 regno); 3390 return -EACCES; 3391 } 3392 3393 if (reg->umin_value == 0) { 3394 err = check_helper_mem_access(env, regno - 1, 0, 3395 zero_size_allowed, 3396 meta); 3397 if (err) 3398 return err; 3399 } 3400 3401 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 3402 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 3403 regno); 3404 return -EACCES; 3405 } 3406 err = check_helper_mem_access(env, regno - 1, 3407 reg->umax_value, 3408 zero_size_allowed, meta); 3409 if (!err) 3410 err = mark_chain_precision(env, regno); 3411 } else if (arg_type_is_int_ptr(arg_type)) { 3412 int size = int_ptr_type_to_size(arg_type); 3413 3414 err = check_helper_mem_access(env, regno, size, false, meta); 3415 if (err) 3416 return err; 3417 err = check_ptr_alignment(env, reg, 0, size, true); 3418 } 3419 3420 return err; 3421 err_type: 3422 verbose(env, "R%d type=%s expected=%s\n", regno, 3423 reg_type_str[type], reg_type_str[expected_type]); 3424 return -EACCES; 3425 } 3426 3427 static int check_map_func_compatibility(struct bpf_verifier_env *env, 3428 struct bpf_map *map, int func_id) 3429 { 3430 if (!map) 3431 return 0; 3432 3433 /* We need a two way check, first is from map perspective ... 
*/ 3434 switch (map->map_type) { 3435 case BPF_MAP_TYPE_PROG_ARRAY: 3436 if (func_id != BPF_FUNC_tail_call) 3437 goto error; 3438 break; 3439 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 3440 if (func_id != BPF_FUNC_perf_event_read && 3441 func_id != BPF_FUNC_perf_event_output && 3442 func_id != BPF_FUNC_perf_event_read_value) 3443 goto error; 3444 break; 3445 case BPF_MAP_TYPE_STACK_TRACE: 3446 if (func_id != BPF_FUNC_get_stackid) 3447 goto error; 3448 break; 3449 case BPF_MAP_TYPE_CGROUP_ARRAY: 3450 if (func_id != BPF_FUNC_skb_under_cgroup && 3451 func_id != BPF_FUNC_current_task_under_cgroup) 3452 goto error; 3453 break; 3454 case BPF_MAP_TYPE_CGROUP_STORAGE: 3455 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 3456 if (func_id != BPF_FUNC_get_local_storage) 3457 goto error; 3458 break; 3459 case BPF_MAP_TYPE_DEVMAP: 3460 if (func_id != BPF_FUNC_redirect_map && 3461 func_id != BPF_FUNC_map_lookup_elem) 3462 goto error; 3463 break; 3464 /* Restrict bpf side of cpumap and xskmap, open when use-cases 3465 * appear. 3466 */ 3467 case BPF_MAP_TYPE_CPUMAP: 3468 if (func_id != BPF_FUNC_redirect_map) 3469 goto error; 3470 break; 3471 case BPF_MAP_TYPE_XSKMAP: 3472 if (func_id != BPF_FUNC_redirect_map && 3473 func_id != BPF_FUNC_map_lookup_elem) 3474 goto error; 3475 break; 3476 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 3477 case BPF_MAP_TYPE_HASH_OF_MAPS: 3478 if (func_id != BPF_FUNC_map_lookup_elem) 3479 goto error; 3480 break; 3481 case BPF_MAP_TYPE_SOCKMAP: 3482 if (func_id != BPF_FUNC_sk_redirect_map && 3483 func_id != BPF_FUNC_sock_map_update && 3484 func_id != BPF_FUNC_map_delete_elem && 3485 func_id != BPF_FUNC_msg_redirect_map) 3486 goto error; 3487 break; 3488 case BPF_MAP_TYPE_SOCKHASH: 3489 if (func_id != BPF_FUNC_sk_redirect_hash && 3490 func_id != BPF_FUNC_sock_hash_update && 3491 func_id != BPF_FUNC_map_delete_elem && 3492 func_id != BPF_FUNC_msg_redirect_hash) 3493 goto error; 3494 break; 3495 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 3496 if (func_id != BPF_FUNC_sk_select_reuseport) 3497 goto error; 3498 break; 3499 case BPF_MAP_TYPE_QUEUE: 3500 case BPF_MAP_TYPE_STACK: 3501 if (func_id != BPF_FUNC_map_peek_elem && 3502 func_id != BPF_FUNC_map_pop_elem && 3503 func_id != BPF_FUNC_map_push_elem) 3504 goto error; 3505 break; 3506 case BPF_MAP_TYPE_SK_STORAGE: 3507 if (func_id != BPF_FUNC_sk_storage_get && 3508 func_id != BPF_FUNC_sk_storage_delete) 3509 goto error; 3510 break; 3511 default: 3512 break; 3513 } 3514 3515 /* ... and second from the function itself. 
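 * e.g. a plain BPF_MAP_TYPE_HASH falls through the map-side switch
 * above, so using it with bpf_tail_call() is only caught here, where
 * the func-side check insists on BPF_MAP_TYPE_PROG_ARRAY.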
*/ 3516 switch (func_id) { 3517 case BPF_FUNC_tail_call: 3518 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 3519 goto error; 3520 if (env->subprog_cnt > 1) { 3521 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); 3522 return -EINVAL; 3523 } 3524 break; 3525 case BPF_FUNC_perf_event_read: 3526 case BPF_FUNC_perf_event_output: 3527 case BPF_FUNC_perf_event_read_value: 3528 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 3529 goto error; 3530 break; 3531 case BPF_FUNC_get_stackid: 3532 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 3533 goto error; 3534 break; 3535 case BPF_FUNC_current_task_under_cgroup: 3536 case BPF_FUNC_skb_under_cgroup: 3537 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 3538 goto error; 3539 break; 3540 case BPF_FUNC_redirect_map: 3541 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 3542 map->map_type != BPF_MAP_TYPE_CPUMAP && 3543 map->map_type != BPF_MAP_TYPE_XSKMAP) 3544 goto error; 3545 break; 3546 case BPF_FUNC_sk_redirect_map: 3547 case BPF_FUNC_msg_redirect_map: 3548 case BPF_FUNC_sock_map_update: 3549 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 3550 goto error; 3551 break; 3552 case BPF_FUNC_sk_redirect_hash: 3553 case BPF_FUNC_msg_redirect_hash: 3554 case BPF_FUNC_sock_hash_update: 3555 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 3556 goto error; 3557 break; 3558 case BPF_FUNC_get_local_storage: 3559 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 3560 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 3561 goto error; 3562 break; 3563 case BPF_FUNC_sk_select_reuseport: 3564 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) 3565 goto error; 3566 break; 3567 case BPF_FUNC_map_peek_elem: 3568 case BPF_FUNC_map_pop_elem: 3569 case BPF_FUNC_map_push_elem: 3570 if (map->map_type != BPF_MAP_TYPE_QUEUE && 3571 map->map_type != BPF_MAP_TYPE_STACK) 3572 goto error; 3573 break; 3574 case BPF_FUNC_sk_storage_get: 3575 case BPF_FUNC_sk_storage_delete: 3576 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 3577 goto error; 3578 break; 3579 default: 3580 break; 3581 } 3582 3583 return 0; 3584 error: 3585 verbose(env, "cannot pass map_type %d into func %s#%d\n", 3586 map->map_type, func_id_name(func_id), func_id); 3587 return -EINVAL; 3588 } 3589 3590 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 3591 { 3592 int count = 0; 3593 3594 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 3595 count++; 3596 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 3597 count++; 3598 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 3599 count++; 3600 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 3601 count++; 3602 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 3603 count++; 3604 3605 /* We only support one arg being in raw mode at the moment, 3606 * which is sufficient for the helper functions we have 3607 * right now. 3608 */ 3609 return count <= 1; 3610 } 3611 3612 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, 3613 enum bpf_arg_type arg_next) 3614 { 3615 return (arg_type_is_mem_ptr(arg_curr) && 3616 !arg_type_is_mem_size(arg_next)) || 3617 (!arg_type_is_mem_ptr(arg_curr) && 3618 arg_type_is_mem_size(arg_next)); 3619 } 3620 3621 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 3622 { 3623 /* bpf_xxx(..., buf, len) call will access 'len' 3624 * bytes from memory 'buf'. Both arg types need 3625 * to be paired, so make sure there's no buggy 3626 * helper function specification. 
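 * A well-formed pairing looks like (sketch of a proto):
 *   .arg1_type = ARG_PTR_TO_UNINIT_MEM,
 *   .arg2_type = ARG_CONST_SIZE,
 * whereas a mem ptr in arg5 can never be followed by its size and is
 * rejected outright below.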
3627  */
3628     if (arg_type_is_mem_size(fn->arg1_type) ||
3629         arg_type_is_mem_ptr(fn->arg5_type)  ||
3630         check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
3631         check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
3632         check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
3633         check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
3634         return false;
3635 
3636     return true;
3637 }
3638 
3639 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
3640 {
3641     int count = 0;
3642 
3643     if (arg_type_may_be_refcounted(fn->arg1_type))
3644         count++;
3645     if (arg_type_may_be_refcounted(fn->arg2_type))
3646         count++;
3647     if (arg_type_may_be_refcounted(fn->arg3_type))
3648         count++;
3649     if (arg_type_may_be_refcounted(fn->arg4_type))
3650         count++;
3651     if (arg_type_may_be_refcounted(fn->arg5_type))
3652         count++;
3653 
3654     /* A reference acquiring function cannot acquire
3655      * another refcounted ptr.
3656      */
3657     if (is_acquire_function(func_id) && count)
3658         return false;
3659 
3660     /* We only support one arg being unreferenced at the moment,
3661      * which is sufficient for the helper functions we have right now.
3662      */
3663     return count <= 1;
3664 }
3665 
3666 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
3667 {
3668     return check_raw_mode_ok(fn) &&
3669            check_arg_pair_ok(fn) &&
3670            check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
3671 }
3672 
3673 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
3674  * are now invalid, so turn them into unknown SCALAR_VALUE.
3675  */
3676 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
3677                                      struct bpf_func_state *state)
3678 {
3679     struct bpf_reg_state *regs = state->regs, *reg;
3680     int i;
3681 
3682     for (i = 0; i < MAX_BPF_REG; i++)
3683         if (reg_is_pkt_pointer_any(&regs[i]))
3684             mark_reg_unknown(env, regs, i);
3685 
3686     bpf_for_each_spilled_reg(i, state, reg) {
3687         if (!reg)
3688             continue;
3689         if (reg_is_pkt_pointer_any(reg))
3690             __mark_reg_unknown(reg);
3691     }
3692 }
3693 
3694 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
3695 {
3696     struct bpf_verifier_state *vstate = env->cur_state;
3697     int i;
3698 
3699     for (i = 0; i <= vstate->curframe; i++)
3700         __clear_all_pkt_pointers(env, vstate->frame[i]);
3701 }
3702 
3703 static void release_reg_references(struct bpf_verifier_env *env,
3704                                    struct bpf_func_state *state,
3705                                    int ref_obj_id)
3706 {
3707     struct bpf_reg_state *regs = state->regs, *reg;
3708     int i;
3709 
3710     for (i = 0; i < MAX_BPF_REG; i++)
3711         if (regs[i].ref_obj_id == ref_obj_id)
3712             mark_reg_unknown(env, regs, i);
3713 
3714     bpf_for_each_spilled_reg(i, state, reg) {
3715         if (!reg)
3716             continue;
3717         if (reg->ref_obj_id == ref_obj_id)
3718             __mark_reg_unknown(reg);
3719     }
3720 }
3721 
3722 /* The pointer with the specified id has released its reference to kernel
3723  * resources. Identify all copies of the same pointer and clear the reference.
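 *
 * E.g. after r6 = bpf_sk_lookup_tcp(...) and r7 = r6, releasing the
 * reference via bpf_sk_release(r6) must invalidate r7 as well, since
 * both copies carry the same ref_obj_id.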
3724 */ 3725 static int release_reference(struct bpf_verifier_env *env, 3726 int ref_obj_id) 3727 { 3728 struct bpf_verifier_state *vstate = env->cur_state; 3729 int err; 3730 int i; 3731 3732 err = release_reference_state(cur_func(env), ref_obj_id); 3733 if (err) 3734 return err; 3735 3736 for (i = 0; i <= vstate->curframe; i++) 3737 release_reg_references(env, vstate->frame[i], ref_obj_id); 3738 3739 return 0; 3740 } 3741 3742 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 3743 int *insn_idx) 3744 { 3745 struct bpf_verifier_state *state = env->cur_state; 3746 struct bpf_func_state *caller, *callee; 3747 int i, err, subprog, target_insn; 3748 3749 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 3750 verbose(env, "the call stack of %d frames is too deep\n", 3751 state->curframe + 2); 3752 return -E2BIG; 3753 } 3754 3755 target_insn = *insn_idx + insn->imm; 3756 subprog = find_subprog(env, target_insn + 1); 3757 if (subprog < 0) { 3758 verbose(env, "verifier bug. No program starts at insn %d\n", 3759 target_insn + 1); 3760 return -EFAULT; 3761 } 3762 3763 caller = state->frame[state->curframe]; 3764 if (state->frame[state->curframe + 1]) { 3765 verbose(env, "verifier bug. Frame %d already allocated\n", 3766 state->curframe + 1); 3767 return -EFAULT; 3768 } 3769 3770 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 3771 if (!callee) 3772 return -ENOMEM; 3773 state->frame[state->curframe + 1] = callee; 3774 3775 /* callee cannot access r0, r6 - r9 for reading and has to write 3776 * into its own stack before reading from it. 3777 * callee can read/write into caller's stack 3778 */ 3779 init_func_state(env, callee, 3780 /* remember the callsite, it will be used by bpf_exit */ 3781 *insn_idx /* callsite */, 3782 state->curframe + 1 /* frameno within this callchain */, 3783 subprog /* subprog number within this prog */); 3784 3785 /* Transfer references to the callee */ 3786 err = transfer_reference_state(callee, caller); 3787 if (err) 3788 return err; 3789 3790 /* copy r1 - r5 args that callee can access. The copy includes parent 3791 * pointers, which connects us up to the liveness chain 3792 */ 3793 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 3794 callee->regs[i] = caller->regs[i]; 3795 3796 /* after the call registers r0 - r5 were scratched */ 3797 for (i = 0; i < CALLER_SAVED_REGS; i++) { 3798 mark_reg_not_init(env, caller->regs, caller_saved[i]); 3799 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 3800 } 3801 3802 /* only increment it after check_reg_arg() finished */ 3803 state->curframe++; 3804 3805 /* and go analyze first insn of the callee */ 3806 *insn_idx = target_insn; 3807 3808 if (env->log.level & BPF_LOG_LEVEL) { 3809 verbose(env, "caller:\n"); 3810 print_verifier_state(env, caller); 3811 verbose(env, "callee:\n"); 3812 print_verifier_state(env, callee); 3813 } 3814 return 0; 3815 } 3816 3817 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 3818 { 3819 struct bpf_verifier_state *state = env->cur_state; 3820 struct bpf_func_state *caller, *callee; 3821 struct bpf_reg_state *r0; 3822 int err; 3823 3824 callee = state->frame[state->curframe]; 3825 r0 = &callee->regs[BPF_REG_0]; 3826 if (r0->type == PTR_TO_STACK) { 3827 /* technically it's ok to return caller's stack pointer 3828 * (or caller's caller's pointer) back to the caller, 3829 * since these pointers are valid. 
Only current stack
3830      * pointer will be invalid as soon as function exits,
3831      * but let's be conservative
3832      */
3833         verbose(env, "cannot return stack pointer to the caller\n");
3834         return -EINVAL;
3835     }
3836 
3837     state->curframe--;
3838     caller = state->frame[state->curframe];
3839     /* return to the caller whatever r0 had in the callee */
3840     caller->regs[BPF_REG_0] = *r0;
3841 
3842     /* Transfer references to the caller */
3843     err = transfer_reference_state(caller, callee);
3844     if (err)
3845         return err;
3846 
3847     *insn_idx = callee->callsite + 1;
3848     if (env->log.level & BPF_LOG_LEVEL) {
3849         verbose(env, "returning from callee:\n");
3850         print_verifier_state(env, callee);
3851         verbose(env, "to caller at %d:\n", *insn_idx);
3852         print_verifier_state(env, caller);
3853     }
3854     /* clear everything in the callee */
3855     free_func_state(callee);
3856     state->frame[state->curframe + 1] = NULL;
3857     return 0;
3858 }
3859 
3860 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
3861                                    int func_id,
3862                                    struct bpf_call_arg_meta *meta)
3863 {
3864     struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
3865 
3866     if (ret_type != RET_INTEGER ||
3867         (func_id != BPF_FUNC_get_stack &&
3868          func_id != BPF_FUNC_probe_read_str))
3869         return;
3870 
3871     ret_reg->smax_value = meta->msize_smax_value;
3872     ret_reg->umax_value = meta->msize_umax_value;
3873     __reg_deduce_bounds(ret_reg);
3874     __reg_bound_offset(ret_reg);
3875 }
3876 
3877 static int
3878 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
3879                 int func_id, int insn_idx)
3880 {
3881     struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
3882     struct bpf_map *map = meta->map_ptr;
3883 
3884     if (func_id != BPF_FUNC_tail_call &&
3885         func_id != BPF_FUNC_map_lookup_elem &&
3886         func_id != BPF_FUNC_map_update_elem &&
3887         func_id != BPF_FUNC_map_delete_elem &&
3888         func_id != BPF_FUNC_map_push_elem &&
3889         func_id != BPF_FUNC_map_pop_elem &&
3890         func_id != BPF_FUNC_map_peek_elem)
3891         return 0;
3892 
3893     if (map == NULL) {
3894         verbose(env, "kernel subsystem misconfigured verifier\n");
3895         return -EINVAL;
3896     }
3897 
3898     /* In case of read-only, some additional restrictions
3899      * need to be applied in order to prevent altering the
3900      * state of the map from program side.
3901      */
3902     if ((map->map_flags & BPF_F_RDONLY_PROG) &&
3903         (func_id == BPF_FUNC_map_delete_elem ||
3904          func_id == BPF_FUNC_map_update_elem ||
3905          func_id == BPF_FUNC_map_push_elem ||
3906          func_id == BPF_FUNC_map_pop_elem)) {
3907         verbose(env, "write into map forbidden\n");
3908         return -EACCES;
3909     }
3910 
3911     if (!BPF_MAP_PTR(aux->map_state))
3912         bpf_map_ptr_store(aux, meta->map_ptr,
3913                           meta->map_ptr->unpriv_array);
3914     else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
3915         bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
3916                           meta->map_ptr->unpriv_array);
3917     return 0;
3918 }
3919 
3920 static int check_reference_leak(struct bpf_verifier_env *env)
3921 {
3922     struct bpf_func_state *state = cur_func(env);
3923     int i;
3924 
3925     for (i = 0; i < state->acquired_refs; i++) {
3926         verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
3927             state->refs[i].id, state->refs[i].insn_idx);
3928     }
3929     return state->acquired_refs ?
-EINVAL : 0; 3930 } 3931 3932 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 3933 { 3934 const struct bpf_func_proto *fn = NULL; 3935 struct bpf_reg_state *regs; 3936 struct bpf_call_arg_meta meta; 3937 bool changes_data; 3938 int i, err; 3939 3940 /* find function prototype */ 3941 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 3942 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 3943 func_id); 3944 return -EINVAL; 3945 } 3946 3947 if (env->ops->get_func_proto) 3948 fn = env->ops->get_func_proto(func_id, env->prog); 3949 if (!fn) { 3950 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 3951 func_id); 3952 return -EINVAL; 3953 } 3954 3955 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 3956 if (!env->prog->gpl_compatible && fn->gpl_only) { 3957 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 3958 return -EINVAL; 3959 } 3960 3961 /* With LD_ABS/IND some JITs save/restore skb from r1. */ 3962 changes_data = bpf_helper_changes_pkt_data(fn->func); 3963 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 3964 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 3965 func_id_name(func_id), func_id); 3966 return -EINVAL; 3967 } 3968 3969 memset(&meta, 0, sizeof(meta)); 3970 meta.pkt_access = fn->pkt_access; 3971 3972 err = check_func_proto(fn, func_id); 3973 if (err) { 3974 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 3975 func_id_name(func_id), func_id); 3976 return err; 3977 } 3978 3979 meta.func_id = func_id; 3980 /* check args */ 3981 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); 3982 if (err) 3983 return err; 3984 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 3985 if (err) 3986 return err; 3987 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 3988 if (err) 3989 return err; 3990 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); 3991 if (err) 3992 return err; 3993 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); 3994 if (err) 3995 return err; 3996 3997 err = record_func_map(env, &meta, func_id, insn_idx); 3998 if (err) 3999 return err; 4000 4001 /* Mark slots with STACK_MISC in case of raw mode, stack offset 4002 * is inferred from register state. 4003 */ 4004 for (i = 0; i < meta.access_size; i++) { 4005 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, 4006 BPF_WRITE, -1, false); 4007 if (err) 4008 return err; 4009 } 4010 4011 if (func_id == BPF_FUNC_tail_call) { 4012 err = check_reference_leak(env); 4013 if (err) { 4014 verbose(env, "tail_call would lead to reference leak\n"); 4015 return err; 4016 } 4017 } else if (is_release_function(func_id)) { 4018 err = release_reference(env, meta.ref_obj_id); 4019 if (err) { 4020 verbose(env, "func %s#%d reference has not been acquired before\n", 4021 func_id_name(func_id), func_id); 4022 return err; 4023 } 4024 } 4025 4026 regs = cur_regs(env); 4027 4028 /* check that flags argument in get_local_storage(map, flags) is 0, 4029 * this is required because get_local_storage() can't return an error. 
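 *
 * I.e. a program calling bpf_get_local_storage(map, 1) is rejected
 * here at verification time, because the helper has no way to report
 * the bad flags at run time.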
4030  */
4031     if (func_id == BPF_FUNC_get_local_storage &&
4032         !register_is_null(&regs[BPF_REG_2])) {
4033         verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4034         return -EINVAL;
4035     }
4036 
4037     /* reset caller saved regs */
4038     for (i = 0; i < CALLER_SAVED_REGS; i++) {
4039         mark_reg_not_init(env, regs, caller_saved[i]);
4040         check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4041     }
4042 
4043     /* helper call returns 64-bit value. */
4044     regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
4045 
4046     /* update return register (already marked as written above) */
4047     if (fn->ret_type == RET_INTEGER) {
4048         /* sets type to SCALAR_VALUE */
4049         mark_reg_unknown(env, regs, BPF_REG_0);
4050     } else if (fn->ret_type == RET_VOID) {
4051         regs[BPF_REG_0].type = NOT_INIT;
4052     } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
4053                fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4054         /* There is no offset yet applied, variable or fixed */
4055         mark_reg_known_zero(env, regs, BPF_REG_0);
4056         /* remember map_ptr, so that check_map_access()
4057          * can check 'value_size' boundary of memory access
4058          * to map element returned from bpf_map_lookup_elem()
4059          */
4060         if (meta.map_ptr == NULL) {
4061             verbose(env,
4062                 "kernel subsystem misconfigured verifier\n");
4063             return -EINVAL;
4064         }
4065         regs[BPF_REG_0].map_ptr = meta.map_ptr;
4066         if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4067             regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
4068             if (map_value_has_spin_lock(meta.map_ptr))
4069                 regs[BPF_REG_0].id = ++env->id_gen;
4070         } else {
4071             regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
4072             regs[BPF_REG_0].id = ++env->id_gen;
4073         }
4074     } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
4075         mark_reg_known_zero(env, regs, BPF_REG_0);
4076         regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
4077         regs[BPF_REG_0].id = ++env->id_gen;
4078     } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
4079         mark_reg_known_zero(env, regs, BPF_REG_0);
4080         regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
4081         regs[BPF_REG_0].id = ++env->id_gen;
4082     } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
4083         mark_reg_known_zero(env, regs, BPF_REG_0);
4084         regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
4085         regs[BPF_REG_0].id = ++env->id_gen;
4086     } else {
4087         verbose(env, "unknown return type %d of func %s#%d\n",
4088             fn->ret_type, func_id_name(func_id), func_id);
4089         return -EINVAL;
4090     }
4091 
4092     if (is_ptr_cast_function(func_id)) {
4093         /* For release_reference() */
4094         regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
4095     } else if (is_acquire_function(func_id)) {
4096         int id = acquire_reference_state(env, insn_idx);
4097 
4098         if (id < 0)
4099             return id;
4100         /* For mark_ptr_or_null_reg() */
4101         regs[BPF_REG_0].id = id;
4102         /* For release_reference() */
4103         regs[BPF_REG_0].ref_obj_id = id;
4104     }
4105 
4106     do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4107 
4108     err = check_map_func_compatibility(env, meta.map_ptr, func_id);
4109     if (err)
4110         return err;
4111 
4112     if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
4113         const char *err_str;
4114 
4115 #ifdef CONFIG_PERF_EVENTS
4116         err = get_callchain_buffers(sysctl_perf_event_max_stack);
4117         err_str = "cannot get callchain buffer for func %s#%d\n";
4118 #else
4119         err = -ENOTSUPP;
4120         err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
4121 #endif
4122         if (err) {
4123             verbose(env, err_str, func_id_name(func_id), func_id);
4124             return err;
4125         }
4126 
4127         env->prog->has_callchain_buf = true;
4128
} 4129 4130 if (changes_data) 4131 clear_all_pkt_pointers(env); 4132 return 0; 4133 } 4134 4135 static bool signed_add_overflows(s64 a, s64 b) 4136 { 4137 /* Do the add in u64, where overflow is well-defined */ 4138 s64 res = (s64)((u64)a + (u64)b); 4139 4140 if (b < 0) 4141 return res > a; 4142 return res < a; 4143 } 4144 4145 static bool signed_sub_overflows(s64 a, s64 b) 4146 { 4147 /* Do the sub in u64, where overflow is well-defined */ 4148 s64 res = (s64)((u64)a - (u64)b); 4149 4150 if (b < 0) 4151 return res < a; 4152 return res > a; 4153 } 4154 4155 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 4156 const struct bpf_reg_state *reg, 4157 enum bpf_reg_type type) 4158 { 4159 bool known = tnum_is_const(reg->var_off); 4160 s64 val = reg->var_off.value; 4161 s64 smin = reg->smin_value; 4162 4163 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 4164 verbose(env, "math between %s pointer and %lld is not allowed\n", 4165 reg_type_str[type], val); 4166 return false; 4167 } 4168 4169 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 4170 verbose(env, "%s pointer offset %d is not allowed\n", 4171 reg_type_str[type], reg->off); 4172 return false; 4173 } 4174 4175 if (smin == S64_MIN) { 4176 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 4177 reg_type_str[type]); 4178 return false; 4179 } 4180 4181 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 4182 verbose(env, "value %lld makes %s pointer be out of bounds\n", 4183 smin, reg_type_str[type]); 4184 return false; 4185 } 4186 4187 return true; 4188 } 4189 4190 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 4191 { 4192 return &env->insn_aux_data[env->insn_idx]; 4193 } 4194 4195 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 4196 u32 *ptr_limit, u8 opcode, bool off_is_neg) 4197 { 4198 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || 4199 (opcode == BPF_SUB && !off_is_neg); 4200 u32 off; 4201 4202 switch (ptr_reg->type) { 4203 case PTR_TO_STACK: 4204 /* Indirect variable offset stack access is prohibited in 4205 * unprivileged mode so it's not handled here. 4206 */ 4207 off = ptr_reg->off + ptr_reg->var_off.value; 4208 if (mask_to_left) 4209 *ptr_limit = MAX_BPF_STACK + off; 4210 else 4211 *ptr_limit = -off; 4212 return 0; 4213 case PTR_TO_MAP_VALUE: 4214 if (mask_to_left) { 4215 *ptr_limit = ptr_reg->umax_value + ptr_reg->off; 4216 } else { 4217 off = ptr_reg->smin_value + ptr_reg->off; 4218 *ptr_limit = ptr_reg->map_ptr->value_size - off; 4219 } 4220 return 0; 4221 default: 4222 return -EINVAL; 4223 } 4224 } 4225 4226 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 4227 const struct bpf_insn *insn) 4228 { 4229 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; 4230 } 4231 4232 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 4233 u32 alu_state, u32 alu_limit) 4234 { 4235 /* If we arrived here from different branches with different 4236 * state or limits to sanitize, then this won't work. 4237 */ 4238 if (aux->alu_state && 4239 (aux->alu_state != alu_state || 4240 aux->alu_limit != alu_limit)) 4241 return -EACCES; 4242 4243 /* Corresponding fixup done in fixup_bpf_calls(). 
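 *
 * Sketch of that fixup: the patched program masks the offset register
 * against alu_limit right before the pointer ALU instruction, so an
 * out-of-range offset that is reachable only under speculation is
 * forced to zero instead of escaping the object's bounds.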
 */
4244     aux->alu_state = alu_state;
4245     aux->alu_limit = alu_limit;
4246     return 0;
4247 }
4248 
4249 static int sanitize_val_alu(struct bpf_verifier_env *env,
4250                             struct bpf_insn *insn)
4251 {
4252     struct bpf_insn_aux_data *aux = cur_aux(env);
4253 
4254     if (can_skip_alu_sanitation(env, insn))
4255         return 0;
4256 
4257     return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4258 }
4259 
4260 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4261                             struct bpf_insn *insn,
4262                             const struct bpf_reg_state *ptr_reg,
4263                             struct bpf_reg_state *dst_reg,
4264                             bool off_is_neg)
4265 {
4266     struct bpf_verifier_state *vstate = env->cur_state;
4267     struct bpf_insn_aux_data *aux = cur_aux(env);
4268     bool ptr_is_dst_reg = ptr_reg == dst_reg;
4269     u8 opcode = BPF_OP(insn->code);
4270     u32 alu_state, alu_limit;
4271     struct bpf_reg_state tmp;
4272     bool ret;
4273 
4274     if (can_skip_alu_sanitation(env, insn))
4275         return 0;
4276 
4277     /* We already marked aux for masking from non-speculative
4278      * paths, thus we got here in the first place. We only care
4279      * to explore bad access from here.
4280      */
4281     if (vstate->speculative)
4282         goto do_sim;
4283 
4284     alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4285     alu_state |= ptr_is_dst_reg ?
4286                  BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4287 
4288     if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4289         return 0;
4290     if (update_alu_sanitation_state(aux, alu_state, alu_limit))
4291         return -EACCES;
4292 do_sim:
4293     /* Simulate and find potential out-of-bounds access under
4294      * speculative execution from truncation as a result of
4295      * masking when off was not within expected range. If off
4296      * sits in dst, then we temporarily need to move ptr there
4297      * to simulate dst (== 0) +/-= ptr. Needed, for example,
4298      * for cases where we use K-based arithmetic in one direction
4299      * and truncated reg-based in the other in order to explore
4300      * bad access.
4301      */
4302     if (!ptr_is_dst_reg) {
4303         tmp = *dst_reg;
4304         *dst_reg = *ptr_reg;
4305     }
4306     ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
4307     if (!ptr_is_dst_reg && ret)
4308         *dst_reg = tmp;
4309     return !ret ? -EFAULT : 0;
4310 }
4311 
4312 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
4313  * Caller should also handle BPF_MOV case separately.
4314  * If we return -EACCES, caller may want to try again treating pointer as a
4315  * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
4316  */
4317 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4318                                    struct bpf_insn *insn,
4319                                    const struct bpf_reg_state *ptr_reg,
4320                                    const struct bpf_reg_state *off_reg)
4321 {
4322     struct bpf_verifier_state *vstate = env->cur_state;
4323     struct bpf_func_state *state = vstate->frame[vstate->curframe];
4324     struct bpf_reg_state *regs = state->regs, *dst_reg;
4325     bool known = tnum_is_const(off_reg->var_off);
4326     s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
4327         smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
4328     u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
4329         umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
4330     u32 dst = insn->dst_reg, src = insn->src_reg;
4331     u8 opcode = BPF_OP(insn->code);
4332     int ret;
4333 
4334     dst_reg = &regs[dst];
4335 
4336     if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
4337         smin_val > smax_val || umin_val > umax_val) {
4338         /* Taint dst register if offset had invalid bounds derived from
4339          * e.g.
dead branches. 4340 */ 4341 __mark_reg_unknown(dst_reg); 4342 return 0; 4343 } 4344 4345 if (BPF_CLASS(insn->code) != BPF_ALU64) { 4346 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 4347 verbose(env, 4348 "R%d 32-bit pointer arithmetic prohibited\n", 4349 dst); 4350 return -EACCES; 4351 } 4352 4353 switch (ptr_reg->type) { 4354 case PTR_TO_MAP_VALUE_OR_NULL: 4355 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 4356 dst, reg_type_str[ptr_reg->type]); 4357 return -EACCES; 4358 case CONST_PTR_TO_MAP: 4359 case PTR_TO_PACKET_END: 4360 case PTR_TO_SOCKET: 4361 case PTR_TO_SOCKET_OR_NULL: 4362 case PTR_TO_SOCK_COMMON: 4363 case PTR_TO_SOCK_COMMON_OR_NULL: 4364 case PTR_TO_TCP_SOCK: 4365 case PTR_TO_TCP_SOCK_OR_NULL: 4366 case PTR_TO_XDP_SOCK: 4367 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 4368 dst, reg_type_str[ptr_reg->type]); 4369 return -EACCES; 4370 case PTR_TO_MAP_VALUE: 4371 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { 4372 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", 4373 off_reg == dst_reg ? dst : src); 4374 return -EACCES; 4375 } 4376 /* fall-through */ 4377 default: 4378 break; 4379 } 4380 4381 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 4382 * The id may be overwritten later if we create a new variable offset. 4383 */ 4384 dst_reg->type = ptr_reg->type; 4385 dst_reg->id = ptr_reg->id; 4386 4387 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 4388 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 4389 return -EINVAL; 4390 4391 switch (opcode) { 4392 case BPF_ADD: 4393 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 4394 if (ret < 0) { 4395 verbose(env, "R%d tried to add from different maps or paths\n", dst); 4396 return ret; 4397 } 4398 /* We can take a fixed offset as long as it doesn't overflow 4399 * the s32 'off' field 4400 */ 4401 if (known && (ptr_reg->off + smin_val == 4402 (s64)(s32)(ptr_reg->off + smin_val))) { 4403 /* pointer += K. Accumulate it into fixed offset */ 4404 dst_reg->smin_value = smin_ptr; 4405 dst_reg->smax_value = smax_ptr; 4406 dst_reg->umin_value = umin_ptr; 4407 dst_reg->umax_value = umax_ptr; 4408 dst_reg->var_off = ptr_reg->var_off; 4409 dst_reg->off = ptr_reg->off + smin_val; 4410 dst_reg->raw = ptr_reg->raw; 4411 break; 4412 } 4413 /* A new variable offset is created. Note that off_reg->off 4414 * == 0, since it's a scalar. 4415 * dst_reg gets the pointer type and since some positive 4416 * integer value was added to the pointer, give it a new 'id' 4417 * if it's a PTR_TO_PACKET. 4418 * this creates a new 'base' pointer, off_reg (variable) gets 4419 * added into the variable offset, and we copy the fixed offset 4420 * from ptr_reg. 
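 *
 * E.g. with a packet pointer (in the style of the examples further
 * below):
 *   r2 = r3;   // r3 = pkt(id=n, off=0)
 *   r2 += r4;  // r4 = unknown scalar
 * leaves r2 = pkt(id=m, off=0) with r4 folded into var_off and the
 * checked range reset to zero.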
4421  */
4422         if (signed_add_overflows(smin_ptr, smin_val) ||
4423             signed_add_overflows(smax_ptr, smax_val)) {
4424             dst_reg->smin_value = S64_MIN;
4425             dst_reg->smax_value = S64_MAX;
4426         } else {
4427             dst_reg->smin_value = smin_ptr + smin_val;
4428             dst_reg->smax_value = smax_ptr + smax_val;
4429         }
4430         if (umin_ptr + umin_val < umin_ptr ||
4431             umax_ptr + umax_val < umax_ptr) {
4432             dst_reg->umin_value = 0;
4433             dst_reg->umax_value = U64_MAX;
4434         } else {
4435             dst_reg->umin_value = umin_ptr + umin_val;
4436             dst_reg->umax_value = umax_ptr + umax_val;
4437         }
4438         dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
4439         dst_reg->off = ptr_reg->off;
4440         dst_reg->raw = ptr_reg->raw;
4441         if (reg_is_pkt_pointer(ptr_reg)) {
4442             dst_reg->id = ++env->id_gen;
4443             /* something was added to pkt_ptr, set range to zero */
4444             dst_reg->raw = 0;
4445         }
4446         break;
4447     case BPF_SUB:
4448         ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4449         if (ret < 0) {
4450             verbose(env, "R%d tried to sub from different maps or paths\n", dst);
4451             return ret;
4452         }
4453         if (dst_reg == off_reg) {
4454             /* scalar -= pointer.  Creates an unknown scalar */
4455             verbose(env, "R%d tried to subtract pointer from scalar\n",
4456                 dst);
4457             return -EACCES;
4458         }
4459         /* We don't allow subtraction from FP, because (according to
4460          * test_verifier.c test "invalid fp arithmetic") JITs might not
4461          * be able to deal with it.
4462          */
4463         if (ptr_reg->type == PTR_TO_STACK) {
4464             verbose(env, "R%d subtraction from stack pointer prohibited\n",
4465                 dst);
4466             return -EACCES;
4467         }
4468         if (known && (ptr_reg->off - smin_val ==
4469                       (s64)(s32)(ptr_reg->off - smin_val))) {
4470             /* pointer -= K.  Subtract it from fixed offset */
4471             dst_reg->smin_value = smin_ptr;
4472             dst_reg->smax_value = smax_ptr;
4473             dst_reg->umin_value = umin_ptr;
4474             dst_reg->umax_value = umax_ptr;
4475             dst_reg->var_off = ptr_reg->var_off;
4476             dst_reg->id = ptr_reg->id;
4477             dst_reg->off = ptr_reg->off - smin_val;
4478             dst_reg->raw = ptr_reg->raw;
4479             break;
4480         }
4481         /* A new variable offset is created.  If the subtrahend is known
4482          * nonnegative, then any reg->range we had before is still good.
4483          */
4484         if (signed_sub_overflows(smin_ptr, smax_val) ||
4485             signed_sub_overflows(smax_ptr, smin_val)) {
4486             /* Overflow possible, we know nothing */
4487             dst_reg->smin_value = S64_MIN;
4488             dst_reg->smax_value = S64_MAX;
4489         } else {
4490             dst_reg->smin_value = smin_ptr - smax_val;
4491             dst_reg->smax_value = smax_ptr - smin_val;
4492         }
4493         if (umin_ptr < umax_val) {
4494             /* Overflow possible, we know nothing */
4495             dst_reg->umin_value = 0;
4496             dst_reg->umax_value = U64_MAX;
4497         } else {
4498             /* Cannot overflow (as long as bounds are consistent) */
4499             dst_reg->umin_value = umin_ptr - umax_val;
4500             dst_reg->umax_value = umax_ptr - umin_val;
4501         }
4502         dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
4503         dst_reg->off = ptr_reg->off;
4504         dst_reg->raw = ptr_reg->raw;
4505         if (reg_is_pkt_pointer(ptr_reg)) {
4506             dst_reg->id = ++env->id_gen;
4507             /* a negative subtrahend may effectively advance pkt_ptr, set range to zero */
4508             if (smin_val < 0)
4509                 dst_reg->raw = 0;
4510         }
4511         break;
4512     case BPF_AND:
4513     case BPF_OR:
4514     case BPF_XOR:
4515         /* bitwise ops on pointers are troublesome, prohibit. */
4516         verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
4517             dst, bpf_alu_string[opcode >> 4]);
4518         return -EACCES;
4519     default:
4520         /* other operators (e.g.
MUL,LSH) produce non-pointer results */ 4521 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 4522 dst, bpf_alu_string[opcode >> 4]); 4523 return -EACCES; 4524 } 4525 4526 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 4527 return -EINVAL; 4528 4529 __update_reg_bounds(dst_reg); 4530 __reg_deduce_bounds(dst_reg); 4531 __reg_bound_offset(dst_reg); 4532 4533 /* For unprivileged we require that resulting offset must be in bounds 4534 * in order to be able to sanitize access later on. 4535 */ 4536 if (!env->allow_ptr_leaks) { 4537 if (dst_reg->type == PTR_TO_MAP_VALUE && 4538 check_map_access(env, dst, dst_reg->off, 1, false)) { 4539 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 4540 "prohibited for !root\n", dst); 4541 return -EACCES; 4542 } else if (dst_reg->type == PTR_TO_STACK && 4543 check_stack_access(env, dst_reg, dst_reg->off + 4544 dst_reg->var_off.value, 1)) { 4545 verbose(env, "R%d stack pointer arithmetic goes out of range, " 4546 "prohibited for !root\n", dst); 4547 return -EACCES; 4548 } 4549 } 4550 4551 return 0; 4552 } 4553 4554 /* WARNING: This function does calculations on 64-bit values, but the actual 4555 * execution may occur on 32-bit values. Therefore, things like bitshifts 4556 * need extra checks in the 32-bit case. 4557 */ 4558 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 4559 struct bpf_insn *insn, 4560 struct bpf_reg_state *dst_reg, 4561 struct bpf_reg_state src_reg) 4562 { 4563 struct bpf_reg_state *regs = cur_regs(env); 4564 u8 opcode = BPF_OP(insn->code); 4565 bool src_known, dst_known; 4566 s64 smin_val, smax_val; 4567 u64 umin_val, umax_val; 4568 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 4569 u32 dst = insn->dst_reg; 4570 int ret; 4571 4572 if (insn_bitness == 32) { 4573 /* Relevant for 32-bit RSH: Information can propagate towards 4574 * LSB, so it isn't sufficient to only truncate the output to 4575 * 32 bits. 4576 */ 4577 coerce_reg_to_size(dst_reg, 4); 4578 coerce_reg_to_size(&src_reg, 4); 4579 } 4580 4581 smin_val = src_reg.smin_value; 4582 smax_val = src_reg.smax_value; 4583 umin_val = src_reg.umin_value; 4584 umax_val = src_reg.umax_value; 4585 src_known = tnum_is_const(src_reg.var_off); 4586 dst_known = tnum_is_const(dst_reg->var_off); 4587 4588 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || 4589 smin_val > smax_val || umin_val > umax_val) { 4590 /* Taint dst register if offset had invalid bounds derived from 4591 * e.g. dead branches. 
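 *
 * E.g. a src_reg whose var_off says "constant 1" while umin_value is
 * 2 is self-contradictory; such state can only arise on a path that
 * is actually dead, so dst is conservatively tainted instead of the
 * whole program being rejected.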
4592 */ 4593 __mark_reg_unknown(dst_reg); 4594 return 0; 4595 } 4596 4597 if (!src_known && 4598 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 4599 __mark_reg_unknown(dst_reg); 4600 return 0; 4601 } 4602 4603 switch (opcode) { 4604 case BPF_ADD: 4605 ret = sanitize_val_alu(env, insn); 4606 if (ret < 0) { 4607 verbose(env, "R%d tried to add from different pointers or scalars\n", dst); 4608 return ret; 4609 } 4610 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 4611 signed_add_overflows(dst_reg->smax_value, smax_val)) { 4612 dst_reg->smin_value = S64_MIN; 4613 dst_reg->smax_value = S64_MAX; 4614 } else { 4615 dst_reg->smin_value += smin_val; 4616 dst_reg->smax_value += smax_val; 4617 } 4618 if (dst_reg->umin_value + umin_val < umin_val || 4619 dst_reg->umax_value + umax_val < umax_val) { 4620 dst_reg->umin_value = 0; 4621 dst_reg->umax_value = U64_MAX; 4622 } else { 4623 dst_reg->umin_value += umin_val; 4624 dst_reg->umax_value += umax_val; 4625 } 4626 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 4627 break; 4628 case BPF_SUB: 4629 ret = sanitize_val_alu(env, insn); 4630 if (ret < 0) { 4631 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); 4632 return ret; 4633 } 4634 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 4635 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 4636 /* Overflow possible, we know nothing */ 4637 dst_reg->smin_value = S64_MIN; 4638 dst_reg->smax_value = S64_MAX; 4639 } else { 4640 dst_reg->smin_value -= smax_val; 4641 dst_reg->smax_value -= smin_val; 4642 } 4643 if (dst_reg->umin_value < umax_val) { 4644 /* Overflow possible, we know nothing */ 4645 dst_reg->umin_value = 0; 4646 dst_reg->umax_value = U64_MAX; 4647 } else { 4648 /* Cannot overflow (as long as bounds are consistent) */ 4649 dst_reg->umin_value -= umax_val; 4650 dst_reg->umax_value -= umin_val; 4651 } 4652 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 4653 break; 4654 case BPF_MUL: 4655 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 4656 if (smin_val < 0 || dst_reg->smin_value < 0) { 4657 /* Ain't nobody got time to multiply that sign */ 4658 __mark_reg_unbounded(dst_reg); 4659 __update_reg_bounds(dst_reg); 4660 break; 4661 } 4662 /* Both values are positive, so we can work with unsigned and 4663 * copy the result to signed (unless it exceeds S64_MAX). 4664 */ 4665 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 4666 /* Potential overflow, we know nothing */ 4667 __mark_reg_unbounded(dst_reg); 4668 /* (except what we can learn from the var_off) */ 4669 __update_reg_bounds(dst_reg); 4670 break; 4671 } 4672 dst_reg->umin_value *= umin_val; 4673 dst_reg->umax_value *= umax_val; 4674 if (dst_reg->umax_value > S64_MAX) { 4675 /* Overflow possible, we know nothing */ 4676 dst_reg->smin_value = S64_MIN; 4677 dst_reg->smax_value = S64_MAX; 4678 } else { 4679 dst_reg->smin_value = dst_reg->umin_value; 4680 dst_reg->smax_value = dst_reg->umax_value; 4681 } 4682 break; 4683 case BPF_AND: 4684 if (src_known && dst_known) { 4685 __mark_reg_known(dst_reg, dst_reg->var_off.value & 4686 src_reg.var_off.value); 4687 break; 4688 } 4689 /* We get our minimum from the var_off, since that's inherently 4690 * bitwise. Our maximum is the minimum of the operands' maxima. 
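 *
 * Worked case: dst in [0, 255] with var_off (0; 0xff) ANDed with a
 * src known to be 0xf0 gives var_off (0; 0xf0), hence umin = 0 and
 * umax = min(255, 0xf0) = 0xf0.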
4691  */
4692         dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
4693         dst_reg->umin_value = dst_reg->var_off.value;
4694         dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
4695         if (dst_reg->smin_value < 0 || smin_val < 0) {
4696             /* Lose signed bounds when ANDing negative numbers,
4697              * ain't nobody got time for that.
4698              */
4699             dst_reg->smin_value = S64_MIN;
4700             dst_reg->smax_value = S64_MAX;
4701         } else {
4702             /* ANDing two positives gives a positive, so safe to
4703              * cast result into s64.
4704              */
4705             dst_reg->smin_value = dst_reg->umin_value;
4706             dst_reg->smax_value = dst_reg->umax_value;
4707         }
4708         /* We may learn something more from the var_off */
4709         __update_reg_bounds(dst_reg);
4710         break;
4711     case BPF_OR:
4712         if (src_known && dst_known) {
4713             __mark_reg_known(dst_reg, dst_reg->var_off.value |
4714                                       src_reg.var_off.value);
4715             break;
4716         }
4717         /* We get our maximum from the var_off, and our minimum is the
4718          * maximum of the operands' minima
4719          */
4720         dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
4721         dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
4722         dst_reg->umax_value = dst_reg->var_off.value |
4723                               dst_reg->var_off.mask;
4724         if (dst_reg->smin_value < 0 || smin_val < 0) {
4725             /* Lose signed bounds when ORing negative numbers,
4726              * ain't nobody got time for that.
4727              */
4728             dst_reg->smin_value = S64_MIN;
4729             dst_reg->smax_value = S64_MAX;
4730         } else {
4731             /* ORing two positives gives a positive, so safe to
4732              * cast result into s64.
4733              */
4734             dst_reg->smin_value = dst_reg->umin_value;
4735             dst_reg->smax_value = dst_reg->umax_value;
4736         }
4737         /* We may learn something more from the var_off */
4738         __update_reg_bounds(dst_reg);
4739         break;
4740     case BPF_LSH:
4741         if (umax_val >= insn_bitness) {
4742             /* Shifts greater than 31 or 63 are undefined.
4743              * This includes shifts by a negative number.
4744              */
4745             mark_reg_unknown(env, regs, insn->dst_reg);
4746             break;
4747         }
4748         /* We lose all sign bit information (except what we can pick
4749          * up from var_off)
4750          */
4751         dst_reg->smin_value = S64_MIN;
4752         dst_reg->smax_value = S64_MAX;
4753         /* If we might shift our top bit out, then we know nothing */
4754         if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
4755             dst_reg->umin_value = 0;
4756             dst_reg->umax_value = U64_MAX;
4757         } else {
4758             dst_reg->umin_value <<= umin_val;
4759             dst_reg->umax_value <<= umax_val;
4760         }
4761         dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
4762         /* We may learn something more from the var_off */
4763         __update_reg_bounds(dst_reg);
4764         break;
4765     case BPF_RSH:
4766         if (umax_val >= insn_bitness) {
4767             /* Shifts greater than 31 or 63 are undefined.
4768              * This includes shifts by a negative number.
4769              */
4770             mark_reg_unknown(env, regs, insn->dst_reg);
4771             break;
4772         }
4773         /* BPF_RSH is an unsigned shift.  If the value in dst_reg might
4774          * be negative, then either:
4775          * 1) src_reg might be zero, so the sign bit of the result is
4776          *    unknown, so we lose our signed bounds
4777          * 2) it's known negative, thus the unsigned bounds capture the
4778          *    signed bounds
4779          * 3) the signed bounds cross zero, so they tell us nothing
4780          *    about the result
4781          * If the value in dst_reg is known nonnegative, then again the
4782          * unsigned bounds capture the signed bounds.
4783          * Thus, in all cases it suffices to blow away our signed bounds
4784          * and rely on inferring new ones from the unsigned bounds and
4785          * var_off of the result.
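 *
 * E.g. if dst may be -1 and the variable shift may be 0, the result
 * can still be -1, while a shift by 1 yields a huge positive value:
 * the sign of the result is genuinely unknown, which is why only the
 * unsigned bounds and var_off survive.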
4786  */
4787         dst_reg->smin_value = S64_MIN;
4788         dst_reg->smax_value = S64_MAX;
4789         dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
4790         dst_reg->umin_value >>= umax_val;
4791         dst_reg->umax_value >>= umin_val;
4792         /* We may learn something more from the var_off */
4793         __update_reg_bounds(dst_reg);
4794         break;
4795     case BPF_ARSH:
4796         if (umax_val >= insn_bitness) {
4797             /* Shifts greater than 31 or 63 are undefined.
4798              * This includes shifts by a negative number.
4799              */
4800             mark_reg_unknown(env, regs, insn->dst_reg);
4801             break;
4802         }
4803 
4804         /* Upon reaching here, src_known is true and
4805          * umax_val is equal to umin_val.
4806          */
4807         dst_reg->smin_value >>= umin_val;
4808         dst_reg->smax_value >>= umin_val;
4809         dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
4810 
4811         /* blow away the dst_reg umin_value/umax_value and rely on
4812          * dst_reg var_off to refine the result.
4813          */
4814         dst_reg->umin_value = 0;
4815         dst_reg->umax_value = U64_MAX;
4816         __update_reg_bounds(dst_reg);
4817         break;
4818     default:
4819         mark_reg_unknown(env, regs, insn->dst_reg);
4820         break;
4821     }
4822 
4823     if (BPF_CLASS(insn->code) != BPF_ALU64) {
4824         /* 32-bit ALU ops are (32,32)->32 */
4825         coerce_reg_to_size(dst_reg, 4);
4826     }
4827 
4828     __reg_deduce_bounds(dst_reg);
4829     __reg_bound_offset(dst_reg);
4830     return 0;
4831 }
4832 
4833 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
4834  * and var_off.
4835  */
4836 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
4837                                    struct bpf_insn *insn)
4838 {
4839     struct bpf_verifier_state *vstate = env->cur_state;
4840     struct bpf_func_state *state = vstate->frame[vstate->curframe];
4841     struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
4842     struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
4843     u8 opcode = BPF_OP(insn->code);
4844     int err;
4845 
4846     dst_reg = &regs[insn->dst_reg];
4847     src_reg = NULL;
4848     if (dst_reg->type != SCALAR_VALUE)
4849         ptr_reg = dst_reg;
4850     if (BPF_SRC(insn->code) == BPF_X) {
4851         src_reg = &regs[insn->src_reg];
4852         if (src_reg->type != SCALAR_VALUE) {
4853             if (dst_reg->type != SCALAR_VALUE) {
4854                 /* Combining two pointers by any ALU op yields
4855                  * an arbitrary scalar. Disallow all math except
4856                  * pointer subtraction
4857                  */
4858                 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
4859                     mark_reg_unknown(env, regs, insn->dst_reg);
4860                     return 0;
4861                 }
4862                 verbose(env, "R%d pointer %s pointer prohibited\n",
4863                     insn->dst_reg,
4864                     bpf_alu_string[opcode >> 4]);
4865                 return -EACCES;
4866             } else {
4867                 /* scalar += pointer
4868                  * This is legal, but we have to reverse our
4869                  * src/dest handling in computing the range
4870                  */
4871                 err = mark_chain_precision(env, insn->dst_reg);
4872                 if (err)
4873                     return err;
4874                 return adjust_ptr_min_max_vals(env, insn,
4875                                                src_reg, dst_reg);
4876             }
4877         } else if (ptr_reg) {
4878             /* pointer += scalar */
4879             err = mark_chain_precision(env, insn->src_reg);
4880             if (err)
4881                 return err;
4882             return adjust_ptr_min_max_vals(env, insn,
4883                                            dst_reg, src_reg);
4884         }
4885     } else {
4886         /* Pretend the src is a reg with a known value, since we only
4887          * need to be able to read from this state.
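 *
 * E.g. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8) is handled exactly like
 * r2 += rX with rX being a scalar known to equal 8.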
4888 */ 4889 off_reg.type = SCALAR_VALUE; 4890 __mark_reg_known(&off_reg, insn->imm); 4891 src_reg = &off_reg; 4892 if (ptr_reg) /* pointer += K */ 4893 return adjust_ptr_min_max_vals(env, insn, 4894 ptr_reg, src_reg); 4895 } 4896 4897 /* Got here implies adding two SCALAR_VALUEs */ 4898 if (WARN_ON_ONCE(ptr_reg)) { 4899 print_verifier_state(env, state); 4900 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 4901 return -EINVAL; 4902 } 4903 if (WARN_ON(!src_reg)) { 4904 print_verifier_state(env, state); 4905 verbose(env, "verifier internal error: no src_reg\n"); 4906 return -EINVAL; 4907 } 4908 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 4909 } 4910 4911 /* check validity of 32-bit and 64-bit arithmetic operations */ 4912 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 4913 { 4914 struct bpf_reg_state *regs = cur_regs(env); 4915 u8 opcode = BPF_OP(insn->code); 4916 int err; 4917 4918 if (opcode == BPF_END || opcode == BPF_NEG) { 4919 if (opcode == BPF_NEG) { 4920 if (BPF_SRC(insn->code) != 0 || 4921 insn->src_reg != BPF_REG_0 || 4922 insn->off != 0 || insn->imm != 0) { 4923 verbose(env, "BPF_NEG uses reserved fields\n"); 4924 return -EINVAL; 4925 } 4926 } else { 4927 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 4928 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 4929 BPF_CLASS(insn->code) == BPF_ALU64) { 4930 verbose(env, "BPF_END uses reserved fields\n"); 4931 return -EINVAL; 4932 } 4933 } 4934 4935 /* check src operand */ 4936 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 4937 if (err) 4938 return err; 4939 4940 if (is_pointer_value(env, insn->dst_reg)) { 4941 verbose(env, "R%d pointer arithmetic prohibited\n", 4942 insn->dst_reg); 4943 return -EACCES; 4944 } 4945 4946 /* check dest operand */ 4947 err = check_reg_arg(env, insn->dst_reg, DST_OP); 4948 if (err) 4949 return err; 4950 4951 } else if (opcode == BPF_MOV) { 4952 4953 if (BPF_SRC(insn->code) == BPF_X) { 4954 if (insn->imm != 0 || insn->off != 0) { 4955 verbose(env, "BPF_MOV uses reserved fields\n"); 4956 return -EINVAL; 4957 } 4958 4959 /* check src operand */ 4960 err = check_reg_arg(env, insn->src_reg, SRC_OP); 4961 if (err) 4962 return err; 4963 } else { 4964 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 4965 verbose(env, "BPF_MOV uses reserved fields\n"); 4966 return -EINVAL; 4967 } 4968 } 4969 4970 /* check dest operand, mark as required later */ 4971 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 4972 if (err) 4973 return err; 4974 4975 if (BPF_SRC(insn->code) == BPF_X) { 4976 struct bpf_reg_state *src_reg = regs + insn->src_reg; 4977 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 4978 4979 if (BPF_CLASS(insn->code) == BPF_ALU64) { 4980 /* case: R1 = R2 4981 * copy register state to dest reg 4982 */ 4983 *dst_reg = *src_reg; 4984 dst_reg->live |= REG_LIVE_WRITTEN; 4985 dst_reg->subreg_def = DEF_NOT_SUBREG; 4986 } else { 4987 /* R1 = (u32) R2 */ 4988 if (is_pointer_value(env, insn->src_reg)) { 4989 verbose(env, 4990 "R%d partial copy of pointer\n", 4991 insn->src_reg); 4992 return -EACCES; 4993 } else if (src_reg->type == SCALAR_VALUE) { 4994 *dst_reg = *src_reg; 4995 dst_reg->live |= REG_LIVE_WRITTEN; 4996 dst_reg->subreg_def = env->insn_idx + 1; 4997 } else { 4998 mark_reg_unknown(env, regs, 4999 insn->dst_reg); 5000 } 5001 coerce_reg_to_size(dst_reg, 4); 5002 } 5003 } else { 5004 /* case: R = imm 5005 * remember the value we stored into this reg 5006 */ 5007 /* clear any state __mark_reg_known doesn't set */ 5008 
mark_reg_unknown(env, regs, insn->dst_reg); 5009 regs[insn->dst_reg].type = SCALAR_VALUE; 5010 if (BPF_CLASS(insn->code) == BPF_ALU64) { 5011 __mark_reg_known(regs + insn->dst_reg, 5012 insn->imm); 5013 } else { 5014 __mark_reg_known(regs + insn->dst_reg, 5015 (u32)insn->imm); 5016 } 5017 } 5018 5019 } else if (opcode > BPF_END) { 5020 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 5021 return -EINVAL; 5022 5023 } else { /* all other ALU ops: and, sub, xor, add, ... */ 5024 5025 if (BPF_SRC(insn->code) == BPF_X) { 5026 if (insn->imm != 0 || insn->off != 0) { 5027 verbose(env, "BPF_ALU uses reserved fields\n"); 5028 return -EINVAL; 5029 } 5030 /* check src1 operand */ 5031 err = check_reg_arg(env, insn->src_reg, SRC_OP); 5032 if (err) 5033 return err; 5034 } else { 5035 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 5036 verbose(env, "BPF_ALU uses reserved fields\n"); 5037 return -EINVAL; 5038 } 5039 } 5040 5041 /* check src2 operand */ 5042 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 5043 if (err) 5044 return err; 5045 5046 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 5047 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 5048 verbose(env, "div by zero\n"); 5049 return -EINVAL; 5050 } 5051 5052 if ((opcode == BPF_LSH || opcode == BPF_RSH || 5053 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 5054 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 5055 5056 if (insn->imm < 0 || insn->imm >= size) { 5057 verbose(env, "invalid shift %d\n", insn->imm); 5058 return -EINVAL; 5059 } 5060 } 5061 5062 /* check dest operand */ 5063 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 5064 if (err) 5065 return err; 5066 5067 return adjust_reg_min_max_vals(env, insn); 5068 } 5069 5070 return 0; 5071 } 5072 5073 static void __find_good_pkt_pointers(struct bpf_func_state *state, 5074 struct bpf_reg_state *dst_reg, 5075 enum bpf_reg_type type, u16 new_range) 5076 { 5077 struct bpf_reg_state *reg; 5078 int i; 5079 5080 for (i = 0; i < MAX_BPF_REG; i++) { 5081 reg = &state->regs[i]; 5082 if (reg->type == type && reg->id == dst_reg->id) 5083 /* keep the maximum range already checked */ 5084 reg->range = max(reg->range, new_range); 5085 } 5086 5087 bpf_for_each_spilled_reg(i, state, reg) { 5088 if (!reg) 5089 continue; 5090 if (reg->type == type && reg->id == dst_reg->id) 5091 reg->range = max(reg->range, new_range); 5092 } 5093 } 5094 5095 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 5096 struct bpf_reg_state *dst_reg, 5097 enum bpf_reg_type type, 5098 bool range_right_open) 5099 { 5100 u16 new_range; 5101 int i; 5102 5103 if (dst_reg->off < 0 || 5104 (dst_reg->off == 0 && range_right_open)) 5105 /* This doesn't give us any range */ 5106 return; 5107 5108 if (dst_reg->umax_value > MAX_PACKET_OFF || 5109 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 5110 /* Risk of overflow. For instance, ptr + (1<<63) may be less 5111 * than pkt_end, but that's because it's also less than pkt. 
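 *
 * E.g. off == 8 with umax_value == 1ULL << 63 can wrap past pkt_end
 * while still comparing below it, so no range is recorded unless both
 * the value and value + off stay within MAX_PACKET_OFF.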
5112  */
5113         return;
5114 
5115     new_range = dst_reg->off;
5116     if (range_right_open)
5117         new_range--;
5118 
5119     /* Examples for register markings:
5120      *
5121      * pkt_data in dst register:
5122      *
5123      *   r2 = r3;
5124      *   r2 += 8;
5125      *   if (r2 > pkt_end) goto <handle exception>
5126      *   <access okay>
5127      *
5128      *   r2 = r3;
5129      *   r2 += 8;
5130      *   if (r2 < pkt_end) goto <access okay>
5131      *   <handle exception>
5132      *
5133      * Where:
5134      *   r2 == dst_reg, pkt_end == src_reg
5135      *   r2=pkt(id=n,off=8,r=0)
5136      *   r3=pkt(id=n,off=0,r=0)
5137      *
5138      * pkt_data in src register:
5139      *
5140      *   r2 = r3;
5141      *   r2 += 8;
5142      *   if (pkt_end >= r2) goto <access okay>
5143      *   <handle exception>
5144      *
5145      *   r2 = r3;
5146      *   r2 += 8;
5147      *   if (pkt_end <= r2) goto <handle exception>
5148      *   <access okay>
5149      *
5150      * Where:
5151      *   pkt_end == dst_reg, r2 == src_reg
5152      *   r2=pkt(id=n,off=8,r=0)
5153      *   r3=pkt(id=n,off=0,r=0)
5154      *
5155      * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
5156      * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
5157      * and [r3, r3 + 8-1) respectively is safe to access depending on
5158      * the check.
5159      */
5160 
5161     /* If our ids match, then we must have the same max_value.  And we
5162      * don't care about the other reg's fixed offset, since if it's too big
5163      * the range won't allow anything.
5164      * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
5165      */
5166     for (i = 0; i <= vstate->curframe; i++)
5167         __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
5168                                  new_range);
5169 }
5170 
5171 /* compute branch direction of the expression "if (reg opcode val) goto target;"
5172  * and return:
5173  *  1 - branch will be taken and "goto target" will be executed
5174  *  0 - branch will not be taken and fall-through to next insn
5175  * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's value range is [0,10]
5176  */
5177 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
5178                            bool is_jmp32)
5179 {
5180     struct bpf_reg_state reg_lo;
5181     s64 sval;
5182 
5183     if (__is_pointer_value(false, reg))
5184         return -1;
5185 
5186     if (is_jmp32) {
5187         reg_lo = *reg;
5188         reg = &reg_lo;
5189         /* For JMP32, only low 32 bits are compared, coerce_reg_to_size
5190          * could truncate high bits and update umin/umax according to
5191          * information of low bits.
5192          */
5193         coerce_reg_to_size(reg, 4);
5194         /* smin/smax need special handling. For example, after coerce,
5195          * if smin_value is 0x00000000ffffffffLL, the value is -1 when
5196          * used as operand to JMP32. It is a negative number from s32's
5197          * point of view, while it is a positive number when seen as
5198          * s64. The smin/smax are kept as s64, therefore, when used with
5199          * JMP32, they need to be transformed into s32, then sign
5200          * extended back to s64.
5201          *
5202          * Also, smin/smax were copied from umin/umax. If umin/umax have
5203          * different sign bits, then the min/max relationship no longer
5204          * holds after casting into s32; for this case, set smin/smax
5205          * to the safest range.
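 *
 * Worked case: umin_value == 0x00000000fffffffe and umax_value ==
 * 0x0000000100000001 differ in bit 31 of their low halves, so the
 * s32 ordering of the low words is not preserved and smin/smax fall
 * back to [S32_MIN, S32_MAX] before the sign extension.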
5206  */
5207         if ((reg->umax_value ^ reg->umin_value) &
5208             (1ULL << 31)) {
5209             reg->smin_value = S32_MIN;
5210             reg->smax_value = S32_MAX;
5211         }
5212         reg->smin_value = (s64)(s32)reg->smin_value;
5213         reg->smax_value = (s64)(s32)reg->smax_value;
5214 
5215         val = (u32)val;
5216         sval = (s64)(s32)val;
5217     } else {
5218         sval = (s64)val;
5219     }
5220 
5221     switch (opcode) {
5222     case BPF_JEQ:
5223         if (tnum_is_const(reg->var_off))
5224             return !!tnum_equals_const(reg->var_off, val);
5225         break;
5226     case BPF_JNE:
5227         if (tnum_is_const(reg->var_off))
5228             return !tnum_equals_const(reg->var_off, val);
5229         break;
5230     case BPF_JSET:
5231         if ((~reg->var_off.mask & reg->var_off.value) & val)
5232             return 1;
5233         if (!((reg->var_off.mask | reg->var_off.value) & val))
5234             return 0;
5235         break;
5236     case BPF_JGT:
5237         if (reg->umin_value > val)
5238             return 1;
5239         else if (reg->umax_value <= val)
5240             return 0;
5241         break;
5242     case BPF_JSGT:
5243         if (reg->smin_value > sval)
5244             return 1;
5245         else if (reg->smax_value < sval)
5246             return 0;
5247         break;
5248     case BPF_JLT:
5249         if (reg->umax_value < val)
5250             return 1;
5251         else if (reg->umin_value >= val)
5252             return 0;
5253         break;
5254     case BPF_JSLT:
5255         if (reg->smax_value < sval)
5256             return 1;
5257         else if (reg->smin_value >= sval)
5258             return 0;
5259         break;
5260     case BPF_JGE:
5261         if (reg->umin_value >= val)
5262             return 1;
5263         else if (reg->umax_value < val)
5264             return 0;
5265         break;
5266     case BPF_JSGE:
5267         if (reg->smin_value >= sval)
5268             return 1;
5269         else if (reg->smax_value < sval)
5270             return 0;
5271         break;
5272     case BPF_JLE:
5273         if (reg->umax_value <= val)
5274             return 1;
5275         else if (reg->umin_value > val)
5276             return 0;
5277         break;
5278     case BPF_JSLE:
5279         if (reg->smax_value <= sval)
5280             return 1;
5281         else if (reg->smin_value > sval)
5282             return 0;
5283         break;
5284     }
5285 
5286     return -1;
5287 }
5288 
5289 /* Generate min value of the high 32-bit from TNUM info. */
5290 static u64 gen_hi_min(struct tnum var)
5291 {
5292     return var.value & ~0xffffffffULL;
5293 }
5294 
5295 /* Generate max value of the high 32-bit from TNUM info. */
5296 static u64 gen_hi_max(struct tnum var)
5297 {
5298     return (var.value | var.mask) & ~0xffffffffULL;
5299 }
5300 
5301 /* Return true if VAL is compared with a s64 sign extended from s32, and they
5302  * have the same signedness.
5303  */
5304 static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
5305 {
5306     return ((s32)sval >= 0 &&
5307         reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
5308            ((s32)sval < 0 &&
5309         reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
5310 }
5311 
5312 /* Adjusts the register min/max values in the case that the dst_reg is the
5313  * variable register that we are working on, and src_reg is a constant or we're
5314  * simply doing a BPF_K check.
5315  * In JEQ/JNE cases we also adjust the var_off values.
5316  */
5317 static void reg_set_min_max(struct bpf_reg_state *true_reg,
5318                             struct bpf_reg_state *false_reg, u64 val,
5319                             u8 opcode, bool is_jmp32)
5320 {
5321     s64 sval;
5322 
5323     /* If the dst_reg is a pointer, we can't learn anything about its
5324      * variable offset from the compare (unless src_reg were a pointer into
5325      * the same object, but we don't bother with that).
5326      * Since false_reg and true_reg have the same type by construction, we
5327      * only need to check one of them for pointerness.
5328      */
5329     if (__is_pointer_value(false, false_reg))
5330         return;
5331 
5332     val = is_jmp32 ? (u32)val : val;
5333     sval = is_jmp32 ?
(s64)(s32)val : (s64)val; 5334 5335 switch (opcode) { 5336 case BPF_JEQ: 5337 case BPF_JNE: 5338 { 5339 struct bpf_reg_state *reg = 5340 opcode == BPF_JEQ ? true_reg : false_reg; 5341 5342 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but 5343 * if it is true we know the value for sure. Likewise for 5344 * BPF_JNE. 5345 */ 5346 if (is_jmp32) { 5347 u64 old_v = reg->var_off.value; 5348 u64 hi_mask = ~0xffffffffULL; 5349 5350 reg->var_off.value = (old_v & hi_mask) | val; 5351 reg->var_off.mask &= hi_mask; 5352 } else { 5353 __mark_reg_known(reg, val); 5354 } 5355 break; 5356 } 5357 case BPF_JSET: 5358 false_reg->var_off = tnum_and(false_reg->var_off, 5359 tnum_const(~val)); 5360 if (is_power_of_2(val)) 5361 true_reg->var_off = tnum_or(true_reg->var_off, 5362 tnum_const(val)); 5363 break; 5364 case BPF_JGE: 5365 case BPF_JGT: 5366 { 5367 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 5368 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 5369 5370 if (is_jmp32) { 5371 false_umax += gen_hi_max(false_reg->var_off); 5372 true_umin += gen_hi_min(true_reg->var_off); 5373 } 5374 false_reg->umax_value = min(false_reg->umax_value, false_umax); 5375 true_reg->umin_value = max(true_reg->umin_value, true_umin); 5376 break; 5377 } 5378 case BPF_JSGE: 5379 case BPF_JSGT: 5380 { 5381 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 5382 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 5383 5384 /* If the full s64 was not sign-extended from s32 then don't 5385 * deduct further info. 5386 */ 5387 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) 5388 break; 5389 false_reg->smax_value = min(false_reg->smax_value, false_smax); 5390 true_reg->smin_value = max(true_reg->smin_value, true_smin); 5391 break; 5392 } 5393 case BPF_JLE: 5394 case BPF_JLT: 5395 { 5396 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 5397 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 5398 5399 if (is_jmp32) { 5400 false_umin += gen_hi_min(false_reg->var_off); 5401 true_umax += gen_hi_max(true_reg->var_off); 5402 } 5403 false_reg->umin_value = max(false_reg->umin_value, false_umin); 5404 true_reg->umax_value = min(true_reg->umax_value, true_umax); 5405 break; 5406 } 5407 case BPF_JSLE: 5408 case BPF_JSLT: 5409 { 5410 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 5411 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; 5412 5413 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) 5414 break; 5415 false_reg->smin_value = max(false_reg->smin_value, false_smin); 5416 true_reg->smax_value = min(true_reg->smax_value, true_smax); 5417 break; 5418 } 5419 default: 5420 break; 5421 } 5422 5423 __reg_deduce_bounds(false_reg); 5424 __reg_deduce_bounds(true_reg); 5425 /* We might have learned some bits from the bounds. */ 5426 __reg_bound_offset(false_reg); 5427 __reg_bound_offset(true_reg); 5428 /* Intersecting with the old var_off might have improved our bounds 5429 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 5430 * then new var_off is (0; 0x7f...fc) which improves our umax. 5431 */ 5432 __update_reg_bounds(false_reg); 5433 __update_reg_bounds(true_reg); 5434 } 5435 5436 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 5437 * the variable reg. 5438 */ 5439 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 5440 struct bpf_reg_state *false_reg, u64 val, 5441 u8 opcode, bool is_jmp32) 5442 { 5443 s64 sval; 5444 5445 if (__is_pointer_value(false, false_reg)) 5446 return; 5447 5448 val = is_jmp32 ? 
(u32)val : val; 5449 sval = is_jmp32 ? (s64)(s32)val : (s64)val; 5450 5451 switch (opcode) { 5452 case BPF_JEQ: 5453 case BPF_JNE: 5454 { 5455 struct bpf_reg_state *reg = 5456 opcode == BPF_JEQ ? true_reg : false_reg; 5457 5458 if (is_jmp32) { 5459 u64 old_v = reg->var_off.value; 5460 u64 hi_mask = ~0xffffffffULL; 5461 5462 reg->var_off.value = (old_v & hi_mask) | val; 5463 reg->var_off.mask &= hi_mask; 5464 } else { 5465 __mark_reg_known(reg, val); 5466 } 5467 break; 5468 } 5469 case BPF_JSET: 5470 false_reg->var_off = tnum_and(false_reg->var_off, 5471 tnum_const(~val)); 5472 if (is_power_of_2(val)) 5473 true_reg->var_off = tnum_or(true_reg->var_off, 5474 tnum_const(val)); 5475 break; 5476 case BPF_JGE: 5477 case BPF_JGT: 5478 { 5479 u64 false_umin = opcode == BPF_JGT ? val : val + 1; 5480 u64 true_umax = opcode == BPF_JGT ? val - 1 : val; 5481 5482 if (is_jmp32) { 5483 false_umin += gen_hi_min(false_reg->var_off); 5484 true_umax += gen_hi_max(true_reg->var_off); 5485 } 5486 false_reg->umin_value = max(false_reg->umin_value, false_umin); 5487 true_reg->umax_value = min(true_reg->umax_value, true_umax); 5488 break; 5489 } 5490 case BPF_JSGE: 5491 case BPF_JSGT: 5492 { 5493 s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1; 5494 s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval; 5495 5496 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) 5497 break; 5498 false_reg->smin_value = max(false_reg->smin_value, false_smin); 5499 true_reg->smax_value = min(true_reg->smax_value, true_smax); 5500 break; 5501 } 5502 case BPF_JLE: 5503 case BPF_JLT: 5504 { 5505 u64 false_umax = opcode == BPF_JLT ? val : val - 1; 5506 u64 true_umin = opcode == BPF_JLT ? val + 1 : val; 5507 5508 if (is_jmp32) { 5509 false_umax += gen_hi_max(false_reg->var_off); 5510 true_umin += gen_hi_min(true_reg->var_off); 5511 } 5512 false_reg->umax_value = min(false_reg->umax_value, false_umax); 5513 true_reg->umin_value = max(true_reg->umin_value, true_umin); 5514 break; 5515 } 5516 case BPF_JSLE: 5517 case BPF_JSLT: 5518 { 5519 s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1; 5520 s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval; 5521 5522 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) 5523 break; 5524 false_reg->smax_value = min(false_reg->smax_value, false_smax); 5525 true_reg->smin_value = max(true_reg->smin_value, true_smin); 5526 break; 5527 } 5528 default: 5529 break; 5530 } 5531 5532 __reg_deduce_bounds(false_reg); 5533 __reg_deduce_bounds(true_reg); 5534 /* We might have learned some bits from the bounds. */ 5535 __reg_bound_offset(false_reg); 5536 __reg_bound_offset(true_reg); 5537 /* Intersecting with the old var_off might have improved our bounds 5538 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 5539 * then new var_off is (0; 0x7f...fc) which improves our umax. 
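	 *
	 * As a worked example of this function's overall effect (illustrative
	 * only): for 'if 10 < r4' with the constant 10 in dst_reg, BPF_JLT
	 * lets the true branch raise r4->umin_value to 11 and the false
	 * branch lower r4->umax_value to 10.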
5540 */ 5541 __update_reg_bounds(false_reg); 5542 __update_reg_bounds(true_reg); 5543 } 5544 5545 /* Regs are known to be equal, so intersect their min/max/var_off */ 5546 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 5547 struct bpf_reg_state *dst_reg) 5548 { 5549 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 5550 dst_reg->umin_value); 5551 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 5552 dst_reg->umax_value); 5553 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 5554 dst_reg->smin_value); 5555 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 5556 dst_reg->smax_value); 5557 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 5558 dst_reg->var_off); 5559 /* We might have learned new bounds from the var_off. */ 5560 __update_reg_bounds(src_reg); 5561 __update_reg_bounds(dst_reg); 5562 /* We might have learned something about the sign bit. */ 5563 __reg_deduce_bounds(src_reg); 5564 __reg_deduce_bounds(dst_reg); 5565 /* We might have learned some bits from the bounds. */ 5566 __reg_bound_offset(src_reg); 5567 __reg_bound_offset(dst_reg); 5568 /* Intersecting with the old var_off might have improved our bounds 5569 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 5570 * then new var_off is (0; 0x7f...fc) which improves our umax. 5571 */ 5572 __update_reg_bounds(src_reg); 5573 __update_reg_bounds(dst_reg); 5574 } 5575 5576 static void reg_combine_min_max(struct bpf_reg_state *true_src, 5577 struct bpf_reg_state *true_dst, 5578 struct bpf_reg_state *false_src, 5579 struct bpf_reg_state *false_dst, 5580 u8 opcode) 5581 { 5582 switch (opcode) { 5583 case BPF_JEQ: 5584 __reg_combine_min_max(true_src, true_dst); 5585 break; 5586 case BPF_JNE: 5587 __reg_combine_min_max(false_src, false_dst); 5588 break; 5589 } 5590 } 5591 5592 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 5593 struct bpf_reg_state *reg, u32 id, 5594 bool is_null) 5595 { 5596 if (reg_type_may_be_null(reg->type) && reg->id == id) { 5597 /* Old offset (both fixed and variable parts) should 5598 * have been known-zero, because we don't allow pointer 5599 * arithmetic on pointers that might be NULL. 5600 */ 5601 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 5602 !tnum_equals_const(reg->var_off, 0) || 5603 reg->off)) { 5604 __mark_reg_known_zero(reg); 5605 reg->off = 0; 5606 } 5607 if (is_null) { 5608 reg->type = SCALAR_VALUE; 5609 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 5610 if (reg->map_ptr->inner_map_meta) { 5611 reg->type = CONST_PTR_TO_MAP; 5612 reg->map_ptr = reg->map_ptr->inner_map_meta; 5613 } else if (reg->map_ptr->map_type == 5614 BPF_MAP_TYPE_XSKMAP) { 5615 reg->type = PTR_TO_XDP_SOCK; 5616 } else { 5617 reg->type = PTR_TO_MAP_VALUE; 5618 } 5619 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { 5620 reg->type = PTR_TO_SOCKET; 5621 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { 5622 reg->type = PTR_TO_SOCK_COMMON; 5623 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { 5624 reg->type = PTR_TO_TCP_SOCK; 5625 } 5626 if (is_null) { 5627 /* We don't need id and ref_obj_id from this point 5628 * onwards anymore, thus we should better reset it, 5629 * so that state pruning has chances to take effect. 5630 */ 5631 reg->id = 0; 5632 reg->ref_obj_id = 0; 5633 } else if (!reg_may_point_to_spin_lock(reg)) { 5634 /* For not-NULL ptr, reg->ref_obj_id will be reset 5635 * in release_reg_references(). 5636 * 5637 * reg->id is still used by spin_lock ptr. 
Other 5638 * than spin_lock ptr type, reg->id can be reset. 5639 */ 5640 reg->id = 0; 5641 } 5642 } 5643 } 5644 5645 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, 5646 bool is_null) 5647 { 5648 struct bpf_reg_state *reg; 5649 int i; 5650 5651 for (i = 0; i < MAX_BPF_REG; i++) 5652 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); 5653 5654 bpf_for_each_spilled_reg(i, state, reg) { 5655 if (!reg) 5656 continue; 5657 mark_ptr_or_null_reg(state, reg, id, is_null); 5658 } 5659 } 5660 5661 /* The logic is similar to find_good_pkt_pointers(), both could eventually 5662 * be folded together at some point. 5663 */ 5664 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 5665 bool is_null) 5666 { 5667 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5668 struct bpf_reg_state *regs = state->regs; 5669 u32 ref_obj_id = regs[regno].ref_obj_id; 5670 u32 id = regs[regno].id; 5671 int i; 5672 5673 if (ref_obj_id && ref_obj_id == id && is_null) 5674 /* regs[regno] is in the " == NULL" branch. 5675 * No one could have freed the reference state before 5676 * doing the NULL check. 5677 */ 5678 WARN_ON_ONCE(release_reference_state(state, id)); 5679 5680 for (i = 0; i <= vstate->curframe; i++) 5681 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); 5682 } 5683 5684 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 5685 struct bpf_reg_state *dst_reg, 5686 struct bpf_reg_state *src_reg, 5687 struct bpf_verifier_state *this_branch, 5688 struct bpf_verifier_state *other_branch) 5689 { 5690 if (BPF_SRC(insn->code) != BPF_X) 5691 return false; 5692 5693 /* Pointers are always 64-bit. */ 5694 if (BPF_CLASS(insn->code) == BPF_JMP32) 5695 return false; 5696 5697 switch (BPF_OP(insn->code)) { 5698 case BPF_JGT: 5699 if ((dst_reg->type == PTR_TO_PACKET && 5700 src_reg->type == PTR_TO_PACKET_END) || 5701 (dst_reg->type == PTR_TO_PACKET_META && 5702 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 5703 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 5704 find_good_pkt_pointers(this_branch, dst_reg, 5705 dst_reg->type, false); 5706 } else if ((dst_reg->type == PTR_TO_PACKET_END && 5707 src_reg->type == PTR_TO_PACKET) || 5708 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 5709 src_reg->type == PTR_TO_PACKET_META)) { 5710 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 5711 find_good_pkt_pointers(other_branch, src_reg, 5712 src_reg->type, true); 5713 } else { 5714 return false; 5715 } 5716 break; 5717 case BPF_JLT: 5718 if ((dst_reg->type == PTR_TO_PACKET && 5719 src_reg->type == PTR_TO_PACKET_END) || 5720 (dst_reg->type == PTR_TO_PACKET_META && 5721 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 5722 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 5723 find_good_pkt_pointers(other_branch, dst_reg, 5724 dst_reg->type, true); 5725 } else if ((dst_reg->type == PTR_TO_PACKET_END && 5726 src_reg->type == PTR_TO_PACKET) || 5727 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 5728 src_reg->type == PTR_TO_PACKET_META)) { 5729 /* pkt_end < pkt_data', pkt_data > pkt_meta' */ 5730 find_good_pkt_pointers(this_branch, src_reg, 5731 src_reg->type, false); 5732 } else { 5733 return false; 5734 } 5735 break; 5736 case BPF_JGE: 5737 if ((dst_reg->type == PTR_TO_PACKET && 5738 src_reg->type == PTR_TO_PACKET_END) || 5739 (dst_reg->type == PTR_TO_PACKET_META && 5740 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 5741 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 5742 find_good_pkt_pointers(this_branch, dst_reg, 5743 
			dst_reg->type, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, false);
		} else {
			return false;
		}
		break;
	case BPF_JLE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, true);
		} else {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}

static int check_cond_jmp_op(struct bpf_verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct bpf_verifier_state *this_branch = env->cur_state;
	struct bpf_verifier_state *other_branch;
	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
	u8 opcode = BPF_OP(insn->code);
	bool is_jmp32;
	int pred = -1;
	int err;

	/* Only conditional jumps are expected to reach here. */
	if (opcode == BPF_JA || opcode > BPF_JSLE) {
		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose(env, "R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
		src_reg = &regs[insn->src_reg];
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	dst_reg = &regs[insn->dst_reg];
	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;

	if (BPF_SRC(insn->code) == BPF_K)
		pred = is_branch_taken(dst_reg, insn->imm,
				       opcode, is_jmp32);
	else if (src_reg->type == SCALAR_VALUE &&
		 tnum_is_const(src_reg->var_off))
		pred = is_branch_taken(dst_reg, src_reg->var_off.value,
				       opcode, is_jmp32);
	if (pred >= 0) {
		err = mark_chain_precision(env, insn->dst_reg);
		if (BPF_SRC(insn->code) == BPF_X && !err)
			err = mark_chain_precision(env, insn->src_reg);
		if (err)
			return err;
	}
	if (pred == 1) {
		/* only follow the goto, ignore fall-through */
		*insn_idx += insn->off;
		return 0;
	} else if (pred == 0) {
		/* only follow fall-through branch, since
		 * that's where the program will go
		 */
		return 0;
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1,
				  *insn_idx, false);
	if (!other_branch)
		return -EFAULT;
	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;

	/* detect if we are comparing against a constant value so we can adjust
	 * our min/max values for our dst register.
	 * this is only legit if both are scalars (or pointers to the same
	 * object, I suppose, but we don't support that right now), because
	 * otherwise the different base pointers mean the offsets aren't
	 * comparable.
	 */
	if (BPF_SRC(insn->code) == BPF_X) {
		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
		struct bpf_reg_state lo_reg0 = *dst_reg;
		struct bpf_reg_state lo_reg1 = *src_reg;
		struct bpf_reg_state *src_lo, *dst_lo;

		dst_lo = &lo_reg0;
		src_lo = &lo_reg1;
		coerce_reg_to_size(dst_lo, 4);
		coerce_reg_to_size(src_lo, 4);

		if (dst_reg->type == SCALAR_VALUE &&
		    src_reg->type == SCALAR_VALUE) {
			if (tnum_is_const(src_reg->var_off) ||
			    (is_jmp32 && tnum_is_const(src_lo->var_off)))
				reg_set_min_max(&other_branch_regs[insn->dst_reg],
						dst_reg,
						is_jmp32
						? src_lo->var_off.value
						: src_reg->var_off.value,
						opcode, is_jmp32);
			else if (tnum_is_const(dst_reg->var_off) ||
				 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
						    src_reg,
						    is_jmp32
						    ? dst_lo->var_off.value
						    : dst_reg->var_off.value,
						    opcode, is_jmp32);
			else if (!is_jmp32 &&
				 (opcode == BPF_JEQ || opcode == BPF_JNE))
				/* Comparing for equality, we can combine knowledge */
				reg_combine_min_max(&other_branch_regs[insn->src_reg],
						    &other_branch_regs[insn->dst_reg],
						    src_reg, dst_reg, opcode);
		}
	} else if (dst_reg->type == SCALAR_VALUE) {
		reg_set_min_max(&other_branch_regs[insn->dst_reg],
				dst_reg, insn->imm, opcode, is_jmp32);
	}

	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
	 * NOTE: the optimizations below are related to pointer comparison,
	 *       which will never be JMP32.
	 */
	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    reg_type_may_be_null(dst_reg->type)) {
		/* Mark all identical registers in each branch as either
		 * safe or unknown, depending on whether R == 0 or R != 0.
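		 *
		 * A hypothetical illustration: after
		 *   r0 = bpf_map_lookup_elem(...)
		 *   r7 = r0        // r7 inherits r0's id
		 *   if r0 == 0 goto err
		 * both r0 and r7 carry the same id, so on the fall-through
		 * path both become PTR_TO_MAP_VALUE and on the 'err' path
		 * both become known-zero scalars.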
		 */
		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
				      opcode == BPF_JNE);
		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
				      opcode == BPF_JEQ);
	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
					   this_branch, other_branch) &&
		   is_pointer_value(env, insn->dst_reg)) {
		verbose(env, "R%d pointer comparison prohibited\n",
			insn->dst_reg);
		return -EACCES;
	}
	if (env->log.level & BPF_LOG_LEVEL)
		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
	return 0;
}

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_insn_aux_data *aux = cur_aux(env);
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose(env, "invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(env, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0) {
		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;

		regs[insn->dst_reg].type = SCALAR_VALUE;
		__mark_reg_known(&regs[insn->dst_reg], imm);
		return 0;
	}

	map = env->used_maps[aux->map_index];
	mark_reg_known_zero(env, regs, insn->dst_reg);
	regs[insn->dst_reg].map_ptr = map;

	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
		regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
		regs[insn->dst_reg].off = aux->map_off;
		if (map_value_has_spin_lock(map))
			regs[insn->dst_reg].id = ++env->id_gen;
	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
		regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	} else {
		verbose(env, "bpf verifier is misconfigured\n");
		return -EINVAL;
	}

	return 0;
}

static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 mode = BPF_MODE(insn->code);
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (!env->ops->gen_ld_abs) {
		verbose(env, "bpf verifier is misconfigured\n");
		return -EINVAL;
	}

	if (env->subprog_cnt > 1) {
		/* when a program has LD_ABS insns, JITs and the interpreter
		 * assume that r1 == ctx == skb, which is not the case for
		 * callees that can have arbitrary arguments.
		 * It's problematic for the main prog as well, since JITs
		 * would need to analyze all functions in order to make proper
		 * register save/restore decisions in the main prog. Hence
		 * disallow LD_ABS together with calls.
		 */
		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
	 * gen_ld_abs() may terminate the program at runtime, leading to
	 * reference leak.
	 */
	err = check_reference_leak(env);
	if (err) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
		return err;
	}

	if (env->cur_state->active_spin_lock) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
		return -EINVAL;
	}

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
		verbose(env,
			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet.
	 * Already marked as written above.
	 */
	mark_reg_unknown(env, regs, BPF_REG_0);
	/* ld_abs loads up to 32-bit skb data.
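	 * As an illustration, BPF_LD_ABS(BPF_H, 12) fetches the 16-bit
	 * EtherType field of an Ethernet frame into the low half of R0.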
*/ 6091 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 6092 return 0; 6093 } 6094 6095 static int check_return_code(struct bpf_verifier_env *env) 6096 { 6097 struct tnum enforce_attach_type_range = tnum_unknown; 6098 struct bpf_reg_state *reg; 6099 struct tnum range = tnum_range(0, 1); 6100 6101 switch (env->prog->type) { 6102 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 6103 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 6104 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG) 6105 range = tnum_range(1, 1); 6106 break; 6107 case BPF_PROG_TYPE_CGROUP_SKB: 6108 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 6109 range = tnum_range(0, 3); 6110 enforce_attach_type_range = tnum_range(2, 3); 6111 } 6112 break; 6113 case BPF_PROG_TYPE_CGROUP_SOCK: 6114 case BPF_PROG_TYPE_SOCK_OPS: 6115 case BPF_PROG_TYPE_CGROUP_DEVICE: 6116 case BPF_PROG_TYPE_CGROUP_SYSCTL: 6117 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 6118 break; 6119 default: 6120 return 0; 6121 } 6122 6123 reg = cur_regs(env) + BPF_REG_0; 6124 if (reg->type != SCALAR_VALUE) { 6125 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 6126 reg_type_str[reg->type]); 6127 return -EINVAL; 6128 } 6129 6130 if (!tnum_in(range, reg->var_off)) { 6131 char tn_buf[48]; 6132 6133 verbose(env, "At program exit the register R0 "); 6134 if (!tnum_is_unknown(reg->var_off)) { 6135 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6136 verbose(env, "has value %s", tn_buf); 6137 } else { 6138 verbose(env, "has unknown scalar value"); 6139 } 6140 tnum_strn(tn_buf, sizeof(tn_buf), range); 6141 verbose(env, " should have been in %s\n", tn_buf); 6142 return -EINVAL; 6143 } 6144 6145 if (!tnum_is_unknown(enforce_attach_type_range) && 6146 tnum_in(enforce_attach_type_range, reg->var_off)) 6147 env->prog->enforce_expected_attach_type = 1; 6148 return 0; 6149 } 6150 6151 /* non-recursive DFS pseudo code 6152 * 1 procedure DFS-iterative(G,v): 6153 * 2 label v as discovered 6154 * 3 let S be a stack 6155 * 4 S.push(v) 6156 * 5 while S is not empty 6157 * 6 t <- S.pop() 6158 * 7 if t is what we're looking for: 6159 * 8 return t 6160 * 9 for all edges e in G.adjacentEdges(t) do 6161 * 10 if edge e is already labelled 6162 * 11 continue with the next edge 6163 * 12 w <- G.adjacentVertex(t,e) 6164 * 13 if vertex w is not discovered and not explored 6165 * 14 label e as tree-edge 6166 * 15 label w as discovered 6167 * 16 S.push(w) 6168 * 17 continue at 5 6169 * 18 else if vertex w is discovered 6170 * 19 label e as back-edge 6171 * 20 else 6172 * 21 // vertex w is explored 6173 * 22 label e as forward- or cross-edge 6174 * 23 label t as explored 6175 * 24 S.pop() 6176 * 6177 * convention: 6178 * 0x10 - discovered 6179 * 0x11 - discovered and fall-through edge labelled 6180 * 0x12 - discovered and fall-through and branch edges labelled 6181 * 0x20 - explored 6182 */ 6183 6184 enum { 6185 DISCOVERED = 0x10, 6186 EXPLORED = 0x20, 6187 FALLTHROUGH = 1, 6188 BRANCH = 2, 6189 }; 6190 6191 static u32 state_htab_size(struct bpf_verifier_env *env) 6192 { 6193 return env->prog->len; 6194 } 6195 6196 static struct bpf_verifier_state_list **explored_state( 6197 struct bpf_verifier_env *env, 6198 int idx) 6199 { 6200 struct bpf_verifier_state *cur = env->cur_state; 6201 struct bpf_func_state *state = cur->frame[cur->curframe]; 6202 6203 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 6204 } 6205 6206 static void init_explored_state(struct bpf_verifier_env *env, int idx) 6207 { 6208 
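	/* Mark this insn as a candidate for caching and searching verifier
	 * states, see the prune_point test in is_state_visited().
	 */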
env->insn_aux_data[idx].prune_point = true; 6209 } 6210 6211 /* t, w, e - match pseudo-code above: 6212 * t - index of current instruction 6213 * w - next instruction 6214 * e - edge 6215 */ 6216 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 6217 bool loop_ok) 6218 { 6219 int *insn_stack = env->cfg.insn_stack; 6220 int *insn_state = env->cfg.insn_state; 6221 6222 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 6223 return 0; 6224 6225 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 6226 return 0; 6227 6228 if (w < 0 || w >= env->prog->len) { 6229 verbose_linfo(env, t, "%d: ", t); 6230 verbose(env, "jump out of range from insn %d to %d\n", t, w); 6231 return -EINVAL; 6232 } 6233 6234 if (e == BRANCH) 6235 /* mark branch target for state pruning */ 6236 init_explored_state(env, w); 6237 6238 if (insn_state[w] == 0) { 6239 /* tree-edge */ 6240 insn_state[t] = DISCOVERED | e; 6241 insn_state[w] = DISCOVERED; 6242 if (env->cfg.cur_stack >= env->prog->len) 6243 return -E2BIG; 6244 insn_stack[env->cfg.cur_stack++] = w; 6245 return 1; 6246 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 6247 if (loop_ok && env->allow_ptr_leaks) 6248 return 0; 6249 verbose_linfo(env, t, "%d: ", t); 6250 verbose_linfo(env, w, "%d: ", w); 6251 verbose(env, "back-edge from insn %d to %d\n", t, w); 6252 return -EINVAL; 6253 } else if (insn_state[w] == EXPLORED) { 6254 /* forward- or cross-edge */ 6255 insn_state[t] = DISCOVERED | e; 6256 } else { 6257 verbose(env, "insn state internal bug\n"); 6258 return -EFAULT; 6259 } 6260 return 0; 6261 } 6262 6263 /* non-recursive depth-first-search to detect loops in BPF program 6264 * loop == back-edge in directed graph 6265 */ 6266 static int check_cfg(struct bpf_verifier_env *env) 6267 { 6268 struct bpf_insn *insns = env->prog->insnsi; 6269 int insn_cnt = env->prog->len; 6270 int *insn_stack, *insn_state; 6271 int ret = 0; 6272 int i, t; 6273 6274 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 6275 if (!insn_state) 6276 return -ENOMEM; 6277 6278 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 6279 if (!insn_stack) { 6280 kvfree(insn_state); 6281 return -ENOMEM; 6282 } 6283 6284 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 6285 insn_stack[0] = 0; /* 0 is the first instruction */ 6286 env->cfg.cur_stack = 1; 6287 6288 peek_stack: 6289 if (env->cfg.cur_stack == 0) 6290 goto check_state; 6291 t = insn_stack[env->cfg.cur_stack - 1]; 6292 6293 if (BPF_CLASS(insns[t].code) == BPF_JMP || 6294 BPF_CLASS(insns[t].code) == BPF_JMP32) { 6295 u8 opcode = BPF_OP(insns[t].code); 6296 6297 if (opcode == BPF_EXIT) { 6298 goto mark_explored; 6299 } else if (opcode == BPF_CALL) { 6300 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 6301 if (ret == 1) 6302 goto peek_stack; 6303 else if (ret < 0) 6304 goto err_free; 6305 if (t + 1 < insn_cnt) 6306 init_explored_state(env, t + 1); 6307 if (insns[t].src_reg == BPF_PSEUDO_CALL) { 6308 init_explored_state(env, t); 6309 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, 6310 env, false); 6311 if (ret == 1) 6312 goto peek_stack; 6313 else if (ret < 0) 6314 goto err_free; 6315 } 6316 } else if (opcode == BPF_JA) { 6317 if (BPF_SRC(insns[t].code) != BPF_K) { 6318 ret = -EINVAL; 6319 goto err_free; 6320 } 6321 /* unconditional jump with single edge */ 6322 ret = push_insn(t, t + insns[t].off + 1, 6323 FALLTHROUGH, env, true); 6324 if (ret == 1) 6325 goto peek_stack; 6326 else if (ret < 0) 6327 goto err_free; 6328 
/* unconditional jmp is not a good pruning point, 6329 * but it's marked, since backtracking needs 6330 * to record jmp history in is_state_visited(). 6331 */ 6332 init_explored_state(env, t + insns[t].off + 1); 6333 /* tell verifier to check for equivalent states 6334 * after every call and jump 6335 */ 6336 if (t + 1 < insn_cnt) 6337 init_explored_state(env, t + 1); 6338 } else { 6339 /* conditional jump with two edges */ 6340 init_explored_state(env, t); 6341 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 6342 if (ret == 1) 6343 goto peek_stack; 6344 else if (ret < 0) 6345 goto err_free; 6346 6347 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true); 6348 if (ret == 1) 6349 goto peek_stack; 6350 else if (ret < 0) 6351 goto err_free; 6352 } 6353 } else { 6354 /* all other non-branch instructions with single 6355 * fall-through edge 6356 */ 6357 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 6358 if (ret == 1) 6359 goto peek_stack; 6360 else if (ret < 0) 6361 goto err_free; 6362 } 6363 6364 mark_explored: 6365 insn_state[t] = EXPLORED; 6366 if (env->cfg.cur_stack-- <= 0) { 6367 verbose(env, "pop stack internal bug\n"); 6368 ret = -EFAULT; 6369 goto err_free; 6370 } 6371 goto peek_stack; 6372 6373 check_state: 6374 for (i = 0; i < insn_cnt; i++) { 6375 if (insn_state[i] != EXPLORED) { 6376 verbose(env, "unreachable insn %d\n", i); 6377 ret = -EINVAL; 6378 goto err_free; 6379 } 6380 } 6381 ret = 0; /* cfg looks good */ 6382 6383 err_free: 6384 kvfree(insn_state); 6385 kvfree(insn_stack); 6386 env->cfg.insn_state = env->cfg.insn_stack = NULL; 6387 return ret; 6388 } 6389 6390 /* The minimum supported BTF func info size */ 6391 #define MIN_BPF_FUNCINFO_SIZE 8 6392 #define MAX_FUNCINFO_REC_SIZE 252 6393 6394 static int check_btf_func(struct bpf_verifier_env *env, 6395 const union bpf_attr *attr, 6396 union bpf_attr __user *uattr) 6397 { 6398 u32 i, nfuncs, urec_size, min_size; 6399 u32 krec_size = sizeof(struct bpf_func_info); 6400 struct bpf_func_info *krecord; 6401 const struct btf_type *type; 6402 struct bpf_prog *prog; 6403 const struct btf *btf; 6404 void __user *urecord; 6405 u32 prev_offset = 0; 6406 int ret = 0; 6407 6408 nfuncs = attr->func_info_cnt; 6409 if (!nfuncs) 6410 return 0; 6411 6412 if (nfuncs != env->subprog_cnt) { 6413 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); 6414 return -EINVAL; 6415 } 6416 6417 urec_size = attr->func_info_rec_size; 6418 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 6419 urec_size > MAX_FUNCINFO_REC_SIZE || 6420 urec_size % sizeof(u32)) { 6421 verbose(env, "invalid func info rec size %u\n", urec_size); 6422 return -EINVAL; 6423 } 6424 6425 prog = env->prog; 6426 btf = prog->aux->btf; 6427 6428 urecord = u64_to_user_ptr(attr->func_info); 6429 min_size = min_t(u32, krec_size, urec_size); 6430 6431 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 6432 if (!krecord) 6433 return -ENOMEM; 6434 6435 for (i = 0; i < nfuncs; i++) { 6436 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 6437 if (ret) { 6438 if (ret == -E2BIG) { 6439 verbose(env, "nonzero tailing record in func info"); 6440 /* set the size kernel expects so loader can zero 6441 * out the rest of the record. 
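				 * For example (illustrative): if the kernel's
				 * bpf_func_info is 8 bytes but the loader passed
				 * 16-byte records with a nonzero tail, min_size
				 * here is 8 and the loader is expected to retry
				 * with bytes 8..15 of each record zeroed.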
6442 */ 6443 if (put_user(min_size, &uattr->func_info_rec_size)) 6444 ret = -EFAULT; 6445 } 6446 goto err_free; 6447 } 6448 6449 if (copy_from_user(&krecord[i], urecord, min_size)) { 6450 ret = -EFAULT; 6451 goto err_free; 6452 } 6453 6454 /* check insn_off */ 6455 if (i == 0) { 6456 if (krecord[i].insn_off) { 6457 verbose(env, 6458 "nonzero insn_off %u for the first func info record", 6459 krecord[i].insn_off); 6460 ret = -EINVAL; 6461 goto err_free; 6462 } 6463 } else if (krecord[i].insn_off <= prev_offset) { 6464 verbose(env, 6465 "same or smaller insn offset (%u) than previous func info record (%u)", 6466 krecord[i].insn_off, prev_offset); 6467 ret = -EINVAL; 6468 goto err_free; 6469 } 6470 6471 if (env->subprog_info[i].start != krecord[i].insn_off) { 6472 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 6473 ret = -EINVAL; 6474 goto err_free; 6475 } 6476 6477 /* check type_id */ 6478 type = btf_type_by_id(btf, krecord[i].type_id); 6479 if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) { 6480 verbose(env, "invalid type id %d in func info", 6481 krecord[i].type_id); 6482 ret = -EINVAL; 6483 goto err_free; 6484 } 6485 6486 prev_offset = krecord[i].insn_off; 6487 urecord += urec_size; 6488 } 6489 6490 prog->aux->func_info = krecord; 6491 prog->aux->func_info_cnt = nfuncs; 6492 return 0; 6493 6494 err_free: 6495 kvfree(krecord); 6496 return ret; 6497 } 6498 6499 static void adjust_btf_func(struct bpf_verifier_env *env) 6500 { 6501 int i; 6502 6503 if (!env->prog->aux->func_info) 6504 return; 6505 6506 for (i = 0; i < env->subprog_cnt; i++) 6507 env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; 6508 } 6509 6510 #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ 6511 sizeof(((struct bpf_line_info *)(0))->line_col)) 6512 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 6513 6514 static int check_btf_line(struct bpf_verifier_env *env, 6515 const union bpf_attr *attr, 6516 union bpf_attr __user *uattr) 6517 { 6518 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 6519 struct bpf_subprog_info *sub; 6520 struct bpf_line_info *linfo; 6521 struct bpf_prog *prog; 6522 const struct btf *btf; 6523 void __user *ulinfo; 6524 int err; 6525 6526 nr_linfo = attr->line_info_cnt; 6527 if (!nr_linfo) 6528 return 0; 6529 6530 rec_size = attr->line_info_rec_size; 6531 if (rec_size < MIN_BPF_LINEINFO_SIZE || 6532 rec_size > MAX_LINEINFO_REC_SIZE || 6533 rec_size & (sizeof(u32) - 1)) 6534 return -EINVAL; 6535 6536 /* Need to zero it in case the userspace may 6537 * pass in a smaller bpf_line_info object. 
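	 * kvcalloc() below returns zeroed memory, so when rec_size is smaller
	 * than sizeof(struct bpf_line_info), the bytes beyond the copied
	 * ncopy prefix of each record simply read as zero.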
6538 */ 6539 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 6540 GFP_KERNEL | __GFP_NOWARN); 6541 if (!linfo) 6542 return -ENOMEM; 6543 6544 prog = env->prog; 6545 btf = prog->aux->btf; 6546 6547 s = 0; 6548 sub = env->subprog_info; 6549 ulinfo = u64_to_user_ptr(attr->line_info); 6550 expected_size = sizeof(struct bpf_line_info); 6551 ncopy = min_t(u32, expected_size, rec_size); 6552 for (i = 0; i < nr_linfo; i++) { 6553 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 6554 if (err) { 6555 if (err == -E2BIG) { 6556 verbose(env, "nonzero tailing record in line_info"); 6557 if (put_user(expected_size, 6558 &uattr->line_info_rec_size)) 6559 err = -EFAULT; 6560 } 6561 goto err_free; 6562 } 6563 6564 if (copy_from_user(&linfo[i], ulinfo, ncopy)) { 6565 err = -EFAULT; 6566 goto err_free; 6567 } 6568 6569 /* 6570 * Check insn_off to ensure 6571 * 1) strictly increasing AND 6572 * 2) bounded by prog->len 6573 * 6574 * The linfo[0].insn_off == 0 check logically falls into 6575 * the later "missing bpf_line_info for func..." case 6576 * because the first linfo[0].insn_off must be the 6577 * first sub also and the first sub must have 6578 * subprog_info[0].start == 0. 6579 */ 6580 if ((i && linfo[i].insn_off <= prev_offset) || 6581 linfo[i].insn_off >= prog->len) { 6582 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 6583 i, linfo[i].insn_off, prev_offset, 6584 prog->len); 6585 err = -EINVAL; 6586 goto err_free; 6587 } 6588 6589 if (!prog->insnsi[linfo[i].insn_off].code) { 6590 verbose(env, 6591 "Invalid insn code at line_info[%u].insn_off\n", 6592 i); 6593 err = -EINVAL; 6594 goto err_free; 6595 } 6596 6597 if (!btf_name_by_offset(btf, linfo[i].line_off) || 6598 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 6599 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 6600 err = -EINVAL; 6601 goto err_free; 6602 } 6603 6604 if (s != env->subprog_cnt) { 6605 if (linfo[i].insn_off == sub[s].start) { 6606 sub[s].linfo_idx = i; 6607 s++; 6608 } else if (sub[s].start < linfo[i].insn_off) { 6609 verbose(env, "missing bpf_line_info for func#%u\n", s); 6610 err = -EINVAL; 6611 goto err_free; 6612 } 6613 } 6614 6615 prev_offset = linfo[i].insn_off; 6616 ulinfo += rec_size; 6617 } 6618 6619 if (s != env->subprog_cnt) { 6620 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 6621 env->subprog_cnt - s, s); 6622 err = -EINVAL; 6623 goto err_free; 6624 } 6625 6626 prog->aux->linfo = linfo; 6627 prog->aux->nr_linfo = nr_linfo; 6628 6629 return 0; 6630 6631 err_free: 6632 kvfree(linfo); 6633 return err; 6634 } 6635 6636 static int check_btf_info(struct bpf_verifier_env *env, 6637 const union bpf_attr *attr, 6638 union bpf_attr __user *uattr) 6639 { 6640 struct btf *btf; 6641 int err; 6642 6643 if (!attr->func_info_cnt && !attr->line_info_cnt) 6644 return 0; 6645 6646 btf = btf_get_by_fd(attr->prog_btf_fd); 6647 if (IS_ERR(btf)) 6648 return PTR_ERR(btf); 6649 env->prog->aux->btf = btf; 6650 6651 err = check_btf_func(env, attr, uattr); 6652 if (err) 6653 return err; 6654 6655 err = check_btf_line(env, attr, uattr); 6656 if (err) 6657 return err; 6658 6659 return 0; 6660 } 6661 6662 /* check %cur's range satisfies %old's */ 6663 static bool range_within(struct bpf_reg_state *old, 6664 struct bpf_reg_state *cur) 6665 { 6666 return old->umin_value <= cur->umin_value && 6667 old->umax_value >= cur->umax_value && 6668 old->smin_value <= cur->smin_value && 6669 old->smax_value >= cur->smax_value; 6670 } 6671 6672 /* 
Maximum number of register states that can exist at once */
#define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
struct idpair {
	u32 old;
	u32 cur;
};

/* If in the old state two registers had the same id, then they need to have
 * the same id in the new state as well. But that id could be different from
 * the old state, so we need to track the mapping from old to new ids.
 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
 * regs with old id 5 must also have new id 9 for the new state to be safe. But
 * regs with a different old id could still have new id 9, we don't care about
 * that.
 * So we look through our idmap to see if this old id has been seen before. If
 * so, we require the new id to match; otherwise, we add the id pair to the map.
 */
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
{
	unsigned int i;

	for (i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* Reached an empty slot; haven't seen this id before */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	/* We ran out of idmap slots, which should be impossible */
	WARN_ON_ONCE(1);
	return false;
}

static void clean_func_state(struct bpf_verifier_env *env,
			     struct bpf_func_state *st)
{
	enum bpf_reg_liveness live;
	int i, j;

	for (i = 0; i < BPF_REG_FP; i++) {
		live = st->regs[i].live;
		/* liveness must not touch this register anymore */
		st->regs[i].live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ))
			/* since the register is unused, clear its state
			 * to make further comparison simpler
			 */
			__mark_reg_not_init(&st->regs[i]);
	}

	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
		live = st->stack[i].spilled_ptr.live;
		/* liveness must not touch this stack slot anymore */
		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ)) {
			__mark_reg_not_init(&st->stack[i].spilled_ptr);
			for (j = 0; j < BPF_REG_SIZE; j++)
				st->stack[i].slot_type[j] = STACK_INVALID;
		}
	}
}

static void clean_verifier_state(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *st)
{
	int i;

	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
		/* all regs in this state in all frames were already marked */
		return;

	for (i = 0; i <= st->curframe; i++)
		clean_func_state(env, st->frame[i]);
}

/* the parentage chains form a tree.
 * the verifier states are added to state lists at given insn and
 * pushed into state stack for future exploration.
 * when the verifier reaches bpf_exit insn some of the verifier states
 * stored in the state lists have their final liveness state already,
 * but a lot of states will get revised from liveness point of view when
 * the verifier explores other branches.
 * Example:
 * 1: r0 = 1
 * 2: if r1 == 100 goto pc+1
 * 3: r0 = 2
 * 4: exit
 * when the verifier reaches exit insn the register r0 in the state list of
 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
 * of insn 2 and goes exploring further. At the insn 4 it will walk the
 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
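 * From that point on the cached state at insn 2 carries a read mark on
 * r0, so a later path can only be pruned at insn 2 if its own r0 is at
 * least as safe (see regsafe()).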
 *
 * Since the verifier pushes the branch states as it sees them while exploring
 * the program, walking a branch instruction for the second time means that
 * all states below this branch were already explored and their final
 * liveness marks are already propagated.
 * Hence when the verifier completes the search of state list in is_state_visited()
 * we can call this clean_live_states() function to mark all liveness states
 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
 * will not be used.
 * This function also clears the registers and stack slots that were not READ
 * to simplify state merging.
 *
 * Important note: walking the same branch instruction in the callee
 * doesn't mean that the states are DONE. The verifier has to compare
 * the callsites as well.
 */
static void clean_live_states(struct bpf_verifier_env *env, int insn,
			      struct bpf_verifier_state *cur)
{
	struct bpf_verifier_state_list *sl;
	int i;

	sl = *explored_state(env, insn);
	while (sl) {
		if (sl->state.branches)
			goto next;
		if (sl->state.insn_idx != insn ||
		    sl->state.curframe != cur->curframe)
			goto next;
		for (i = 0; i <= cur->curframe; i++)
			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
				goto next;
		clean_verifier_state(env, &sl->state);
next:
		sl = sl->next;
	}
}

/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
		    struct idpair *idmap)
{
	bool equal;

	if (!(rold->live & REG_LIVE_READ))
		/* explored state didn't use this */
		return true;

	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;

	if (rold->type == PTR_TO_STACK)
		/* two stack pointers are equal only if they're pointing to
		 * the same stack frame, since fp-8 in foo != fp-8 in bar
		 */
		return equal && rold->frameno == rcur->frameno;

	if (equal)
		return true;

	if (rold->type == NOT_INIT)
		/* explored state can't have used this */
		return true;
	if (rcur->type == NOT_INIT)
		return false;
	switch (rold->type) {
	case SCALAR_VALUE:
		if (rcur->type == SCALAR_VALUE) {
			if (!rold->precise && !rcur->precise)
				return true;
			/* new val must satisfy old val knowledge */
			return range_within(rold, rcur) &&
			       tnum_in(rold->var_off, rcur->var_off);
		} else {
			/* We're trying to use a pointer in place of a scalar.
			 * Even if the scalar was unbounded, this could lead to
			 * pointer leaks because scalars are allowed to leak
			 * while pointers are not. We could make this safe in
			 * special cases if root is calling us, but it's
			 * probably not worth the hassle.
			 */
			return false;
		}
	case PTR_TO_MAP_VALUE:
		/* If the new min/max/var_off satisfy the old ones and
		 * everything else matches, we are OK.
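		 * For instance, an old reg with umax_value 16 safely stands
		 * in for a new reg with umax_value 8: any access proven safe
		 * for the wider range is also safe for the narrower one.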
6851 * 'id' is not compared, since it's only used for maps with 6852 * bpf_spin_lock inside map element and in such cases if 6853 * the rest of the prog is valid for one map element then 6854 * it's valid for all map elements regardless of the key 6855 * used in bpf_map_lookup() 6856 */ 6857 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 6858 range_within(rold, rcur) && 6859 tnum_in(rold->var_off, rcur->var_off); 6860 case PTR_TO_MAP_VALUE_OR_NULL: 6861 /* a PTR_TO_MAP_VALUE could be safe to use as a 6862 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 6863 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 6864 * checked, doing so could have affected others with the same 6865 * id, and we can't check for that because we lost the id when 6866 * we converted to a PTR_TO_MAP_VALUE. 6867 */ 6868 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 6869 return false; 6870 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 6871 return false; 6872 /* Check our ids match any regs they're supposed to */ 6873 return check_ids(rold->id, rcur->id, idmap); 6874 case PTR_TO_PACKET_META: 6875 case PTR_TO_PACKET: 6876 if (rcur->type != rold->type) 6877 return false; 6878 /* We must have at least as much range as the old ptr 6879 * did, so that any accesses which were safe before are 6880 * still safe. This is true even if old range < old off, 6881 * since someone could have accessed through (ptr - k), or 6882 * even done ptr -= k in a register, to get a safe access. 6883 */ 6884 if (rold->range > rcur->range) 6885 return false; 6886 /* If the offsets don't match, we can't trust our alignment; 6887 * nor can we be sure that we won't fall out of range. 6888 */ 6889 if (rold->off != rcur->off) 6890 return false; 6891 /* id relations must be preserved */ 6892 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 6893 return false; 6894 /* new val must satisfy old val knowledge */ 6895 return range_within(rold, rcur) && 6896 tnum_in(rold->var_off, rcur->var_off); 6897 case PTR_TO_CTX: 6898 case CONST_PTR_TO_MAP: 6899 case PTR_TO_PACKET_END: 6900 case PTR_TO_FLOW_KEYS: 6901 case PTR_TO_SOCKET: 6902 case PTR_TO_SOCKET_OR_NULL: 6903 case PTR_TO_SOCK_COMMON: 6904 case PTR_TO_SOCK_COMMON_OR_NULL: 6905 case PTR_TO_TCP_SOCK: 6906 case PTR_TO_TCP_SOCK_OR_NULL: 6907 case PTR_TO_XDP_SOCK: 6908 /* Only valid matches are exact, which memcmp() above 6909 * would have accepted 6910 */ 6911 default: 6912 /* Don't know what's going on, just say it's not safe */ 6913 return false; 6914 } 6915 6916 /* Shouldn't get here; if we do, say it's not safe */ 6917 WARN_ON_ONCE(1); 6918 return false; 6919 } 6920 6921 static bool stacksafe(struct bpf_func_state *old, 6922 struct bpf_func_state *cur, 6923 struct idpair *idmap) 6924 { 6925 int i, spi; 6926 6927 /* walk slots of the explored stack and ignore any additional 6928 * slots in the current stack, since explored(safe) state 6929 * didn't use them 6930 */ 6931 for (i = 0; i < old->allocated_stack; i++) { 6932 spi = i / BPF_REG_SIZE; 6933 6934 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { 6935 i += BPF_REG_SIZE - 1; 6936 /* explored state didn't use this */ 6937 continue; 6938 } 6939 6940 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 6941 continue; 6942 6943 /* explored stack has more populated slots than current stack 6944 * and these slots were used 6945 */ 6946 if (i >= cur->allocated_stack) 6947 return false; 6948 6949 /* if old state was safe with misc data in the stack 6950 * it will be safe with 
zero-initialized stack.
		 * The opposite is not true.
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
			continue;
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			     idmap))
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
	}
	return true;
}

static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
{
	if (old->acquired_refs != cur->acquired_refs)
		return false;
	return !memcmp(old->refs, cur->refs,
		       sizeof(*old->refs) * old->acquired_refs);
}

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool func_states_equal(struct bpf_func_state *old,
			      struct bpf_func_state *cur)
{
	struct idpair *idmap;
	bool ret = false;
	int i;

	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
	/* If we failed to allocate the idmap, just say it's not safe */
	if (!idmap)
		return false;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
			goto out_free;
	}

	if (!stacksafe(old, cur, idmap))
		goto out_free;

	if (!refsafe(old, cur))
		goto out_free;
	ret = true;
out_free:
	kfree(idmap);
	return ret;
}

static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur)
{
	int i;

	if (old->curframe != cur->curframe)
		return false;

	/* Verification state from speculative execution simulation
	 * must never prune a non-speculative execution one.
	 */
	if (old->speculative && !cur->speculative)
		return false;

	if (old->active_spin_lock != cur->active_spin_lock)
		return false;

	/* for states to be equal callsites have to be the same
	 * and all frame states need to be equivalent
	 */
	for (i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;
		if (!func_states_equal(old->frame[i], cur->frame[i]))
			return false;
	}
	return true;
}

/* Return 0 if no propagation happened. Return negative error code if error
 * happened. Otherwise, return the propagated bit.
 */
static int propagate_liveness_reg(struct bpf_verifier_env *env,
				  struct bpf_reg_state *reg,
				  struct bpf_reg_state *parent_reg)
{
	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
	u8 flag = reg->live & REG_LIVE_READ;
	int err;

	/* When we get here, the read flags of PARENT_REG or REG could be any
	 * of REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no
	 * need for propagation if PARENT_REG already has the strongest flag,
	 * REG_LIVE_READ64.
	 */
	if (parent_flag == REG_LIVE_READ64 ||
	    /* Or if there is no read flag from REG. */
	    !flag ||
	    /* Or if the read flag from REG is the same as PARENT_REG. */
	    parent_flag == flag)
		return 0;

	err = mark_reg_read(env, reg, parent_reg, flag);
	if (err)
		return err;

	return flag;
}

/* A write screens off any subsequent reads; but write marks come from the
 * straight-line code between a state and its parent. When we arrive at an
 * equivalent state (jump target or such) we didn't arrive by the straight-line
 * code, so read marks in the state must propagate to the parent regardless
 * of the state's write marks. That's what 'parent == state->parent' comparison
 * in mark_reg_read() is for.
7113 */ 7114 static int propagate_liveness(struct bpf_verifier_env *env, 7115 const struct bpf_verifier_state *vstate, 7116 struct bpf_verifier_state *vparent) 7117 { 7118 struct bpf_reg_state *state_reg, *parent_reg; 7119 struct bpf_func_state *state, *parent; 7120 int i, frame, err = 0; 7121 7122 if (vparent->curframe != vstate->curframe) { 7123 WARN(1, "propagate_live: parent frame %d current frame %d\n", 7124 vparent->curframe, vstate->curframe); 7125 return -EFAULT; 7126 } 7127 /* Propagate read liveness of registers... */ 7128 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 7129 for (frame = 0; frame <= vstate->curframe; frame++) { 7130 parent = vparent->frame[frame]; 7131 state = vstate->frame[frame]; 7132 parent_reg = parent->regs; 7133 state_reg = state->regs; 7134 /* We don't need to worry about FP liveness, it's read-only */ 7135 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 7136 err = propagate_liveness_reg(env, &state_reg[i], 7137 &parent_reg[i]); 7138 if (err < 0) 7139 return err; 7140 if (err == REG_LIVE_READ64) 7141 mark_insn_zext(env, &parent_reg[i]); 7142 } 7143 7144 /* Propagate stack slots. */ 7145 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 7146 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 7147 parent_reg = &parent->stack[i].spilled_ptr; 7148 state_reg = &state->stack[i].spilled_ptr; 7149 err = propagate_liveness_reg(env, state_reg, 7150 parent_reg); 7151 if (err < 0) 7152 return err; 7153 } 7154 } 7155 return 0; 7156 } 7157 7158 /* find precise scalars in the previous equivalent state and 7159 * propagate them into the current state 7160 */ 7161 static int propagate_precision(struct bpf_verifier_env *env, 7162 const struct bpf_verifier_state *old) 7163 { 7164 struct bpf_reg_state *state_reg; 7165 struct bpf_func_state *state; 7166 int i, err = 0; 7167 7168 state = old->frame[old->curframe]; 7169 state_reg = state->regs; 7170 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 7171 if (state_reg->type != SCALAR_VALUE || 7172 !state_reg->precise) 7173 continue; 7174 if (env->log.level & BPF_LOG_LEVEL2) 7175 verbose(env, "propagating r%d\n", i); 7176 err = mark_chain_precision(env, i); 7177 if (err < 0) 7178 return err; 7179 } 7180 7181 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 7182 if (state->stack[i].slot_type[0] != STACK_SPILL) 7183 continue; 7184 state_reg = &state->stack[i].spilled_ptr; 7185 if (state_reg->type != SCALAR_VALUE || 7186 !state_reg->precise) 7187 continue; 7188 if (env->log.level & BPF_LOG_LEVEL2) 7189 verbose(env, "propagating fp%d\n", 7190 (-i - 1) * BPF_REG_SIZE); 7191 err = mark_chain_precision_stack(env, i); 7192 if (err < 0) 7193 return err; 7194 } 7195 return 0; 7196 } 7197 7198 static bool states_maybe_looping(struct bpf_verifier_state *old, 7199 struct bpf_verifier_state *cur) 7200 { 7201 struct bpf_func_state *fold, *fcur; 7202 int i, fr = cur->curframe; 7203 7204 if (old->curframe != fr) 7205 return false; 7206 7207 fold = old->frame[fr]; 7208 fcur = cur->frame[fr]; 7209 for (i = 0; i < MAX_BPF_REG; i++) 7210 if (memcmp(&fold->regs[i], &fcur->regs[i], 7211 offsetof(struct bpf_reg_state, parent))) 7212 return false; 7213 return true; 7214 } 7215 7216 7217 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 7218 { 7219 struct bpf_verifier_state_list *new_sl; 7220 struct bpf_verifier_state_list *sl, **pprev; 7221 struct bpf_verifier_state *cur = env->cur_state, *new; 7222 int i, j, err, states_cnt = 0; 7223 bool add_new_state = false; 7224 7225 cur->last_insn_idx = 
env->prev_insn_idx;
	if (!env->insn_aux_data[insn_idx].prune_point)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	/* bpf progs typically have a pruning point every 4 instructions
	 * http://vger.kernel.org/bpfconf2019.html#session-1
	 * Do not add new state for future pruning if the verifier hasn't seen
	 * at least 2 jumps and at least 8 instructions.
	 * This heuristic helps decrease 'total_states' and 'peak_states' metric.
	 * In tests that amounts to up to 50% reduction in total verifier
	 * memory consumption and 20% verifier time speedup.
	 */
	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
	    env->insn_processed - env->prev_insn_processed >= 8)
		add_new_state = true;

	pprev = explored_state(env, insn_idx);
	sl = *pprev;

	clean_live_states(env, insn_idx, cur);

	while (sl) {
		states_cnt++;
		if (sl->state.insn_idx != insn_idx)
			goto next;
		if (sl->state.branches) {
			if (states_maybe_looping(&sl->state, cur) &&
			    states_equal(env, &sl->state, cur)) {
				verbose_linfo(env, insn_idx, "; ");
				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
				return -EINVAL;
			}
			/* if the verifier is processing a loop, avoid adding new state
			 * too often, since different loop iterations have distinct
			 * states and may not help future pruning.
			 * This threshold shouldn't be too low to make sure that
			 * a loop with large bound will be rejected quickly.
			 * The most abusive loop will be:
			 * r1 += 1
			 * if r1 < 1000000 goto pc-2
			 * 1M insn_processed limit / 100 == 10k peak states.
			 * This threshold shouldn't be too high either, since states
			 * at the end of the loop are likely to be useful in pruning.
			 */
			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
			    env->insn_processed - env->prev_insn_processed < 100)
				add_new_state = false;
			goto miss;
		}
		if (states_equal(env, &sl->state, cur)) {
			sl->hit_cnt++;
			/* reached equivalent register/stack state,
			 * prune the search.
			 * Registers read by the continuation are read by us.
			 * If we have any write marks in env->cur_state, they
			 * will prevent corresponding reads in the continuation
			 * from reaching our parent (an explored_state). Our
			 * own state will get the read marks recorded, but
			 * they'll be immediately forgotten as we're pruning
			 * this state and will pop a new one.
			 */
			err = propagate_liveness(env, &sl->state, cur);

			/* if previous state reached the exit with precision and
			 * current state is equivalent to it (except precision marks)
			 * the precision needs to be propagated back in
			 * the current state.
			 */
			err = err ? : push_jmp_history(env, cur);
			err = err ? : propagate_precision(env, &sl->state);
			if (err)
				return err;
			return 1;
		}
miss:
		/* when new state is not going to be added do not increase miss count.
		 * Otherwise several loop iterations will remove the state
		 * recorded earlier. The goal of these heuristics is to have
		 * states from some iterations of the loop (some in the beginning
		 * and some at the end) to help pruning.
		 */
		if (add_new_state)
			sl->miss_cnt++;
		/* heuristic to determine whether this state is beneficial
		 * to keep checking from state equivalence point of view.
		 * Higher numbers increase max_states_per_insn and verification time,
		 * but do not meaningfully decrease insn_processed.
		 */
		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
			/* the state is unlikely to be useful. Remove it to
			 * speed up verification
			 */
			*pprev = sl->next;
			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
				u32 br = sl->state.branches;

				WARN_ONCE(br,
					  "BUG live_done but branches_to_explore %d\n",
					  br);
				free_verifier_state(&sl->state, false);
				kfree(sl);
				env->peak_states--;
			} else {
				/* cannot free this state, since parentage chain may
				 * walk it later. Add it to the free_list instead, to
				 * be freed at the end of verification
				 */
				sl->next = env->free_list;
				env->free_list = sl;
			}
			sl = *pprev;
			continue;
		}
next:
		pprev = &sl->next;
		sl = *pprev;
	}

	if (env->max_states_per_insn < states_cnt)
		env->max_states_per_insn = states_cnt;

	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
		return push_jmp_history(env, cur);

	if (!add_new_state)
		return push_jmp_history(env, cur);

	/* There were no equivalent states, remember the current one.
	 * Technically the current state is not proven to be safe yet,
	 * but it will either reach the outermost bpf_exit (which means it's safe)
	 * or it will be rejected. When there are no loops the verifier won't be
	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
	 * again on the way to bpf_exit.
	 * When looping the sl->state.branches will be > 0 and this state
	 * will not be considered for equivalence until branches == 0.
	 */
	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
	if (!new_sl)
		return -ENOMEM;
	env->total_states++;
	env->peak_states++;
	env->prev_jmps_processed = env->jmps_processed;
	env->prev_insn_processed = env->insn_processed;

	/* add new state to the head of linked list */
	new = &new_sl->state;
	err = copy_verifier_state(new, cur);
	if (err) {
		free_verifier_state(new, false);
		kfree(new_sl);
		return err;
	}
	new->insn_idx = insn_idx;
	WARN_ONCE(new->branches != 1,
		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);

	cur->parent = new;
	cur->first_insn_idx = insn_idx;
	clear_jmp_history(cur);
	new_sl->next = *explored_state(env, insn_idx);
	*explored_state(env, insn_idx) = new_sl;
	/* connect new state to parentage chain. Current frame needs all
	 * registers connected. Only r6 - r9 of the callers are alive (pushed
	 * to the stack implicitly by JITs) so in callers' frames connect just
	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
	 * the state of the call instruction (with WRITTEN set), and r0 comes
	 * from callee with its full parentage chain, anyway.
	 */
	/* clear write marks in current state: the writes we did are not writes
	 * our child did, so they don't screen off its reads from us.
	 * (There are no read marks in current state, because reads always mark
	 * their parent and current state never has children yet. Only
	 * explored_states can get read marks.)
	 */
	for (j = 0; j <= cur->curframe; j++) {
		for (i = j < cur->curframe ?
BPF_REG_6 : 0; i < BPF_REG_FP; i++) 7404 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 7405 for (i = 0; i < BPF_REG_FP; i++) 7406 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 7407 } 7408 7409 /* all stack frames are accessible from callee, clear them all */ 7410 for (j = 0; j <= cur->curframe; j++) { 7411 struct bpf_func_state *frame = cur->frame[j]; 7412 struct bpf_func_state *newframe = new->frame[j]; 7413 7414 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 7415 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 7416 frame->stack[i].spilled_ptr.parent = 7417 &newframe->stack[i].spilled_ptr; 7418 } 7419 } 7420 return 0; 7421 } 7422 7423 /* Return true if it's OK to have the same insn return a different type. */ 7424 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 7425 { 7426 switch (type) { 7427 case PTR_TO_CTX: 7428 case PTR_TO_SOCKET: 7429 case PTR_TO_SOCKET_OR_NULL: 7430 case PTR_TO_SOCK_COMMON: 7431 case PTR_TO_SOCK_COMMON_OR_NULL: 7432 case PTR_TO_TCP_SOCK: 7433 case PTR_TO_TCP_SOCK_OR_NULL: 7434 case PTR_TO_XDP_SOCK: 7435 return false; 7436 default: 7437 return true; 7438 } 7439 } 7440 7441 /* If an instruction was previously used with particular pointer types, then we 7442 * need to be careful to avoid cases such as the below, where it may be ok 7443 * for one branch accessing the pointer, but not ok for the other branch: 7444 * 7445 * R1 = sock_ptr 7446 * goto X; 7447 * ... 7448 * R1 = some_other_valid_ptr; 7449 * goto X; 7450 * ... 7451 * R2 = *(u32 *)(R1 + 0); 7452 */ 7453 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 7454 { 7455 return src != prev && (!reg_type_mismatch_ok(src) || 7456 !reg_type_mismatch_ok(prev)); 7457 } 7458 7459 static int do_check(struct bpf_verifier_env *env) 7460 { 7461 struct bpf_verifier_state *state; 7462 struct bpf_insn *insns = env->prog->insnsi; 7463 struct bpf_reg_state *regs; 7464 int insn_cnt = env->prog->len; 7465 bool do_print_state = false; 7466 int prev_insn_idx = -1; 7467 7468 env->prev_linfo = NULL; 7469 7470 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 7471 if (!state) 7472 return -ENOMEM; 7473 state->curframe = 0; 7474 state->speculative = false; 7475 state->branches = 1; 7476 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 7477 if (!state->frame[0]) { 7478 kfree(state); 7479 return -ENOMEM; 7480 } 7481 env->cur_state = state; 7482 init_func_state(env, state->frame[0], 7483 BPF_MAIN_FUNC /* callsite */, 7484 0 /* frameno */, 7485 0 /* subprogno, zero == main subprog */); 7486 7487 for (;;) { 7488 struct bpf_insn *insn; 7489 u8 class; 7490 int err; 7491 7492 env->prev_insn_idx = prev_insn_idx; 7493 if (env->insn_idx >= insn_cnt) { 7494 verbose(env, "invalid insn idx %d insn_cnt %d\n", 7495 env->insn_idx, insn_cnt); 7496 return -EFAULT; 7497 } 7498 7499 insn = &insns[env->insn_idx]; 7500 class = BPF_CLASS(insn->code); 7501 7502 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 7503 verbose(env, 7504 "BPF program is too large. Processed %d insn\n", 7505 env->insn_processed); 7506 return -E2BIG; 7507 } 7508 7509 err = is_state_visited(env, env->insn_idx); 7510 if (err < 0) 7511 return err; 7512 if (err == 1) { 7513 /* found equivalent state, can prune the search */ 7514 if (env->log.level & BPF_LOG_LEVEL) { 7515 if (do_print_state) 7516 verbose(env, "\nfrom %d to %d%s: safe\n", 7517 env->prev_insn_idx, env->insn_idx, 7518 env->cur_state->speculative ? 
" (speculative execution)" : "");
				else
					verbose(env, "%d: safe\n", env->insn_idx);
			}
			goto process_bpf_exit;
		}

		if (signal_pending(current))
			return -EAGAIN;

		if (need_resched())
			cond_resched();

		if (env->log.level & BPF_LOG_LEVEL2 ||
		    (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
			if (env->log.level & BPF_LOG_LEVEL2)
				verbose(env, "%d:", env->insn_idx);
			else
				verbose(env, "\nfrom %d to %d%s:",
					env->prev_insn_idx, env->insn_idx,
					env->cur_state->speculative ?
					" (speculative execution)" : "");
			print_verifier_state(env, state->frame[state->curframe]);
			do_print_state = false;
		}

		if (env->log.level & BPF_LOG_LEVEL) {
			const struct bpf_insn_cbs cbs = {
				.cb_print	= verbose,
				.private_data	= env,
			};

			verbose_linfo(env, env->insn_idx, "; ");
			verbose(env, "%d: ", env->insn_idx);
			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
		}

		if (bpf_prog_is_dev_bound(env->prog->aux)) {
			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
							   env->prev_insn_idx);
			if (err)
				return err;
		}

		regs = cur_regs(env);
		env->insn_aux_data[env->insn_idx].seen = true;
		prev_insn_idx = env->insn_idx;

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type *prev_src_type, src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, env->insn_idx, insn->src_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_READ, insn->dst_reg, false);
			if (err)
				return err;

			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;

			if (*prev_src_type == NOT_INIT) {
				/* saw a valid insn
				 * dst_reg = *(u32 *)(src_reg + off)
				 * save type to validate intersecting paths
				 */
				*prev_src_type = src_reg_type;

			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
				/* Abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 * Reject it.
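				 *
				 * An illustrative (hypothetical) sequence that
				 * trips this check, assuming R1 is PTR_TO_CTX
				 * on entry and R2 holds a scalar:
				 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 2), // skip 2 if R2 == 0
				 *   BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),  // R1 = fp
				 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), // R1 = fp - 8
				 *   BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
				 * The final load sees PTR_TO_CTX on one path
				 * and PTR_TO_STACK on the other, so it is
				 * rejected here.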
7613 */ 7614 verbose(env, "same insn cannot be used with different pointers\n"); 7615 return -EINVAL; 7616 } 7617 7618 } else if (class == BPF_STX) { 7619 enum bpf_reg_type *prev_dst_type, dst_reg_type; 7620 7621 if (BPF_MODE(insn->code) == BPF_XADD) { 7622 err = check_xadd(env, env->insn_idx, insn); 7623 if (err) 7624 return err; 7625 env->insn_idx++; 7626 continue; 7627 } 7628 7629 /* check src1 operand */ 7630 err = check_reg_arg(env, insn->src_reg, SRC_OP); 7631 if (err) 7632 return err; 7633 /* check src2 operand */ 7634 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7635 if (err) 7636 return err; 7637 7638 dst_reg_type = regs[insn->dst_reg].type; 7639 7640 /* check that memory (dst_reg + off) is writeable */ 7641 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 7642 insn->off, BPF_SIZE(insn->code), 7643 BPF_WRITE, insn->src_reg, false); 7644 if (err) 7645 return err; 7646 7647 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; 7648 7649 if (*prev_dst_type == NOT_INIT) { 7650 *prev_dst_type = dst_reg_type; 7651 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { 7652 verbose(env, "same insn cannot be used with different pointers\n"); 7653 return -EINVAL; 7654 } 7655 7656 } else if (class == BPF_ST) { 7657 if (BPF_MODE(insn->code) != BPF_MEM || 7658 insn->src_reg != BPF_REG_0) { 7659 verbose(env, "BPF_ST uses reserved fields\n"); 7660 return -EINVAL; 7661 } 7662 /* check src operand */ 7663 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7664 if (err) 7665 return err; 7666 7667 if (is_ctx_reg(env, insn->dst_reg)) { 7668 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", 7669 insn->dst_reg, 7670 reg_type_str[reg_state(env, insn->dst_reg)->type]); 7671 return -EACCES; 7672 } 7673 7674 /* check that memory (dst_reg + off) is writeable */ 7675 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 7676 insn->off, BPF_SIZE(insn->code), 7677 BPF_WRITE, -1, false); 7678 if (err) 7679 return err; 7680 7681 } else if (class == BPF_JMP || class == BPF_JMP32) { 7682 u8 opcode = BPF_OP(insn->code); 7683 7684 env->jmps_processed++; 7685 if (opcode == BPF_CALL) { 7686 if (BPF_SRC(insn->code) != BPF_K || 7687 insn->off != 0 || 7688 (insn->src_reg != BPF_REG_0 && 7689 insn->src_reg != BPF_PSEUDO_CALL) || 7690 insn->dst_reg != BPF_REG_0 || 7691 class == BPF_JMP32) { 7692 verbose(env, "BPF_CALL uses reserved fields\n"); 7693 return -EINVAL; 7694 } 7695 7696 if (env->cur_state->active_spin_lock && 7697 (insn->src_reg == BPF_PSEUDO_CALL || 7698 insn->imm != BPF_FUNC_spin_unlock)) { 7699 verbose(env, "function calls are not allowed while holding a lock\n"); 7700 return -EINVAL; 7701 } 7702 if (insn->src_reg == BPF_PSEUDO_CALL) 7703 err = check_func_call(env, insn, &env->insn_idx); 7704 else 7705 err = check_helper_call(env, insn->imm, env->insn_idx); 7706 if (err) 7707 return err; 7708 7709 } else if (opcode == BPF_JA) { 7710 if (BPF_SRC(insn->code) != BPF_K || 7711 insn->imm != 0 || 7712 insn->src_reg != BPF_REG_0 || 7713 insn->dst_reg != BPF_REG_0 || 7714 class == BPF_JMP32) { 7715 verbose(env, "BPF_JA uses reserved fields\n"); 7716 return -EINVAL; 7717 } 7718 7719 env->insn_idx += insn->off + 1; 7720 continue; 7721 7722 } else if (opcode == BPF_EXIT) { 7723 if (BPF_SRC(insn->code) != BPF_K || 7724 insn->imm != 0 || 7725 insn->src_reg != BPF_REG_0 || 7726 insn->dst_reg != BPF_REG_0 || 7727 class == BPF_JMP32) { 7728 verbose(env, "BPF_EXIT uses reserved fields\n"); 7729 return -EINVAL; 7730 } 7731 7732 if (env->cur_state->active_spin_lock) { 7733 verbose(env, 
"bpf_spin_unlock is missing\n"); 7734 return -EINVAL; 7735 } 7736 7737 if (state->curframe) { 7738 /* exit from nested function */ 7739 err = prepare_func_exit(env, &env->insn_idx); 7740 if (err) 7741 return err; 7742 do_print_state = true; 7743 continue; 7744 } 7745 7746 err = check_reference_leak(env); 7747 if (err) 7748 return err; 7749 7750 /* eBPF calling convetion is such that R0 is used 7751 * to return the value from eBPF program. 7752 * Make sure that it's readable at this time 7753 * of bpf_exit, which means that program wrote 7754 * something into it earlier 7755 */ 7756 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 7757 if (err) 7758 return err; 7759 7760 if (is_pointer_value(env, BPF_REG_0)) { 7761 verbose(env, "R0 leaks addr as return value\n"); 7762 return -EACCES; 7763 } 7764 7765 err = check_return_code(env); 7766 if (err) 7767 return err; 7768 process_bpf_exit: 7769 update_branch_counts(env, env->cur_state); 7770 err = pop_stack(env, &prev_insn_idx, 7771 &env->insn_idx); 7772 if (err < 0) { 7773 if (err != -ENOENT) 7774 return err; 7775 break; 7776 } else { 7777 do_print_state = true; 7778 continue; 7779 } 7780 } else { 7781 err = check_cond_jmp_op(env, insn, &env->insn_idx); 7782 if (err) 7783 return err; 7784 } 7785 } else if (class == BPF_LD) { 7786 u8 mode = BPF_MODE(insn->code); 7787 7788 if (mode == BPF_ABS || mode == BPF_IND) { 7789 err = check_ld_abs(env, insn); 7790 if (err) 7791 return err; 7792 7793 } else if (mode == BPF_IMM) { 7794 err = check_ld_imm(env, insn); 7795 if (err) 7796 return err; 7797 7798 env->insn_idx++; 7799 env->insn_aux_data[env->insn_idx].seen = true; 7800 } else { 7801 verbose(env, "invalid BPF_LD mode\n"); 7802 return -EINVAL; 7803 } 7804 } else { 7805 verbose(env, "unknown insn class %d\n", class); 7806 return -EINVAL; 7807 } 7808 7809 env->insn_idx++; 7810 } 7811 7812 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 7813 return 0; 7814 } 7815 7816 static int check_map_prealloc(struct bpf_map *map) 7817 { 7818 return (map->map_type != BPF_MAP_TYPE_HASH && 7819 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 7820 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 7821 !(map->map_flags & BPF_F_NO_PREALLOC); 7822 } 7823 7824 static bool is_tracing_prog_type(enum bpf_prog_type type) 7825 { 7826 switch (type) { 7827 case BPF_PROG_TYPE_KPROBE: 7828 case BPF_PROG_TYPE_TRACEPOINT: 7829 case BPF_PROG_TYPE_PERF_EVENT: 7830 case BPF_PROG_TYPE_RAW_TRACEPOINT: 7831 return true; 7832 default: 7833 return false; 7834 } 7835 } 7836 7837 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 7838 struct bpf_map *map, 7839 struct bpf_prog *prog) 7840 7841 { 7842 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use 7843 * preallocated hash maps, since doing memory allocation 7844 * in overflow_handler can crash depending on where nmi got 7845 * triggered. 
7846 */ 7847 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 7848 if (!check_map_prealloc(map)) { 7849 verbose(env, "perf_event programs can only use preallocated hash map\n"); 7850 return -EINVAL; 7851 } 7852 if (map->inner_map_meta && 7853 !check_map_prealloc(map->inner_map_meta)) { 7854 verbose(env, "perf_event programs can only use preallocated inner hash map\n"); 7855 return -EINVAL; 7856 } 7857 } 7858 7859 if ((is_tracing_prog_type(prog->type) || 7860 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) && 7861 map_value_has_spin_lock(map)) { 7862 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 7863 return -EINVAL; 7864 } 7865 7866 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 7867 !bpf_offload_prog_map_match(prog, map)) { 7868 verbose(env, "offload device mismatch between prog and map\n"); 7869 return -EINVAL; 7870 } 7871 7872 return 0; 7873 } 7874 7875 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 7876 { 7877 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 7878 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 7879 } 7880 7881 /* look for pseudo eBPF instructions that access map FDs and 7882 * replace them with actual map pointers 7883 */ 7884 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 7885 { 7886 struct bpf_insn *insn = env->prog->insnsi; 7887 int insn_cnt = env->prog->len; 7888 int i, j, err; 7889 7890 err = bpf_prog_calc_tag(env->prog); 7891 if (err) 7892 return err; 7893 7894 for (i = 0; i < insn_cnt; i++, insn++) { 7895 if (BPF_CLASS(insn->code) == BPF_LDX && 7896 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 7897 verbose(env, "BPF_LDX uses reserved fields\n"); 7898 return -EINVAL; 7899 } 7900 7901 if (BPF_CLASS(insn->code) == BPF_STX && 7902 ((BPF_MODE(insn->code) != BPF_MEM && 7903 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 7904 verbose(env, "BPF_STX uses reserved fields\n"); 7905 return -EINVAL; 7906 } 7907 7908 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 7909 struct bpf_insn_aux_data *aux; 7910 struct bpf_map *map; 7911 struct fd f; 7912 u64 addr; 7913 7914 if (i == insn_cnt - 1 || insn[1].code != 0 || 7915 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 7916 insn[1].off != 0) { 7917 verbose(env, "invalid bpf_ld_imm64 insn\n"); 7918 return -EINVAL; 7919 } 7920 7921 if (insn[0].src_reg == 0) 7922 /* valid generic load 64-bit imm */ 7923 goto next_insn; 7924 7925 /* In final convert_pseudo_ld_imm64() step, this is 7926 * converted into regular 64-bit imm load insn. 
7927 */ 7928 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD && 7929 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) || 7930 (insn[0].src_reg == BPF_PSEUDO_MAP_FD && 7931 insn[1].imm != 0)) { 7932 verbose(env, 7933 "unrecognized bpf_ld_imm64 insn\n"); 7934 return -EINVAL; 7935 } 7936 7937 f = fdget(insn[0].imm); 7938 map = __bpf_map_get(f); 7939 if (IS_ERR(map)) { 7940 verbose(env, "fd %d is not pointing to valid bpf_map\n", 7941 insn[0].imm); 7942 return PTR_ERR(map); 7943 } 7944 7945 err = check_map_prog_compatibility(env, map, env->prog); 7946 if (err) { 7947 fdput(f); 7948 return err; 7949 } 7950 7951 aux = &env->insn_aux_data[i]; 7952 if (insn->src_reg == BPF_PSEUDO_MAP_FD) { 7953 addr = (unsigned long)map; 7954 } else { 7955 u32 off = insn[1].imm; 7956 7957 if (off >= BPF_MAX_VAR_OFF) { 7958 verbose(env, "direct value offset of %u is not allowed\n", off); 7959 fdput(f); 7960 return -EINVAL; 7961 } 7962 7963 if (!map->ops->map_direct_value_addr) { 7964 verbose(env, "no direct value access support for this map type\n"); 7965 fdput(f); 7966 return -EINVAL; 7967 } 7968 7969 err = map->ops->map_direct_value_addr(map, &addr, off); 7970 if (err) { 7971 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 7972 map->value_size, off); 7973 fdput(f); 7974 return err; 7975 } 7976 7977 aux->map_off = off; 7978 addr += off; 7979 } 7980 7981 insn[0].imm = (u32)addr; 7982 insn[1].imm = addr >> 32; 7983 7984 /* check whether we recorded this map already */ 7985 for (j = 0; j < env->used_map_cnt; j++) { 7986 if (env->used_maps[j] == map) { 7987 aux->map_index = j; 7988 fdput(f); 7989 goto next_insn; 7990 } 7991 } 7992 7993 if (env->used_map_cnt >= MAX_USED_MAPS) { 7994 fdput(f); 7995 return -E2BIG; 7996 } 7997 7998 /* hold the map. If the program is rejected by verifier, 7999 * the map will be released by release_maps() or it 8000 * will be used by the valid program until it's unloaded 8001 * and all maps are released in free_used_maps() 8002 */ 8003 map = bpf_map_inc(map, false); 8004 if (IS_ERR(map)) { 8005 fdput(f); 8006 return PTR_ERR(map); 8007 } 8008 8009 aux->map_index = env->used_map_cnt; 8010 env->used_maps[env->used_map_cnt++] = map; 8011 8012 if (bpf_map_is_cgroup_storage(map) && 8013 bpf_cgroup_storage_assign(env->prog, map)) { 8014 verbose(env, "only one cgroup storage of each type is allowed\n"); 8015 fdput(f); 8016 return -EBUSY; 8017 } 8018 8019 fdput(f); 8020 next_insn: 8021 insn++; 8022 i++; 8023 continue; 8024 } 8025 8026 /* Basic sanity check before we invest more work here. */ 8027 if (!bpf_opcode_in_insntable(insn->code)) { 8028 verbose(env, "unknown opcode %02x\n", insn->code); 8029 return -EINVAL; 8030 } 8031 } 8032 8033 /* now all pseudo BPF_LD_IMM64 instructions load valid 8034 * 'struct bpf_map *' into a register instead of user map_fd. 8035 * These pointers will be used later by verifier to validate map access. 
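 *
 * E.g. what was BPF_LD_MAP_FD(BPF_REG_1, map_fd) now carries the low
 * 32 bits of the 'struct bpf_map *' address in insn[0].imm and the
 * high 32 bits in insn[1].imm, exactly as written by the loop above.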
8036 */ 8037 return 0; 8038 } 8039 8040 /* drop refcnt of maps used by the rejected program */ 8041 static void release_maps(struct bpf_verifier_env *env) 8042 { 8043 enum bpf_cgroup_storage_type stype; 8044 int i; 8045 8046 for_each_cgroup_storage_type(stype) { 8047 if (!env->prog->aux->cgroup_storage[stype]) 8048 continue; 8049 bpf_cgroup_storage_release(env->prog, 8050 env->prog->aux->cgroup_storage[stype]); 8051 } 8052 8053 for (i = 0; i < env->used_map_cnt; i++) 8054 bpf_map_put(env->used_maps[i]); 8055 } 8056 8057 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 8058 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 8059 { 8060 struct bpf_insn *insn = env->prog->insnsi; 8061 int insn_cnt = env->prog->len; 8062 int i; 8063 8064 for (i = 0; i < insn_cnt; i++, insn++) 8065 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 8066 insn->src_reg = 0; 8067 } 8068 8069 /* single env->prog->insni[off] instruction was replaced with the range 8070 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 8071 * [0, off) and [off, end) to new locations, so the patched range stays zero 8072 */ 8073 static int adjust_insn_aux_data(struct bpf_verifier_env *env, 8074 struct bpf_prog *new_prog, u32 off, u32 cnt) 8075 { 8076 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 8077 struct bpf_insn *insn = new_prog->insnsi; 8078 u32 prog_len; 8079 int i; 8080 8081 /* aux info at OFF always needs adjustment, no matter fast path 8082 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the 8083 * original insn at old prog. 8084 */ 8085 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 8086 8087 if (cnt == 1) 8088 return 0; 8089 prog_len = new_prog->len; 8090 new_data = vzalloc(array_size(prog_len, 8091 sizeof(struct bpf_insn_aux_data))); 8092 if (!new_data) 8093 return -ENOMEM; 8094 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 8095 memcpy(new_data + off + cnt - 1, old_data + off, 8096 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 8097 for (i = off; i < off + cnt - 1; i++) { 8098 new_data[i].seen = true; 8099 new_data[i].zext_dst = insn_has_def32(env, insn + i); 8100 } 8101 env->insn_aux_data = new_data; 8102 vfree(old_data); 8103 return 0; 8104 } 8105 8106 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 8107 { 8108 int i; 8109 8110 if (len == 1) 8111 return; 8112 /* NOTE: fake 'exit' subprog should be updated as well. 
*/ 8113 for (i = 0; i <= env->subprog_cnt; i++) { 8114 if (env->subprog_info[i].start <= off) 8115 continue; 8116 env->subprog_info[i].start += len - 1; 8117 } 8118 } 8119 8120 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 8121 const struct bpf_insn *patch, u32 len) 8122 { 8123 struct bpf_prog *new_prog; 8124 8125 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 8126 if (IS_ERR(new_prog)) { 8127 if (PTR_ERR(new_prog) == -ERANGE) 8128 verbose(env, 8129 "insn %d cannot be patched due to 16-bit range\n", 8130 env->insn_aux_data[off].orig_idx); 8131 return NULL; 8132 } 8133 if (adjust_insn_aux_data(env, new_prog, off, len)) 8134 return NULL; 8135 adjust_subprog_starts(env, off, len); 8136 return new_prog; 8137 } 8138 8139 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 8140 u32 off, u32 cnt) 8141 { 8142 int i, j; 8143 8144 /* find first prog starting at or after off (first to remove) */ 8145 for (i = 0; i < env->subprog_cnt; i++) 8146 if (env->subprog_info[i].start >= off) 8147 break; 8148 /* find first prog starting at or after off + cnt (first to stay) */ 8149 for (j = i; j < env->subprog_cnt; j++) 8150 if (env->subprog_info[j].start >= off + cnt) 8151 break; 8152 /* if j doesn't start exactly at off + cnt, we are just removing 8153 * the front of previous prog 8154 */ 8155 if (env->subprog_info[j].start != off + cnt) 8156 j--; 8157 8158 if (j > i) { 8159 struct bpf_prog_aux *aux = env->prog->aux; 8160 int move; 8161 8162 /* move fake 'exit' subprog as well */ 8163 move = env->subprog_cnt + 1 - j; 8164 8165 memmove(env->subprog_info + i, 8166 env->subprog_info + j, 8167 sizeof(*env->subprog_info) * move); 8168 env->subprog_cnt -= j - i; 8169 8170 /* remove func_info */ 8171 if (aux->func_info) { 8172 move = aux->func_info_cnt - j; 8173 8174 memmove(aux->func_info + i, 8175 aux->func_info + j, 8176 sizeof(*aux->func_info) * move); 8177 aux->func_info_cnt -= j - i; 8178 /* func_info->insn_off is set after all code rewrites, 8179 * in adjust_btf_func() - no need to adjust 8180 */ 8181 } 8182 } else { 8183 /* convert i from "first prog to remove" to "first to adjust" */ 8184 if (env->subprog_info[i].start == off) 8185 i++; 8186 } 8187 8188 /* update fake 'exit' subprog as well */ 8189 for (; i <= env->subprog_cnt; i++) 8190 env->subprog_info[i].start -= cnt; 8191 8192 return 0; 8193 } 8194 8195 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 8196 u32 cnt) 8197 { 8198 struct bpf_prog *prog = env->prog; 8199 u32 i, l_off, l_cnt, nr_linfo; 8200 struct bpf_line_info *linfo; 8201 8202 nr_linfo = prog->aux->nr_linfo; 8203 if (!nr_linfo) 8204 return 0; 8205 8206 linfo = prog->aux->linfo; 8207 8208 /* find first line info to remove, count lines to be removed */ 8209 for (i = 0; i < nr_linfo; i++) 8210 if (linfo[i].insn_off >= off) 8211 break; 8212 8213 l_off = i; 8214 l_cnt = 0; 8215 for (; i < nr_linfo; i++) 8216 if (linfo[i].insn_off < off + cnt) 8217 l_cnt++; 8218 else 8219 break; 8220 8221 /* First live insn doesn't match first live linfo, it needs to "inherit" 8222 * last removed linfo. prog is already modified, so prog->len == off 8223 * means no live instructions after (tail of the program was removed). 
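	 *
	 * Worked example: line info at insns {0, 4, 8}, off == 4, cnt == 2.
	 * The entry at insn 4 is the only one inside [4, 6); the next live
	 * entry sits at insn 8 rather than at off + cnt == 6, so the entry
	 * at 4 is kept and retargeted to insn 6. After the final shift by
	 * cnt the offsets read {0, 4, 6}: the insn that used to be insn 6
	 * has inherited the line info that previously started at insn 4.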
8224 */ 8225 if (prog->len != off && l_cnt && 8226 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 8227 l_cnt--; 8228 linfo[--i].insn_off = off + cnt; 8229 } 8230 8231 /* remove the line info which refer to the removed instructions */ 8232 if (l_cnt) { 8233 memmove(linfo + l_off, linfo + i, 8234 sizeof(*linfo) * (nr_linfo - i)); 8235 8236 prog->aux->nr_linfo -= l_cnt; 8237 nr_linfo = prog->aux->nr_linfo; 8238 } 8239 8240 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 8241 for (i = l_off; i < nr_linfo; i++) 8242 linfo[i].insn_off -= cnt; 8243 8244 /* fix up all subprogs (incl. 'exit') which start >= off */ 8245 for (i = 0; i <= env->subprog_cnt; i++) 8246 if (env->subprog_info[i].linfo_idx > l_off) { 8247 /* program may have started in the removed region but 8248 * may not be fully removed 8249 */ 8250 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 8251 env->subprog_info[i].linfo_idx -= l_cnt; 8252 else 8253 env->subprog_info[i].linfo_idx = l_off; 8254 } 8255 8256 return 0; 8257 } 8258 8259 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 8260 { 8261 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 8262 unsigned int orig_prog_len = env->prog->len; 8263 int err; 8264 8265 if (bpf_prog_is_dev_bound(env->prog->aux)) 8266 bpf_prog_offload_remove_insns(env, off, cnt); 8267 8268 err = bpf_remove_insns(env->prog, off, cnt); 8269 if (err) 8270 return err; 8271 8272 err = adjust_subprog_starts_after_remove(env, off, cnt); 8273 if (err) 8274 return err; 8275 8276 err = bpf_adj_linfo_after_remove(env, off, cnt); 8277 if (err) 8278 return err; 8279 8280 memmove(aux_data + off, aux_data + off + cnt, 8281 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 8282 8283 return 0; 8284 } 8285 8286 /* The verifier does more data flow analysis than llvm and will not 8287 * explore branches that are dead at run time. Malicious programs can 8288 * have dead code too. Therefore replace all dead at-run-time code 8289 * with 'ja -1'. 8290 * 8291 * Just nops are not optimal, e.g. if they would sit at the end of the 8292 * program and through another bug we would manage to jump there, then 8293 * we'd execute beyond program memory otherwise. Returning exception 8294 * code also wouldn't work since we can have subprogs where the dead 8295 * code could be located. 
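 *
 * Note that 'ja -1' branches back to itself (target == pc + off + 1 == pc),
 * so even if some other bug made control reach a sanitized insn, execution
 * would spin in place instead of running off into unrelated instructions.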
8296 */ 8297 static void sanitize_dead_code(struct bpf_verifier_env *env) 8298 { 8299 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 8300 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 8301 struct bpf_insn *insn = env->prog->insnsi; 8302 const int insn_cnt = env->prog->len; 8303 int i; 8304 8305 for (i = 0; i < insn_cnt; i++) { 8306 if (aux_data[i].seen) 8307 continue; 8308 memcpy(insn + i, &trap, sizeof(trap)); 8309 } 8310 } 8311 8312 static bool insn_is_cond_jump(u8 code) 8313 { 8314 u8 op; 8315 8316 if (BPF_CLASS(code) == BPF_JMP32) 8317 return true; 8318 8319 if (BPF_CLASS(code) != BPF_JMP) 8320 return false; 8321 8322 op = BPF_OP(code); 8323 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 8324 } 8325 8326 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 8327 { 8328 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 8329 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 8330 struct bpf_insn *insn = env->prog->insnsi; 8331 const int insn_cnt = env->prog->len; 8332 int i; 8333 8334 for (i = 0; i < insn_cnt; i++, insn++) { 8335 if (!insn_is_cond_jump(insn->code)) 8336 continue; 8337 8338 if (!aux_data[i + 1].seen) 8339 ja.off = insn->off; 8340 else if (!aux_data[i + 1 + insn->off].seen) 8341 ja.off = 0; 8342 else 8343 continue; 8344 8345 if (bpf_prog_is_dev_bound(env->prog->aux)) 8346 bpf_prog_offload_replace_insn(env, i, &ja); 8347 8348 memcpy(insn, &ja, sizeof(ja)); 8349 } 8350 } 8351 8352 static int opt_remove_dead_code(struct bpf_verifier_env *env) 8353 { 8354 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 8355 int insn_cnt = env->prog->len; 8356 int i, err; 8357 8358 for (i = 0; i < insn_cnt; i++) { 8359 int j; 8360 8361 j = 0; 8362 while (i + j < insn_cnt && !aux_data[i + j].seen) 8363 j++; 8364 if (!j) 8365 continue; 8366 8367 err = verifier_remove_insns(env, i, j); 8368 if (err) 8369 return err; 8370 insn_cnt = env->prog->len; 8371 } 8372 8373 return 0; 8374 } 8375 8376 static int opt_remove_nops(struct bpf_verifier_env *env) 8377 { 8378 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 8379 struct bpf_insn *insn = env->prog->insnsi; 8380 int insn_cnt = env->prog->len; 8381 int i, err; 8382 8383 for (i = 0; i < insn_cnt; i++) { 8384 if (memcmp(&insn[i], &ja, sizeof(ja))) 8385 continue; 8386 8387 err = verifier_remove_insns(env, i, 1); 8388 if (err) 8389 return err; 8390 insn_cnt--; 8391 i--; 8392 } 8393 8394 return 0; 8395 } 8396 8397 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 8398 const union bpf_attr *attr) 8399 { 8400 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 8401 struct bpf_insn_aux_data *aux = env->insn_aux_data; 8402 int i, patch_len, delta = 0, len = env->prog->len; 8403 struct bpf_insn *insns = env->prog->insnsi; 8404 struct bpf_prog *new_prog; 8405 bool rnd_hi32; 8406 8407 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 8408 zext_patch[1] = BPF_ZEXT_REG(0); 8409 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 8410 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 8411 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 8412 for (i = 0; i < len; i++) { 8413 int adj_idx = i + delta; 8414 struct bpf_insn insn; 8415 8416 insn = insns[adj_idx]; 8417 if (!aux[adj_idx].zext_dst) { 8418 u8 code, class; 8419 u32 imm_rnd; 8420 8421 if (!rnd_hi32) 8422 continue; 8423 8424 code = insn.code; 8425 class = BPF_CLASS(code); 8426 if (insn_no_def(&insn)) 8427 continue; 8428 8429 /* NOTE: arg "reg" (the fourth one) is only used for 8430 * 
BPF_STX which has been ruled out in above 8431 * check, it is safe to pass NULL here. 8432 */ 8433 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { 8434 if (class == BPF_LD && 8435 BPF_MODE(code) == BPF_IMM) 8436 i++; 8437 continue; 8438 } 8439 8440 /* ctx load could be transformed into wider load. */ 8441 if (class == BPF_LDX && 8442 aux[adj_idx].ptr_type == PTR_TO_CTX) 8443 continue; 8444 8445 imm_rnd = get_random_int(); 8446 rnd_hi32_patch[0] = insn; 8447 rnd_hi32_patch[1].imm = imm_rnd; 8448 rnd_hi32_patch[3].dst_reg = insn.dst_reg; 8449 patch = rnd_hi32_patch; 8450 patch_len = 4; 8451 goto apply_patch_buffer; 8452 } 8453 8454 if (!bpf_jit_needs_zext()) 8455 continue; 8456 8457 zext_patch[0] = insn; 8458 zext_patch[1].dst_reg = insn.dst_reg; 8459 zext_patch[1].src_reg = insn.dst_reg; 8460 patch = zext_patch; 8461 patch_len = 2; 8462 apply_patch_buffer: 8463 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 8464 if (!new_prog) 8465 return -ENOMEM; 8466 env->prog = new_prog; 8467 insns = new_prog->insnsi; 8468 aux = env->insn_aux_data; 8469 delta += patch_len - 1; 8470 } 8471 8472 return 0; 8473 } 8474 8475 /* convert load instructions that access fields of a context type into a 8476 * sequence of instructions that access fields of the underlying structure: 8477 * struct __sk_buff -> struct sk_buff 8478 * struct bpf_sock_ops -> struct sock 8479 */ 8480 static int convert_ctx_accesses(struct bpf_verifier_env *env) 8481 { 8482 const struct bpf_verifier_ops *ops = env->ops; 8483 int i, cnt, size, ctx_field_size, delta = 0; 8484 const int insn_cnt = env->prog->len; 8485 struct bpf_insn insn_buf[16], *insn; 8486 u32 target_size, size_default, off; 8487 struct bpf_prog *new_prog; 8488 enum bpf_access_type type; 8489 bool is_narrower_load; 8490 8491 if (ops->gen_prologue || env->seen_direct_write) { 8492 if (!ops->gen_prologue) { 8493 verbose(env, "bpf verifier is misconfigured\n"); 8494 return -EINVAL; 8495 } 8496 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 8497 env->prog); 8498 if (cnt >= ARRAY_SIZE(insn_buf)) { 8499 verbose(env, "bpf verifier is misconfigured\n"); 8500 return -EINVAL; 8501 } else if (cnt) { 8502 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 8503 if (!new_prog) 8504 return -ENOMEM; 8505 8506 env->prog = new_prog; 8507 delta += cnt - 1; 8508 } 8509 } 8510 8511 if (bpf_prog_is_dev_bound(env->prog->aux)) 8512 return 0; 8513 8514 insn = env->prog->insnsi + delta; 8515 8516 for (i = 0; i < insn_cnt; i++, insn++) { 8517 bpf_convert_ctx_access_t convert_ctx_access; 8518 8519 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 8520 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 8521 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 8522 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 8523 type = BPF_READ; 8524 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 8525 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 8526 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 8527 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 8528 type = BPF_WRITE; 8529 else 8530 continue; 8531 8532 if (type == BPF_WRITE && 8533 env->insn_aux_data[i + delta].sanitize_stack_off) { 8534 struct bpf_insn patch[] = { 8535 /* Sanitize suspicious stack slot with zero. 
				 * There are no memory dependencies for this store,
				 * since it's only using frame pointer and immediate
				 * constant of zero
				 */
				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
					   env->insn_aux_data[i + delta].sanitize_stack_off,
					   0),
				/* the original STX instruction will immediately
				 * overwrite the same stack slot with appropriate value
				 */
				*insn,
			};

			cnt = ARRAY_SIZE(patch);
			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		switch (env->insn_aux_data[i + delta].ptr_type) {
		case PTR_TO_CTX:
			if (!ops->convert_ctx_access)
				continue;
			convert_ctx_access = ops->convert_ctx_access;
			break;
		case PTR_TO_SOCKET:
		case PTR_TO_SOCK_COMMON:
			convert_ctx_access = bpf_sock_convert_ctx_access;
			break;
		case PTR_TO_TCP_SOCK:
			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
			break;
		case PTR_TO_XDP_SOCK:
			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
			break;
		default:
			continue;
		}

		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
		size = BPF_LDST_BYTES(insn);

		/* If the read access is a narrower load of the field,
		 * convert to a 4/8-byte load, to minimize program type
		 * specific convert_ctx_access changes. If conversion is
		 * successful, we will apply the proper mask to the result.
		 */
		is_narrower_load = size < ctx_field_size;
		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
		off = insn->off;
		if (is_narrower_load) {
			u8 size_code;

			if (type == BPF_WRITE) {
				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
				return -EINVAL;
			}

			size_code = BPF_H;
			if (ctx_field_size == 4)
				size_code = BPF_W;
			else if (ctx_field_size == 8)
				size_code = BPF_DW;

			insn->off = off & ~(size_default - 1);
			insn->code = BPF_LDX | BPF_MEM | size_code;
		}

		target_size = 0;
		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
					 &target_size);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
		    (ctx_field_size && !target_size)) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (is_narrower_load && size < target_size) {
			u8 shift = (off & (size_default - 1)) * 8;

			if (ctx_field_size <= 4) {
				if (shift)
					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
									insn->dst_reg,
									shift);
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
			} else {
				if (shift)
					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
									insn->dst_reg,
									shift);
				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
								(1ULL << size * 8) - 1);
			}
		}

		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;

		delta += cnt - 1;

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn = new_prog->insnsi + i + delta;
	}

	return 0;
}

static int jit_subprogs(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog, **func, *tmp;
	int i, j, subprog_start, subprog_end = 0, len, subprog;
	struct bpf_insn *insn;
	void *old_bpf_func;
	int err;

	if (env->subprog_cnt <= 1)
return 0; 8662 8663 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 8664 if (insn->code != (BPF_JMP | BPF_CALL) || 8665 insn->src_reg != BPF_PSEUDO_CALL) 8666 continue; 8667 /* Upon error here we cannot fall back to interpreter but 8668 * need a hard reject of the program. Thus -EFAULT is 8669 * propagated in any case. 8670 */ 8671 subprog = find_subprog(env, i + insn->imm + 1); 8672 if (subprog < 0) { 8673 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 8674 i + insn->imm + 1); 8675 return -EFAULT; 8676 } 8677 /* temporarily remember subprog id inside insn instead of 8678 * aux_data, since next loop will split up all insns into funcs 8679 */ 8680 insn->off = subprog; 8681 /* remember original imm in case JIT fails and fallback 8682 * to interpreter will be needed 8683 */ 8684 env->insn_aux_data[i].call_imm = insn->imm; 8685 /* point imm to __bpf_call_base+1 from JITs point of view */ 8686 insn->imm = 1; 8687 } 8688 8689 err = bpf_prog_alloc_jited_linfo(prog); 8690 if (err) 8691 goto out_undo_insn; 8692 8693 err = -ENOMEM; 8694 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 8695 if (!func) 8696 goto out_undo_insn; 8697 8698 for (i = 0; i < env->subprog_cnt; i++) { 8699 subprog_start = subprog_end; 8700 subprog_end = env->subprog_info[i + 1].start; 8701 8702 len = subprog_end - subprog_start; 8703 /* BPF_PROG_RUN doesn't call subprogs directly, 8704 * hence main prog stats include the runtime of subprogs. 8705 * subprogs don't have IDs and not reachable via prog_get_next_id 8706 * func[i]->aux->stats will never be accessed and stays NULL 8707 */ 8708 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); 8709 if (!func[i]) 8710 goto out_free; 8711 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], 8712 len * sizeof(struct bpf_insn)); 8713 func[i]->type = prog->type; 8714 func[i]->len = len; 8715 if (bpf_prog_calc_tag(func[i])) 8716 goto out_free; 8717 func[i]->is_func = 1; 8718 func[i]->aux->func_idx = i; 8719 /* the btf and func_info will be freed only at prog->aux */ 8720 func[i]->aux->btf = prog->aux->btf; 8721 func[i]->aux->func_info = prog->aux->func_info; 8722 8723 /* Use bpf_prog_F_tag to indicate functions in stack traces. 
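		 * (such subprogs then typically appear as bpf_prog_<tag>_F
		 * in kallsyms and stack dumps)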
		 * Long term would need debug info to populate names
		 */
		func[i]->aux->name[0] = 'F';
		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
		func[i]->jit_requested = 1;
		func[i]->aux->linfo = prog->aux->linfo;
		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
		func[i] = bpf_int_jit_compile(func[i]);
		if (!func[i]->jited) {
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}
	/* at this point all bpf functions were successfully JITed
	 * now populate all bpf_calls with correct addresses and
	 * run last pass of JIT
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (insn->code != (BPF_JMP | BPF_CALL) ||
			    insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			subprog = insn->off;
			insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
				    __bpf_call_base;
		}

		/* we use the aux data to keep a list of the start addresses
		 * of the JITed images for each function in the program
		 *
		 * for some architectures, such as powerpc64, the imm field
		 * might not be large enough to hold the offset of the start
		 * address of the callee's JITed image from __bpf_call_base
		 *
		 * in such cases, we can look up the start address of a callee
		 * by using its subprog id, available from the off field of
		 * the call instruction, as an index for this list
		 */
		func[i]->aux->func = func;
		func[i]->aux->func_cnt = env->subprog_cnt;
	}
	for (i = 0; i < env->subprog_cnt; i++) {
		old_bpf_func = func[i]->bpf_func;
		tmp = bpf_int_jit_compile(func[i]);
		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}

	/* finally lock prog and jit images for all functions and
	 * populate kallsyms
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		bpf_prog_lock_ro(func[i]);
		bpf_prog_kallsyms_add(func[i]);
	}

	/* Last step: make now unused interpreter insns from main
	 * prog consistent for later dump requests, so they can
	 * later look the same as if they were interpreted only.
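	 *
	 * Concretely, after the loop below each pseudo-call insn in the
	 * main prog carries the original relative offset in insn->off and
	 * the callee's subprog index in insn->imm.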
8791 */ 8792 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 8793 if (insn->code != (BPF_JMP | BPF_CALL) || 8794 insn->src_reg != BPF_PSEUDO_CALL) 8795 continue; 8796 insn->off = env->insn_aux_data[i].call_imm; 8797 subprog = find_subprog(env, i + insn->off + 1); 8798 insn->imm = subprog; 8799 } 8800 8801 prog->jited = 1; 8802 prog->bpf_func = func[0]->bpf_func; 8803 prog->aux->func = func; 8804 prog->aux->func_cnt = env->subprog_cnt; 8805 bpf_prog_free_unused_jited_linfo(prog); 8806 return 0; 8807 out_free: 8808 for (i = 0; i < env->subprog_cnt; i++) 8809 if (func[i]) 8810 bpf_jit_free(func[i]); 8811 kfree(func); 8812 out_undo_insn: 8813 /* cleanup main prog to be interpreted */ 8814 prog->jit_requested = 0; 8815 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 8816 if (insn->code != (BPF_JMP | BPF_CALL) || 8817 insn->src_reg != BPF_PSEUDO_CALL) 8818 continue; 8819 insn->off = 0; 8820 insn->imm = env->insn_aux_data[i].call_imm; 8821 } 8822 bpf_prog_free_jited_linfo(prog); 8823 return err; 8824 } 8825 8826 static int fixup_call_args(struct bpf_verifier_env *env) 8827 { 8828 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 8829 struct bpf_prog *prog = env->prog; 8830 struct bpf_insn *insn = prog->insnsi; 8831 int i, depth; 8832 #endif 8833 int err = 0; 8834 8835 if (env->prog->jit_requested && 8836 !bpf_prog_is_dev_bound(env->prog->aux)) { 8837 err = jit_subprogs(env); 8838 if (err == 0) 8839 return 0; 8840 if (err == -EFAULT) 8841 return err; 8842 } 8843 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 8844 for (i = 0; i < prog->len; i++, insn++) { 8845 if (insn->code != (BPF_JMP | BPF_CALL) || 8846 insn->src_reg != BPF_PSEUDO_CALL) 8847 continue; 8848 depth = get_callee_stack_depth(env, insn, i); 8849 if (depth < 0) 8850 return depth; 8851 bpf_patch_call_args(insn, depth); 8852 } 8853 err = 0; 8854 #endif 8855 return err; 8856 } 8857 8858 /* fixup insn->imm field of bpf_call instructions 8859 * and inline eligible helpers as explicit sequence of BPF instructions 8860 * 8861 * this function is called after eBPF program passed verification 8862 */ 8863 static int fixup_bpf_calls(struct bpf_verifier_env *env) 8864 { 8865 struct bpf_prog *prog = env->prog; 8866 struct bpf_insn *insn = prog->insnsi; 8867 const struct bpf_func_proto *fn; 8868 const int insn_cnt = prog->len; 8869 const struct bpf_map_ops *ops; 8870 struct bpf_insn_aux_data *aux; 8871 struct bpf_insn insn_buf[16]; 8872 struct bpf_prog *new_prog; 8873 struct bpf_map *map_ptr; 8874 int i, cnt, delta = 0; 8875 8876 for (i = 0; i < insn_cnt; i++, insn++) { 8877 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 8878 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 8879 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 8880 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 8881 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 8882 struct bpf_insn mask_and_div[] = { 8883 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 8884 /* Rx div 0 -> 0 */ 8885 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), 8886 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 8887 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 8888 *insn, 8889 }; 8890 struct bpf_insn mask_and_mod[] = { 8891 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 8892 /* Rx mod 0 -> Rx */ 8893 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), 8894 *insn, 8895 }; 8896 struct bpf_insn *patchlet; 8897 8898 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 8899 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 8900 patchlet = mask_and_div + (is64 ? 1 : 0); 8901 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 
1 : 0); 8902 } else { 8903 patchlet = mask_and_mod + (is64 ? 1 : 0); 8904 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0); 8905 } 8906 8907 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 8908 if (!new_prog) 8909 return -ENOMEM; 8910 8911 delta += cnt - 1; 8912 env->prog = prog = new_prog; 8913 insn = new_prog->insnsi + i + delta; 8914 continue; 8915 } 8916 8917 if (BPF_CLASS(insn->code) == BPF_LD && 8918 (BPF_MODE(insn->code) == BPF_ABS || 8919 BPF_MODE(insn->code) == BPF_IND)) { 8920 cnt = env->ops->gen_ld_abs(insn, insn_buf); 8921 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 8922 verbose(env, "bpf verifier is misconfigured\n"); 8923 return -EINVAL; 8924 } 8925 8926 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 8927 if (!new_prog) 8928 return -ENOMEM; 8929 8930 delta += cnt - 1; 8931 env->prog = prog = new_prog; 8932 insn = new_prog->insnsi + i + delta; 8933 continue; 8934 } 8935 8936 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 8937 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 8938 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 8939 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 8940 struct bpf_insn insn_buf[16]; 8941 struct bpf_insn *patch = &insn_buf[0]; 8942 bool issrc, isneg; 8943 u32 off_reg; 8944 8945 aux = &env->insn_aux_data[i + delta]; 8946 if (!aux->alu_state || 8947 aux->alu_state == BPF_ALU_NON_POINTER) 8948 continue; 8949 8950 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 8951 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 8952 BPF_ALU_SANITIZE_SRC; 8953 8954 off_reg = issrc ? insn->src_reg : insn->dst_reg; 8955 if (isneg) 8956 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 8957 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); 8958 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 8959 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 8960 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 8961 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 8962 if (issrc) { 8963 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, 8964 off_reg); 8965 insn->src_reg = BPF_REG_AX; 8966 } else { 8967 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, 8968 BPF_REG_AX); 8969 } 8970 if (isneg) 8971 insn->code = insn->code == code_add ? 8972 code_sub : code_add; 8973 *patch++ = *insn; 8974 if (issrc && isneg) 8975 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 8976 cnt = patch - insn_buf; 8977 8978 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 8979 if (!new_prog) 8980 return -ENOMEM; 8981 8982 delta += cnt - 1; 8983 env->prog = prog = new_prog; 8984 insn = new_prog->insnsi + i + delta; 8985 continue; 8986 } 8987 8988 if (insn->code != (BPF_JMP | BPF_CALL)) 8989 continue; 8990 if (insn->src_reg == BPF_PSEUDO_CALL) 8991 continue; 8992 8993 if (insn->imm == BPF_FUNC_get_route_realm) 8994 prog->dst_needed = 1; 8995 if (insn->imm == BPF_FUNC_get_prandom_u32) 8996 bpf_user_rnd_init_once(); 8997 if (insn->imm == BPF_FUNC_override_return) 8998 prog->kprobe_override = 1; 8999 if (insn->imm == BPF_FUNC_tail_call) { 9000 /* If we tail call into other programs, we 9001 * cannot make any assumptions since they can 9002 * be replaced dynamically during runtime in 9003 * the program array. 
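			 *
			 * E.g. userspace can swap a tail call target at any
			 * time with bpf_map_update_elem() on the
			 * BPF_MAP_TYPE_PROG_ARRAY slot, which is why the
			 * worst case stack depth and packet offset are
			 * assumed below.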
9004 */ 9005 prog->cb_access = 1; 9006 env->prog->aux->stack_depth = MAX_BPF_STACK; 9007 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF; 9008 9009 /* mark bpf_tail_call as different opcode to avoid 9010 * conditional branch in the interpeter for every normal 9011 * call and to prevent accidental JITing by JIT compiler 9012 * that doesn't support bpf_tail_call yet 9013 */ 9014 insn->imm = 0; 9015 insn->code = BPF_JMP | BPF_TAIL_CALL; 9016 9017 aux = &env->insn_aux_data[i + delta]; 9018 if (!bpf_map_ptr_unpriv(aux)) 9019 continue; 9020 9021 /* instead of changing every JIT dealing with tail_call 9022 * emit two extra insns: 9023 * if (index >= max_entries) goto out; 9024 * index &= array->index_mask; 9025 * to avoid out-of-bounds cpu speculation 9026 */ 9027 if (bpf_map_ptr_poisoned(aux)) { 9028 verbose(env, "tail_call abusing map_ptr\n"); 9029 return -EINVAL; 9030 } 9031 9032 map_ptr = BPF_MAP_PTR(aux->map_state); 9033 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 9034 map_ptr->max_entries, 2); 9035 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 9036 container_of(map_ptr, 9037 struct bpf_array, 9038 map)->index_mask); 9039 insn_buf[2] = *insn; 9040 cnt = 3; 9041 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 9042 if (!new_prog) 9043 return -ENOMEM; 9044 9045 delta += cnt - 1; 9046 env->prog = prog = new_prog; 9047 insn = new_prog->insnsi + i + delta; 9048 continue; 9049 } 9050 9051 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 9052 * and other inlining handlers are currently limited to 64 bit 9053 * only. 9054 */ 9055 if (prog->jit_requested && BITS_PER_LONG == 64 && 9056 (insn->imm == BPF_FUNC_map_lookup_elem || 9057 insn->imm == BPF_FUNC_map_update_elem || 9058 insn->imm == BPF_FUNC_map_delete_elem || 9059 insn->imm == BPF_FUNC_map_push_elem || 9060 insn->imm == BPF_FUNC_map_pop_elem || 9061 insn->imm == BPF_FUNC_map_peek_elem)) { 9062 aux = &env->insn_aux_data[i + delta]; 9063 if (bpf_map_ptr_poisoned(aux)) 9064 goto patch_call_imm; 9065 9066 map_ptr = BPF_MAP_PTR(aux->map_state); 9067 ops = map_ptr->ops; 9068 if (insn->imm == BPF_FUNC_map_lookup_elem && 9069 ops->map_gen_lookup) { 9070 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 9071 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 9072 verbose(env, "bpf verifier is misconfigured\n"); 9073 return -EINVAL; 9074 } 9075 9076 new_prog = bpf_patch_insn_data(env, i + delta, 9077 insn_buf, cnt); 9078 if (!new_prog) 9079 return -ENOMEM; 9080 9081 delta += cnt - 1; 9082 env->prog = prog = new_prog; 9083 insn = new_prog->insnsi + i + delta; 9084 continue; 9085 } 9086 9087 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 9088 (void *(*)(struct bpf_map *map, void *key))NULL)); 9089 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 9090 (int (*)(struct bpf_map *map, void *key))NULL)); 9091 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 9092 (int (*)(struct bpf_map *map, void *key, void *value, 9093 u64 flags))NULL)); 9094 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 9095 (int (*)(struct bpf_map *map, void *value, 9096 u64 flags))NULL)); 9097 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 9098 (int (*)(struct bpf_map *map, void *value))NULL)); 9099 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 9100 (int (*)(struct bpf_map *map, void *value))NULL)); 9101 9102 switch (insn->imm) { 9103 case BPF_FUNC_map_lookup_elem: 9104 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - 9105 __bpf_call_base; 9106 continue; 9107 case BPF_FUNC_map_update_elem: 9108 insn->imm = BPF_CAST_CALL(ops->map_update_elem) - 9109 __bpf_call_base; 9110 
patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have a prototype and that the verifier
		 * allowed programs to call must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	return 0;
}

/* free all verifier states still hanging off the free list and the
 * explored_states hash table
 */
static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	sl = env->free_list;
	while (sl) {
		sln = sl->next;
		free_verifier_state(&sl->state, false);
		kfree(sl);
		sl = sln;
	}

	if (!env->explored_states)
		return;

	for (i = 0; i < state_htab_size(env); i++) {
		sl = env->explored_states[i];

		while (sl) {
			sln = sl->next;
			free_verifier_state(&sl->state, false);
			kfree(sl);
			sl = sln;
		}
	}

	kvfree(env->explored_states);
}

static void print_verification_stats(struct bpf_verifier_env *env)
{
	int i;

	if (env->log.level & BPF_LOG_STATS) {
		verbose(env, "verification time %lld usec\n",
			div_u64(env->verification_time, 1000));
		verbose(env, "stack depth ");
		for (i = 0; i < env->subprog_cnt; i++) {
			u32 depth = env->subprog_info[i].stack_depth;

			verbose(env, "%d", depth);
			if (i + 1 < env->subprog_cnt)
				verbose(env, "+");
		}
		verbose(env, "\n");
	}
	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
		"total_states %d peak_states %d mark_read %d\n",
		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
		env->max_states_per_insn, env->total_states,
		env->peak_states, env->longest_mark_read_walk);
}
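/* Roughly what print_verification_stats() emits when verifier logging is
 * enabled; the first two lines additionally require BPF_LOG_STATS in the
 * log level, and the last one is a single line in the actual log. The
 * numbers are made up for illustration:
 *
 *	verification time 304 usec
 *	stack depth 64+0+32
 *	processed 201 insns (limit 1000000) max_states_per_insn 2 total_states 15 peak_states 15 mark_read 4
 */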
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
	      union bpf_attr __user *uattr)
{
	u64 start_time = ktime_get_ns();
	struct bpf_verifier_env *env;
	struct bpf_verifier_log *log;
	int i, len, ret = -EINVAL;
	bool is_priv;

	/* if no program types are registered, no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	len = (*prog)->len;
	env->insn_aux_data =
		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	for (i = 0; i < len; i++)
		env->insn_aux_data[i].orig_idx = i;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];
	is_priv = capable(CAP_SYS_ADMIN);

	/* grab the mutex to protect a few globals used by the verifier */
	if (!is_priv)
		mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
		    !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
			goto err_unlock;
	}

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;
	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
		env->strict_alignment = false;

	env->allow_ptr_leaks = is_priv;

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env->prog);
		if (ret)
			goto skip_full_check;
	}

	env->explored_states = kvcalloc(state_htab_size(env),
					sizeof(struct bpf_verifier_state_list *),
					GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_subprogs(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_btf_info(env, attr, uattr);
	if (ret < 0)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check(env);
	if (env->cur_state) {
		free_verifier_state(env->cur_state, true);
		env->cur_state = NULL;
	}

	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
		ret = bpf_prog_offload_finalize(env);

skip_full_check:
	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	/* instruction rewrites happen after this point */
	if (is_priv) {
		if (ret == 0)
			opt_hard_wire_dead_code_branches(env);
		if (ret == 0)
			ret = opt_remove_dead_code(env);
		if (ret == 0)
			ret = opt_remove_nops(env);
	} else {
		if (ret == 0)
			sanitize_dead_code(env);
	}

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = fixup_bpf_calls(env);

	/* do the 32-bit optimization after insn patching is done, so that the
	 * patched insns can be handled correctly.
	 */
	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ?
						!ret : false;
	}
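	/* Background sketch for the pass above (not a normative description):
	 * eBPF defines 32-bit ALU ops to zero the upper 32 bits of the
	 * destination register, e.g. after
	 *	BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 1),
	 * the high half of R0 must read as zero. On ISAs whose native 32-bit
	 * ops don't zero-extend, the JIT would otherwise have to emit an
	 * extra zero-extend after every such insn. When the JIT reports
	 * bpf_jit_needs_zext(), opt_subreg_zext_lo32_rnd_hi32() instead
	 * patches an explicit zero-extension into the insn stream only where
	 * the upper half is actually relied upon, and verifier_zext tells
	 * the JIT it may omit the per-insn zero-extension.
	 */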
	if (ret == 0)
		ret = fixup_call_args(env);

	env->verification_time = ktime_get_ns() - start_time;
	print_verification_stats(env);

	if (log->level && bpf_verifier_log_full(log))
		ret = -ENOSPC;
	if (log->level && !log->ubuf) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

	if (ret == 0)
		adjust_btf_func(env);

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
err_unlock:
	if (!is_priv)
		mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}
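/* For context: bpf_check() runs as part of the BPF_PROG_LOAD command of the
 * bpf(2) syscall. A minimal, hypothetical user-space loader exercising the
 * log attributes validated above might look like the sketch below ('insns'
 * and 'insn_cnt' are placeholders, error handling elided):
 *
 *	union bpf_attr attr = {};
 *	static char log_buf[65536];
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = insn_cnt;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	attr.log_buf   = (__u64)(unsigned long)log_buf;
 *	attr.log_size  = sizeof(log_buf);	// must be >= 128 (checked above)
 *	attr.log_level = 1;			// request the verification trace
 *	fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */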