// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insns, which may be hit even if the total number
 * of insns is less than 4K, but there are too many branches that change
 * stack/regs. The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
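 *
 * For example (an illustrative sketch, not taken from any particular program),
 * once a register holds PTR_TO_MAP_VALUE the program may dereference it:
 *    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
 * and check_mem_access() verifies that this 4-byte access at offset 0 fits
 * within [ptr, ptr + map's value_size), assuming value_size is at least 4.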
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one such argument constraint.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'.
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null',
 * the function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ?
BPF_MAP_KEY_POISON : 0ULL); 224 } 225 226 struct bpf_call_arg_meta { 227 struct bpf_map *map_ptr; 228 bool raw_mode; 229 bool pkt_access; 230 int regno; 231 int access_size; 232 u64 msize_max_value; 233 int ref_obj_id; 234 int func_id; 235 u32 btf_id; 236 }; 237 238 struct btf *btf_vmlinux; 239 240 static DEFINE_MUTEX(bpf_verifier_lock); 241 242 static const struct bpf_line_info * 243 find_linfo(const struct bpf_verifier_env *env, u32 insn_off) 244 { 245 const struct bpf_line_info *linfo; 246 const struct bpf_prog *prog; 247 u32 i, nr_linfo; 248 249 prog = env->prog; 250 nr_linfo = prog->aux->nr_linfo; 251 252 if (!nr_linfo || insn_off >= prog->len) 253 return NULL; 254 255 linfo = prog->aux->linfo; 256 for (i = 1; i < nr_linfo; i++) 257 if (insn_off < linfo[i].insn_off) 258 break; 259 260 return &linfo[i - 1]; 261 } 262 263 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, 264 va_list args) 265 { 266 unsigned int n; 267 268 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); 269 270 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, 271 "verifier log line truncated - local buffer too short\n"); 272 273 n = min(log->len_total - log->len_used - 1, n); 274 log->kbuf[n] = '\0'; 275 276 if (log->level == BPF_LOG_KERNEL) { 277 pr_err("BPF:%s\n", log->kbuf); 278 return; 279 } 280 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) 281 log->len_used += n; 282 else 283 log->ubuf = NULL; 284 } 285 286 /* log_level controls verbosity level of eBPF verifier. 287 * bpf_verifier_log_write() is used to dump the verification trace to the log, 288 * so the user can figure out what's wrong with the program 289 */ 290 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, 291 const char *fmt, ...) 292 { 293 va_list args; 294 295 if (!bpf_verifier_log_needed(&env->log)) 296 return; 297 298 va_start(args, fmt); 299 bpf_verifier_vlog(&env->log, fmt, args); 300 va_end(args); 301 } 302 EXPORT_SYMBOL_GPL(bpf_verifier_log_write); 303 304 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...) 305 { 306 struct bpf_verifier_env *env = private_data; 307 va_list args; 308 309 if (!bpf_verifier_log_needed(&env->log)) 310 return; 311 312 va_start(args, fmt); 313 bpf_verifier_vlog(&env->log, fmt, args); 314 va_end(args); 315 } 316 317 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log, 318 const char *fmt, ...) 319 { 320 va_list args; 321 322 if (!bpf_verifier_log_needed(log)) 323 return; 324 325 va_start(args, fmt); 326 bpf_verifier_vlog(log, fmt, args); 327 va_end(args); 328 } 329 330 static const char *ltrim(const char *s) 331 { 332 while (isspace(*s)) 333 s++; 334 335 return s; 336 } 337 338 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, 339 u32 insn_off, 340 const char *prefix_fmt, ...) 
341 { 342 const struct bpf_line_info *linfo; 343 344 if (!bpf_verifier_log_needed(&env->log)) 345 return; 346 347 linfo = find_linfo(env, insn_off); 348 if (!linfo || linfo == env->prev_linfo) 349 return; 350 351 if (prefix_fmt) { 352 va_list args; 353 354 va_start(args, prefix_fmt); 355 bpf_verifier_vlog(&env->log, prefix_fmt, args); 356 va_end(args); 357 } 358 359 verbose(env, "%s\n", 360 ltrim(btf_name_by_offset(env->prog->aux->btf, 361 linfo->line_off))); 362 363 env->prev_linfo = linfo; 364 } 365 366 static bool type_is_pkt_pointer(enum bpf_reg_type type) 367 { 368 return type == PTR_TO_PACKET || 369 type == PTR_TO_PACKET_META; 370 } 371 372 static bool type_is_sk_pointer(enum bpf_reg_type type) 373 { 374 return type == PTR_TO_SOCKET || 375 type == PTR_TO_SOCK_COMMON || 376 type == PTR_TO_TCP_SOCK || 377 type == PTR_TO_XDP_SOCK; 378 } 379 380 static bool reg_type_may_be_null(enum bpf_reg_type type) 381 { 382 return type == PTR_TO_MAP_VALUE_OR_NULL || 383 type == PTR_TO_SOCKET_OR_NULL || 384 type == PTR_TO_SOCK_COMMON_OR_NULL || 385 type == PTR_TO_TCP_SOCK_OR_NULL; 386 } 387 388 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) 389 { 390 return reg->type == PTR_TO_MAP_VALUE && 391 map_value_has_spin_lock(reg->map_ptr); 392 } 393 394 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) 395 { 396 return type == PTR_TO_SOCKET || 397 type == PTR_TO_SOCKET_OR_NULL || 398 type == PTR_TO_TCP_SOCK || 399 type == PTR_TO_TCP_SOCK_OR_NULL; 400 } 401 402 static bool arg_type_may_be_refcounted(enum bpf_arg_type type) 403 { 404 return type == ARG_PTR_TO_SOCK_COMMON; 405 } 406 407 /* Determine whether the function releases some resources allocated by another 408 * function call. The first reference type argument will be assumed to be 409 * released by release_reference(). 
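 * For example, bpf_sk_release() is the release function for the socket
 * reference acquired by bpf_sk_lookup_tcp().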
410 */ 411 static bool is_release_function(enum bpf_func_id func_id) 412 { 413 return func_id == BPF_FUNC_sk_release; 414 } 415 416 static bool is_acquire_function(enum bpf_func_id func_id) 417 { 418 return func_id == BPF_FUNC_sk_lookup_tcp || 419 func_id == BPF_FUNC_sk_lookup_udp || 420 func_id == BPF_FUNC_skc_lookup_tcp; 421 } 422 423 static bool is_ptr_cast_function(enum bpf_func_id func_id) 424 { 425 return func_id == BPF_FUNC_tcp_sock || 426 func_id == BPF_FUNC_sk_fullsock; 427 } 428 429 /* string representation of 'enum bpf_reg_type' */ 430 static const char * const reg_type_str[] = { 431 [NOT_INIT] = "?", 432 [SCALAR_VALUE] = "inv", 433 [PTR_TO_CTX] = "ctx", 434 [CONST_PTR_TO_MAP] = "map_ptr", 435 [PTR_TO_MAP_VALUE] = "map_value", 436 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", 437 [PTR_TO_STACK] = "fp", 438 [PTR_TO_PACKET] = "pkt", 439 [PTR_TO_PACKET_META] = "pkt_meta", 440 [PTR_TO_PACKET_END] = "pkt_end", 441 [PTR_TO_FLOW_KEYS] = "flow_keys", 442 [PTR_TO_SOCKET] = "sock", 443 [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", 444 [PTR_TO_SOCK_COMMON] = "sock_common", 445 [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null", 446 [PTR_TO_TCP_SOCK] = "tcp_sock", 447 [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", 448 [PTR_TO_TP_BUFFER] = "tp_buffer", 449 [PTR_TO_XDP_SOCK] = "xdp_sock", 450 [PTR_TO_BTF_ID] = "ptr_", 451 }; 452 453 static char slot_type_char[] = { 454 [STACK_INVALID] = '?', 455 [STACK_SPILL] = 'r', 456 [STACK_MISC] = 'm', 457 [STACK_ZERO] = '0', 458 }; 459 460 static void print_liveness(struct bpf_verifier_env *env, 461 enum bpf_reg_liveness live) 462 { 463 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) 464 verbose(env, "_"); 465 if (live & REG_LIVE_READ) 466 verbose(env, "r"); 467 if (live & REG_LIVE_WRITTEN) 468 verbose(env, "w"); 469 if (live & REG_LIVE_DONE) 470 verbose(env, "D"); 471 } 472 473 static struct bpf_func_state *func(struct bpf_verifier_env *env, 474 const struct bpf_reg_state *reg) 475 { 476 struct bpf_verifier_state *cur = env->cur_state; 477 478 return cur->frame[reg->frameno]; 479 } 480 481 const char *kernel_type_name(u32 id) 482 { 483 return btf_name_by_offset(btf_vmlinux, 484 btf_type_by_id(btf_vmlinux, id)->name_off); 485 } 486 487 static void print_verifier_state(struct bpf_verifier_env *env, 488 const struct bpf_func_state *state) 489 { 490 const struct bpf_reg_state *reg; 491 enum bpf_reg_type t; 492 int i; 493 494 if (state->frameno) 495 verbose(env, " frame%d:", state->frameno); 496 for (i = 0; i < MAX_BPF_REG; i++) { 497 reg = &state->regs[i]; 498 t = reg->type; 499 if (t == NOT_INIT) 500 continue; 501 verbose(env, " R%d", i); 502 print_liveness(env, reg->live); 503 verbose(env, "=%s", reg_type_str[t]); 504 if (t == SCALAR_VALUE && reg->precise) 505 verbose(env, "P"); 506 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && 507 tnum_is_const(reg->var_off)) { 508 /* reg->off should be 0 for SCALAR_VALUE */ 509 verbose(env, "%lld", reg->var_off.value + reg->off); 510 } else { 511 if (t == PTR_TO_BTF_ID) 512 verbose(env, "%s", kernel_type_name(reg->btf_id)); 513 verbose(env, "(id=%d", reg->id); 514 if (reg_type_may_be_refcounted_or_null(t)) 515 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); 516 if (t != SCALAR_VALUE) 517 verbose(env, ",off=%d", reg->off); 518 if (type_is_pkt_pointer(t)) 519 verbose(env, ",r=%d", reg->range); 520 else if (t == CONST_PTR_TO_MAP || 521 t == PTR_TO_MAP_VALUE || 522 t == PTR_TO_MAP_VALUE_OR_NULL) 523 verbose(env, ",ks=%d,vs=%d", 524 reg->map_ptr->key_size, 525 reg->map_ptr->value_size); 526 if 
(tnum_is_const(reg->var_off)) { 527 /* Typically an immediate SCALAR_VALUE, but 528 * could be a pointer whose offset is too big 529 * for reg->off 530 */ 531 verbose(env, ",imm=%llx", reg->var_off.value); 532 } else { 533 if (reg->smin_value != reg->umin_value && 534 reg->smin_value != S64_MIN) 535 verbose(env, ",smin_value=%lld", 536 (long long)reg->smin_value); 537 if (reg->smax_value != reg->umax_value && 538 reg->smax_value != S64_MAX) 539 verbose(env, ",smax_value=%lld", 540 (long long)reg->smax_value); 541 if (reg->umin_value != 0) 542 verbose(env, ",umin_value=%llu", 543 (unsigned long long)reg->umin_value); 544 if (reg->umax_value != U64_MAX) 545 verbose(env, ",umax_value=%llu", 546 (unsigned long long)reg->umax_value); 547 if (!tnum_is_unknown(reg->var_off)) { 548 char tn_buf[48]; 549 550 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 551 verbose(env, ",var_off=%s", tn_buf); 552 } 553 if (reg->s32_min_value != reg->smin_value && 554 reg->s32_min_value != S32_MIN) 555 verbose(env, ",s32_min_value=%d", 556 (int)(reg->s32_min_value)); 557 if (reg->s32_max_value != reg->smax_value && 558 reg->s32_max_value != S32_MAX) 559 verbose(env, ",s32_max_value=%d", 560 (int)(reg->s32_max_value)); 561 if (reg->u32_min_value != reg->umin_value && 562 reg->u32_min_value != U32_MIN) 563 verbose(env, ",u32_min_value=%d", 564 (int)(reg->u32_min_value)); 565 if (reg->u32_max_value != reg->umax_value && 566 reg->u32_max_value != U32_MAX) 567 verbose(env, ",u32_max_value=%d", 568 (int)(reg->u32_max_value)); 569 } 570 verbose(env, ")"); 571 } 572 } 573 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 574 char types_buf[BPF_REG_SIZE + 1]; 575 bool valid = false; 576 int j; 577 578 for (j = 0; j < BPF_REG_SIZE; j++) { 579 if (state->stack[i].slot_type[j] != STACK_INVALID) 580 valid = true; 581 types_buf[j] = slot_type_char[ 582 state->stack[i].slot_type[j]]; 583 } 584 types_buf[BPF_REG_SIZE] = 0; 585 if (!valid) 586 continue; 587 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 588 print_liveness(env, state->stack[i].spilled_ptr.live); 589 if (state->stack[i].slot_type[0] == STACK_SPILL) { 590 reg = &state->stack[i].spilled_ptr; 591 t = reg->type; 592 verbose(env, "=%s", reg_type_str[t]); 593 if (t == SCALAR_VALUE && reg->precise) 594 verbose(env, "P"); 595 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) 596 verbose(env, "%lld", reg->var_off.value + reg->off); 597 } else { 598 verbose(env, "=%s", types_buf); 599 } 600 } 601 if (state->acquired_refs && state->refs[0].id) { 602 verbose(env, " refs=%d", state->refs[0].id); 603 for (i = 1; i < state->acquired_refs; i++) 604 if (state->refs[i].id) 605 verbose(env, ",%d", state->refs[i].id); 606 } 607 verbose(env, "\n"); 608 } 609 610 #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ 611 static int copy_##NAME##_state(struct bpf_func_state *dst, \ 612 const struct bpf_func_state *src) \ 613 { \ 614 if (!src->FIELD) \ 615 return 0; \ 616 if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ 617 /* internal bug, make state invalid to reject the program */ \ 618 memset(dst, 0, sizeof(*dst)); \ 619 return -EFAULT; \ 620 } \ 621 memcpy(dst->FIELD, src->FIELD, \ 622 sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ 623 return 0; \ 624 } 625 /* copy_reference_state() */ 626 COPY_STATE_FN(reference, acquired_refs, refs, 1) 627 /* copy_stack_state() */ 628 COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) 629 #undef COPY_STATE_FN 630 631 #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ 632 static int realloc_##NAME##_state(struct bpf_func_state 
*state, int size, \ 633 bool copy_old) \ 634 { \ 635 u32 old_size = state->COUNT; \ 636 struct bpf_##NAME##_state *new_##FIELD; \ 637 int slot = size / SIZE; \ 638 \ 639 if (size <= old_size || !size) { \ 640 if (copy_old) \ 641 return 0; \ 642 state->COUNT = slot * SIZE; \ 643 if (!size && old_size) { \ 644 kfree(state->FIELD); \ 645 state->FIELD = NULL; \ 646 } \ 647 return 0; \ 648 } \ 649 new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ 650 GFP_KERNEL); \ 651 if (!new_##FIELD) \ 652 return -ENOMEM; \ 653 if (copy_old) { \ 654 if (state->FIELD) \ 655 memcpy(new_##FIELD, state->FIELD, \ 656 sizeof(*new_##FIELD) * (old_size / SIZE)); \ 657 memset(new_##FIELD + old_size / SIZE, 0, \ 658 sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ 659 } \ 660 state->COUNT = slot * SIZE; \ 661 kfree(state->FIELD); \ 662 state->FIELD = new_##FIELD; \ 663 return 0; \ 664 } 665 /* realloc_reference_state() */ 666 REALLOC_STATE_FN(reference, acquired_refs, refs, 1) 667 /* realloc_stack_state() */ 668 REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) 669 #undef REALLOC_STATE_FN 670 671 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to 672 * make it consume minimal amount of memory. check_stack_write() access from 673 * the program calls into realloc_func_state() to grow the stack size. 674 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state 675 * which realloc_stack_state() copies over. It points to previous 676 * bpf_verifier_state which is never reallocated. 677 */ 678 static int realloc_func_state(struct bpf_func_state *state, int stack_size, 679 int refs_size, bool copy_old) 680 { 681 int err = realloc_reference_state(state, refs_size, copy_old); 682 if (err) 683 return err; 684 return realloc_stack_state(state, stack_size, copy_old); 685 } 686 687 /* Acquire a pointer id from the env and update the state->refs to include 688 * this new pointer reference. 689 * On success, returns a valid pointer id to associate with the register 690 * On failure, returns a negative errno. 691 */ 692 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) 693 { 694 struct bpf_func_state *state = cur_func(env); 695 int new_ofs = state->acquired_refs; 696 int id, err; 697 698 err = realloc_reference_state(state, state->acquired_refs + 1, true); 699 if (err) 700 return err; 701 id = ++env->id_gen; 702 state->refs[new_ofs].id = id; 703 state->refs[new_ofs].insn_idx = insn_idx; 704 705 return id; 706 } 707 708 /* release function corresponding to acquire_reference_state(). Idempotent. 
*/ 709 static int release_reference_state(struct bpf_func_state *state, int ptr_id) 710 { 711 int i, last_idx; 712 713 last_idx = state->acquired_refs - 1; 714 for (i = 0; i < state->acquired_refs; i++) { 715 if (state->refs[i].id == ptr_id) { 716 if (last_idx && i != last_idx) 717 memcpy(&state->refs[i], &state->refs[last_idx], 718 sizeof(*state->refs)); 719 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); 720 state->acquired_refs--; 721 return 0; 722 } 723 } 724 return -EINVAL; 725 } 726 727 static int transfer_reference_state(struct bpf_func_state *dst, 728 struct bpf_func_state *src) 729 { 730 int err = realloc_reference_state(dst, src->acquired_refs, false); 731 if (err) 732 return err; 733 err = copy_reference_state(dst, src); 734 if (err) 735 return err; 736 return 0; 737 } 738 739 static void free_func_state(struct bpf_func_state *state) 740 { 741 if (!state) 742 return; 743 kfree(state->refs); 744 kfree(state->stack); 745 kfree(state); 746 } 747 748 static void clear_jmp_history(struct bpf_verifier_state *state) 749 { 750 kfree(state->jmp_history); 751 state->jmp_history = NULL; 752 state->jmp_history_cnt = 0; 753 } 754 755 static void free_verifier_state(struct bpf_verifier_state *state, 756 bool free_self) 757 { 758 int i; 759 760 for (i = 0; i <= state->curframe; i++) { 761 free_func_state(state->frame[i]); 762 state->frame[i] = NULL; 763 } 764 clear_jmp_history(state); 765 if (free_self) 766 kfree(state); 767 } 768 769 /* copy verifier state from src to dst growing dst stack space 770 * when necessary to accommodate larger src stack 771 */ 772 static int copy_func_state(struct bpf_func_state *dst, 773 const struct bpf_func_state *src) 774 { 775 int err; 776 777 err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, 778 false); 779 if (err) 780 return err; 781 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); 782 err = copy_reference_state(dst, src); 783 if (err) 784 return err; 785 return copy_stack_state(dst, src); 786 } 787 788 static int copy_verifier_state(struct bpf_verifier_state *dst_state, 789 const struct bpf_verifier_state *src) 790 { 791 struct bpf_func_state *dst; 792 u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt; 793 int i, err; 794 795 if (dst_state->jmp_history_cnt < src->jmp_history_cnt) { 796 kfree(dst_state->jmp_history); 797 dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER); 798 if (!dst_state->jmp_history) 799 return -ENOMEM; 800 } 801 memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz); 802 dst_state->jmp_history_cnt = src->jmp_history_cnt; 803 804 /* if dst has more stack frames then src frame, free them */ 805 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { 806 free_func_state(dst_state->frame[i]); 807 dst_state->frame[i] = NULL; 808 } 809 dst_state->speculative = src->speculative; 810 dst_state->curframe = src->curframe; 811 dst_state->active_spin_lock = src->active_spin_lock; 812 dst_state->branches = src->branches; 813 dst_state->parent = src->parent; 814 dst_state->first_insn_idx = src->first_insn_idx; 815 dst_state->last_insn_idx = src->last_insn_idx; 816 for (i = 0; i <= src->curframe; i++) { 817 dst = dst_state->frame[i]; 818 if (!dst) { 819 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 820 if (!dst) 821 return -ENOMEM; 822 dst_state->frame[i] = dst; 823 } 824 err = copy_func_state(dst, src->frame[i]); 825 if (err) 826 return err; 827 } 828 return 0; 829 } 830 831 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 832 { 833 while 
(st) { 834 u32 br = --st->branches; 835 836 /* WARN_ON(br > 1) technically makes sense here, 837 * but see comment in push_stack(), hence: 838 */ 839 WARN_ONCE((int)br < 0, 840 "BUG update_branch_counts:branches_to_explore=%d\n", 841 br); 842 if (br) 843 break; 844 st = st->parent; 845 } 846 } 847 848 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, 849 int *insn_idx) 850 { 851 struct bpf_verifier_state *cur = env->cur_state; 852 struct bpf_verifier_stack_elem *elem, *head = env->head; 853 int err; 854 855 if (env->head == NULL) 856 return -ENOENT; 857 858 if (cur) { 859 err = copy_verifier_state(cur, &head->st); 860 if (err) 861 return err; 862 } 863 if (insn_idx) 864 *insn_idx = head->insn_idx; 865 if (prev_insn_idx) 866 *prev_insn_idx = head->prev_insn_idx; 867 elem = head->next; 868 free_verifier_state(&head->st, false); 869 kfree(head); 870 env->head = elem; 871 env->stack_size--; 872 return 0; 873 } 874 875 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 876 int insn_idx, int prev_insn_idx, 877 bool speculative) 878 { 879 struct bpf_verifier_state *cur = env->cur_state; 880 struct bpf_verifier_stack_elem *elem; 881 int err; 882 883 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 884 if (!elem) 885 goto err; 886 887 elem->insn_idx = insn_idx; 888 elem->prev_insn_idx = prev_insn_idx; 889 elem->next = env->head; 890 env->head = elem; 891 env->stack_size++; 892 err = copy_verifier_state(&elem->st, cur); 893 if (err) 894 goto err; 895 elem->st.speculative |= speculative; 896 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 897 verbose(env, "The sequence of %d jumps is too complex.\n", 898 env->stack_size); 899 goto err; 900 } 901 if (elem->st.parent) { 902 ++elem->st.parent->branches; 903 /* WARN_ON(branches > 2) technically makes sense here, 904 * but 905 * 1. speculative states will bump 'branches' for non-branch 906 * instructions 907 * 2. is_state_visited() heuristics may decide not to create 908 * a new state for a sequence of branches and all such current 909 * and cloned states will be pointing to a single parent state 910 * which might have large 'branches' count. 911 */ 912 } 913 return &elem->st; 914 err: 915 free_verifier_state(env->cur_state, true); 916 env->cur_state = NULL; 917 /* pop all elements and return */ 918 while (!pop_stack(env, NULL, NULL)); 919 return NULL; 920 } 921 922 #define CALLER_SAVED_REGS 6 923 static const int caller_saved[CALLER_SAVED_REGS] = { 924 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 925 }; 926 927 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 928 struct bpf_reg_state *reg); 929 930 /* Mark the unknown part of a register (variable offset or scalar value) as 931 * known to have the value @imm. 
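 * For example (illustration only), __mark_reg_known(reg, 5) leaves
 * var_off == tnum_const(5) and collapses smin/smax/umin/umax as well as the
 * 32-bit s32/u32 bounds to 5.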
932 */ 933 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) 934 { 935 /* Clear id, off, and union(map_ptr, range) */ 936 memset(((u8 *)reg) + sizeof(reg->type), 0, 937 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); 938 reg->var_off = tnum_const(imm); 939 reg->smin_value = (s64)imm; 940 reg->smax_value = (s64)imm; 941 reg->umin_value = imm; 942 reg->umax_value = imm; 943 944 reg->s32_min_value = (s32)imm; 945 reg->s32_max_value = (s32)imm; 946 reg->u32_min_value = (u32)imm; 947 reg->u32_max_value = (u32)imm; 948 } 949 950 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm) 951 { 952 reg->var_off = tnum_const_subreg(reg->var_off, imm); 953 reg->s32_min_value = (s32)imm; 954 reg->s32_max_value = (s32)imm; 955 reg->u32_min_value = (u32)imm; 956 reg->u32_max_value = (u32)imm; 957 } 958 959 /* Mark the 'variable offset' part of a register as zero. This should be 960 * used only on registers holding a pointer type. 961 */ 962 static void __mark_reg_known_zero(struct bpf_reg_state *reg) 963 { 964 __mark_reg_known(reg, 0); 965 } 966 967 static void __mark_reg_const_zero(struct bpf_reg_state *reg) 968 { 969 __mark_reg_known(reg, 0); 970 reg->type = SCALAR_VALUE; 971 } 972 973 static void mark_reg_known_zero(struct bpf_verifier_env *env, 974 struct bpf_reg_state *regs, u32 regno) 975 { 976 if (WARN_ON(regno >= MAX_BPF_REG)) { 977 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); 978 /* Something bad happened, let's kill all regs */ 979 for (regno = 0; regno < MAX_BPF_REG; regno++) 980 __mark_reg_not_init(env, regs + regno); 981 return; 982 } 983 __mark_reg_known_zero(regs + regno); 984 } 985 986 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) 987 { 988 return type_is_pkt_pointer(reg->type); 989 } 990 991 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) 992 { 993 return reg_is_pkt_pointer(reg) || 994 reg->type == PTR_TO_PACKET_END; 995 } 996 997 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ 998 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, 999 enum bpf_reg_type which) 1000 { 1001 /* The register can already have a range from prior markings. 1002 * This is fine as long as it hasn't been advanced from its 1003 * origin. 
1004 */ 1005 return reg->type == which && 1006 reg->id == 0 && 1007 reg->off == 0 && 1008 tnum_equals_const(reg->var_off, 0); 1009 } 1010 1011 /* Reset the min/max bounds of a register */ 1012 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 1013 { 1014 reg->smin_value = S64_MIN; 1015 reg->smax_value = S64_MAX; 1016 reg->umin_value = 0; 1017 reg->umax_value = U64_MAX; 1018 1019 reg->s32_min_value = S32_MIN; 1020 reg->s32_max_value = S32_MAX; 1021 reg->u32_min_value = 0; 1022 reg->u32_max_value = U32_MAX; 1023 } 1024 1025 static void __mark_reg64_unbounded(struct bpf_reg_state *reg) 1026 { 1027 reg->smin_value = S64_MIN; 1028 reg->smax_value = S64_MAX; 1029 reg->umin_value = 0; 1030 reg->umax_value = U64_MAX; 1031 } 1032 1033 static void __mark_reg32_unbounded(struct bpf_reg_state *reg) 1034 { 1035 reg->s32_min_value = S32_MIN; 1036 reg->s32_max_value = S32_MAX; 1037 reg->u32_min_value = 0; 1038 reg->u32_max_value = U32_MAX; 1039 } 1040 1041 static void __update_reg32_bounds(struct bpf_reg_state *reg) 1042 { 1043 struct tnum var32_off = tnum_subreg(reg->var_off); 1044 1045 /* min signed is max(sign bit) | min(other bits) */ 1046 reg->s32_min_value = max_t(s32, reg->s32_min_value, 1047 var32_off.value | (var32_off.mask & S32_MIN)); 1048 /* max signed is min(sign bit) | max(other bits) */ 1049 reg->s32_max_value = min_t(s32, reg->s32_max_value, 1050 var32_off.value | (var32_off.mask & S32_MAX)); 1051 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); 1052 reg->u32_max_value = min(reg->u32_max_value, 1053 (u32)(var32_off.value | var32_off.mask)); 1054 } 1055 1056 static void __update_reg64_bounds(struct bpf_reg_state *reg) 1057 { 1058 /* min signed is max(sign bit) | min(other bits) */ 1059 reg->smin_value = max_t(s64, reg->smin_value, 1060 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 1061 /* max signed is min(sign bit) | max(other bits) */ 1062 reg->smax_value = min_t(s64, reg->smax_value, 1063 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 1064 reg->umin_value = max(reg->umin_value, reg->var_off.value); 1065 reg->umax_value = min(reg->umax_value, 1066 reg->var_off.value | reg->var_off.mask); 1067 } 1068 1069 static void __update_reg_bounds(struct bpf_reg_state *reg) 1070 { 1071 __update_reg32_bounds(reg); 1072 __update_reg64_bounds(reg); 1073 } 1074 1075 /* Uses signed min/max values to inform unsigned, and vice-versa */ 1076 static void __reg32_deduce_bounds(struct bpf_reg_state *reg) 1077 { 1078 /* Learn sign from signed bounds. 1079 * If we cannot cross the sign boundary, then signed and unsigned bounds 1080 * are the same, so combine. This works even in the negative case, e.g. 1081 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 1082 */ 1083 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { 1084 reg->s32_min_value = reg->u32_min_value = 1085 max_t(u32, reg->s32_min_value, reg->u32_min_value); 1086 reg->s32_max_value = reg->u32_max_value = 1087 min_t(u32, reg->s32_max_value, reg->u32_max_value); 1088 return; 1089 } 1090 /* Learn sign from unsigned bounds. Signed bounds cross the sign 1091 * boundary, so we must be careful. 1092 */ 1093 if ((s32)reg->u32_max_value >= 0) { 1094 /* Positive. We can't learn anything from the smin, but smax 1095 * is positive, hence safe. 1096 */ 1097 reg->s32_min_value = reg->u32_min_value; 1098 reg->s32_max_value = reg->u32_max_value = 1099 min_t(u32, reg->s32_max_value, reg->u32_max_value); 1100 } else if ((s32)reg->u32_min_value < 0) { 1101 /* Negative. 
We can't learn anything from the smax, but smin 1102 * is negative, hence safe. 1103 */ 1104 reg->s32_min_value = reg->u32_min_value = 1105 max_t(u32, reg->s32_min_value, reg->u32_min_value); 1106 reg->s32_max_value = reg->u32_max_value; 1107 } 1108 } 1109 1110 static void __reg64_deduce_bounds(struct bpf_reg_state *reg) 1111 { 1112 /* Learn sign from signed bounds. 1113 * If we cannot cross the sign boundary, then signed and unsigned bounds 1114 * are the same, so combine. This works even in the negative case, e.g. 1115 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 1116 */ 1117 if (reg->smin_value >= 0 || reg->smax_value < 0) { 1118 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 1119 reg->umin_value); 1120 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 1121 reg->umax_value); 1122 return; 1123 } 1124 /* Learn sign from unsigned bounds. Signed bounds cross the sign 1125 * boundary, so we must be careful. 1126 */ 1127 if ((s64)reg->umax_value >= 0) { 1128 /* Positive. We can't learn anything from the smin, but smax 1129 * is positive, hence safe. 1130 */ 1131 reg->smin_value = reg->umin_value; 1132 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 1133 reg->umax_value); 1134 } else if ((s64)reg->umin_value < 0) { 1135 /* Negative. We can't learn anything from the smax, but smin 1136 * is negative, hence safe. 1137 */ 1138 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 1139 reg->umin_value); 1140 reg->smax_value = reg->umax_value; 1141 } 1142 } 1143 1144 static void __reg_deduce_bounds(struct bpf_reg_state *reg) 1145 { 1146 __reg32_deduce_bounds(reg); 1147 __reg64_deduce_bounds(reg); 1148 } 1149 1150 /* Attempts to improve var_off based on unsigned min/max information */ 1151 static void __reg_bound_offset(struct bpf_reg_state *reg) 1152 { 1153 struct tnum var64_off = tnum_intersect(reg->var_off, 1154 tnum_range(reg->umin_value, 1155 reg->umax_value)); 1156 struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off), 1157 tnum_range(reg->u32_min_value, 1158 reg->u32_max_value)); 1159 1160 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); 1161 } 1162 1163 static void __reg_assign_32_into_64(struct bpf_reg_state *reg) 1164 { 1165 reg->umin_value = reg->u32_min_value; 1166 reg->umax_value = reg->u32_max_value; 1167 /* Attempt to pull 32-bit signed bounds into 64-bit bounds 1168 * but must be positive otherwise set to worse case bounds 1169 * and refine later from tnum. 1170 */ 1171 if (reg->s32_min_value > 0) 1172 reg->smin_value = reg->s32_min_value; 1173 else 1174 reg->smin_value = 0; 1175 if (reg->s32_max_value > 0) 1176 reg->smax_value = reg->s32_max_value; 1177 else 1178 reg->smax_value = U32_MAX; 1179 } 1180 1181 static void __reg_combine_32_into_64(struct bpf_reg_state *reg) 1182 { 1183 /* special case when 64-bit register has upper 32-bit register 1184 * zeroed. Typically happens after zext or <<32, >>32 sequence 1185 * allowing us to use 32-bit bounds directly, 1186 */ 1187 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) { 1188 __reg_assign_32_into_64(reg); 1189 } else { 1190 /* Otherwise the best we can do is push lower 32bit known and 1191 * unknown bits into register (var_off set from jmp logic) 1192 * then learn as much as possible from the 64-bit tnum 1193 * known and unknown bits. The previous smin/smax bounds are 1194 * invalid here because of jmp32 compare so mark them unknown 1195 * so they do not impact tnum bounds calculation. 
1196 */ 1197 __mark_reg64_unbounded(reg); 1198 __update_reg_bounds(reg); 1199 } 1200 1201 /* Intersecting with the old var_off might have improved our bounds 1202 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 1203 * then new var_off is (0; 0x7f...fc) which improves our umax. 1204 */ 1205 __reg_deduce_bounds(reg); 1206 __reg_bound_offset(reg); 1207 __update_reg_bounds(reg); 1208 } 1209 1210 static bool __reg64_bound_s32(s64 a) 1211 { 1212 if (a > S32_MIN && a < S32_MAX) 1213 return true; 1214 return false; 1215 } 1216 1217 static bool __reg64_bound_u32(u64 a) 1218 { 1219 if (a > U32_MIN && a < U32_MAX) 1220 return true; 1221 return false; 1222 } 1223 1224 static void __reg_combine_64_into_32(struct bpf_reg_state *reg) 1225 { 1226 __mark_reg32_unbounded(reg); 1227 1228 if (__reg64_bound_s32(reg->smin_value)) 1229 reg->s32_min_value = (s32)reg->smin_value; 1230 if (__reg64_bound_s32(reg->smax_value)) 1231 reg->s32_max_value = (s32)reg->smax_value; 1232 if (__reg64_bound_u32(reg->umin_value)) 1233 reg->u32_min_value = (u32)reg->umin_value; 1234 if (__reg64_bound_u32(reg->umax_value)) 1235 reg->u32_max_value = (u32)reg->umax_value; 1236 1237 /* Intersecting with the old var_off might have improved our bounds 1238 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 1239 * then new var_off is (0; 0x7f...fc) which improves our umax. 1240 */ 1241 __reg_deduce_bounds(reg); 1242 __reg_bound_offset(reg); 1243 __update_reg_bounds(reg); 1244 } 1245 1246 /* Mark a register as having a completely unknown (scalar) value. */ 1247 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 1248 struct bpf_reg_state *reg) 1249 { 1250 /* 1251 * Clear type, id, off, and union(map_ptr, range) and 1252 * padding between 'type' and union 1253 */ 1254 memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); 1255 reg->type = SCALAR_VALUE; 1256 reg->var_off = tnum_unknown; 1257 reg->frameno = 0; 1258 reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? 
1259 true : false; 1260 __mark_reg_unbounded(reg); 1261 } 1262 1263 static void mark_reg_unknown(struct bpf_verifier_env *env, 1264 struct bpf_reg_state *regs, u32 regno) 1265 { 1266 if (WARN_ON(regno >= MAX_BPF_REG)) { 1267 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 1268 /* Something bad happened, let's kill all regs except FP */ 1269 for (regno = 0; regno < BPF_REG_FP; regno++) 1270 __mark_reg_not_init(env, regs + regno); 1271 return; 1272 } 1273 __mark_reg_unknown(env, regs + regno); 1274 } 1275 1276 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 1277 struct bpf_reg_state *reg) 1278 { 1279 __mark_reg_unknown(env, reg); 1280 reg->type = NOT_INIT; 1281 } 1282 1283 static void mark_reg_not_init(struct bpf_verifier_env *env, 1284 struct bpf_reg_state *regs, u32 regno) 1285 { 1286 if (WARN_ON(regno >= MAX_BPF_REG)) { 1287 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 1288 /* Something bad happened, let's kill all regs except FP */ 1289 for (regno = 0; regno < BPF_REG_FP; regno++) 1290 __mark_reg_not_init(env, regs + regno); 1291 return; 1292 } 1293 __mark_reg_not_init(env, regs + regno); 1294 } 1295 1296 #define DEF_NOT_SUBREG (0) 1297 static void init_reg_state(struct bpf_verifier_env *env, 1298 struct bpf_func_state *state) 1299 { 1300 struct bpf_reg_state *regs = state->regs; 1301 int i; 1302 1303 for (i = 0; i < MAX_BPF_REG; i++) { 1304 mark_reg_not_init(env, regs, i); 1305 regs[i].live = REG_LIVE_NONE; 1306 regs[i].parent = NULL; 1307 regs[i].subreg_def = DEF_NOT_SUBREG; 1308 } 1309 1310 /* frame pointer */ 1311 regs[BPF_REG_FP].type = PTR_TO_STACK; 1312 mark_reg_known_zero(env, regs, BPF_REG_FP); 1313 regs[BPF_REG_FP].frameno = state->frameno; 1314 } 1315 1316 #define BPF_MAIN_FUNC (-1) 1317 static void init_func_state(struct bpf_verifier_env *env, 1318 struct bpf_func_state *state, 1319 int callsite, int frameno, int subprogno) 1320 { 1321 state->callsite = callsite; 1322 state->frameno = frameno; 1323 state->subprogno = subprogno; 1324 init_reg_state(env, state); 1325 } 1326 1327 enum reg_arg_type { 1328 SRC_OP, /* register is used as source operand */ 1329 DST_OP, /* register is used as destination operand */ 1330 DST_OP_NO_MARK /* same as above, check only, don't mark */ 1331 }; 1332 1333 static int cmp_subprogs(const void *a, const void *b) 1334 { 1335 return ((struct bpf_subprog_info *)a)->start - 1336 ((struct bpf_subprog_info *)b)->start; 1337 } 1338 1339 static int find_subprog(struct bpf_verifier_env *env, int off) 1340 { 1341 struct bpf_subprog_info *p; 1342 1343 p = bsearch(&off, env->subprog_info, env->subprog_cnt, 1344 sizeof(env->subprog_info[0]), cmp_subprogs); 1345 if (!p) 1346 return -ENOENT; 1347 return p - env->subprog_info; 1348 1349 } 1350 1351 static int add_subprog(struct bpf_verifier_env *env, int off) 1352 { 1353 int insn_cnt = env->prog->len; 1354 int ret; 1355 1356 if (off >= insn_cnt || off < 0) { 1357 verbose(env, "call to invalid destination\n"); 1358 return -EINVAL; 1359 } 1360 ret = find_subprog(env, off); 1361 if (ret >= 0) 1362 return 0; 1363 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { 1364 verbose(env, "too many subprograms\n"); 1365 return -E2BIG; 1366 } 1367 env->subprog_info[env->subprog_cnt++].start = off; 1368 sort(env->subprog_info, env->subprog_cnt, 1369 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); 1370 return 0; 1371 } 1372 1373 static int check_subprogs(struct bpf_verifier_env *env) 1374 { 1375 int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; 1376 struct bpf_subprog_info *subprog = 
env->subprog_info; 1377 struct bpf_insn *insn = env->prog->insnsi; 1378 int insn_cnt = env->prog->len; 1379 1380 /* Add entry function. */ 1381 ret = add_subprog(env, 0); 1382 if (ret < 0) 1383 return ret; 1384 1385 /* determine subprog starts. The end is one before the next starts */ 1386 for (i = 0; i < insn_cnt; i++) { 1387 if (insn[i].code != (BPF_JMP | BPF_CALL)) 1388 continue; 1389 if (insn[i].src_reg != BPF_PSEUDO_CALL) 1390 continue; 1391 if (!env->allow_ptr_leaks) { 1392 verbose(env, "function calls to other bpf functions are allowed for root only\n"); 1393 return -EPERM; 1394 } 1395 ret = add_subprog(env, i + insn[i].imm + 1); 1396 if (ret < 0) 1397 return ret; 1398 } 1399 1400 /* Add a fake 'exit' subprog which could simplify subprog iteration 1401 * logic. 'subprog_cnt' should not be increased. 1402 */ 1403 subprog[env->subprog_cnt].start = insn_cnt; 1404 1405 if (env->log.level & BPF_LOG_LEVEL2) 1406 for (i = 0; i < env->subprog_cnt; i++) 1407 verbose(env, "func#%d @%d\n", i, subprog[i].start); 1408 1409 /* now check that all jumps are within the same subprog */ 1410 subprog_start = subprog[cur_subprog].start; 1411 subprog_end = subprog[cur_subprog + 1].start; 1412 for (i = 0; i < insn_cnt; i++) { 1413 u8 code = insn[i].code; 1414 1415 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) 1416 goto next; 1417 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 1418 goto next; 1419 off = i + insn[i].off + 1; 1420 if (off < subprog_start || off >= subprog_end) { 1421 verbose(env, "jump out of range from insn %d to %d\n", i, off); 1422 return -EINVAL; 1423 } 1424 next: 1425 if (i == subprog_end - 1) { 1426 /* to avoid fall-through from one subprog into another 1427 * the last insn of the subprog should be either exit 1428 * or unconditional jump back 1429 */ 1430 if (code != (BPF_JMP | BPF_EXIT) && 1431 code != (BPF_JMP | BPF_JA)) { 1432 verbose(env, "last insn is not an exit or jmp\n"); 1433 return -EINVAL; 1434 } 1435 subprog_start = subprog_end; 1436 cur_subprog++; 1437 if (cur_subprog < env->subprog_cnt) 1438 subprog_end = subprog[cur_subprog + 1].start; 1439 } 1440 } 1441 return 0; 1442 } 1443 1444 /* Parentage chain of this register (or stack slot) should take care of all 1445 * issues like callee-saved registers, stack slot allocation time, etc. 1446 */ 1447 static int mark_reg_read(struct bpf_verifier_env *env, 1448 const struct bpf_reg_state *state, 1449 struct bpf_reg_state *parent, u8 flag) 1450 { 1451 bool writes = parent == state->parent; /* Observe write marks */ 1452 int cnt = 0; 1453 1454 while (parent) { 1455 /* if read wasn't screened by an earlier write ... */ 1456 if (writes && state->live & REG_LIVE_WRITTEN) 1457 break; 1458 if (parent->live & REG_LIVE_DONE) { 1459 verbose(env, "verifier BUG type %s var_off %lld off %d\n", 1460 reg_type_str[parent->type], 1461 parent->var_off.value, parent->off); 1462 return -EFAULT; 1463 } 1464 /* The first condition is more likely to be true than the 1465 * second, checked it first. 1466 */ 1467 if ((parent->live & REG_LIVE_READ) == flag || 1468 parent->live & REG_LIVE_READ64) 1469 /* The parentage chain never changes and 1470 * this parent was already marked as LIVE_READ. 1471 * There is no need to keep walking the chain again and 1472 * keep re-marking all parents as LIVE_READ. 1473 * This case happens when the same register is read 1474 * multiple times without writes into it in-between. 1475 * Also, if parent has the stronger REG_LIVE_READ64 set, 1476 * then no need to set the weak REG_LIVE_READ32. 
1477 */ 1478 break; 1479 /* ... then we depend on parent's value */ 1480 parent->live |= flag; 1481 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ 1482 if (flag == REG_LIVE_READ64) 1483 parent->live &= ~REG_LIVE_READ32; 1484 state = parent; 1485 parent = state->parent; 1486 writes = true; 1487 cnt++; 1488 } 1489 1490 if (env->longest_mark_read_walk < cnt) 1491 env->longest_mark_read_walk = cnt; 1492 return 0; 1493 } 1494 1495 /* This function is supposed to be used by the following 32-bit optimization 1496 * code only. It returns TRUE if the source or destination register operates 1497 * on 64-bit, otherwise return FALSE. 1498 */ 1499 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, 1500 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) 1501 { 1502 u8 code, class, op; 1503 1504 code = insn->code; 1505 class = BPF_CLASS(code); 1506 op = BPF_OP(code); 1507 if (class == BPF_JMP) { 1508 /* BPF_EXIT for "main" will reach here. Return TRUE 1509 * conservatively. 1510 */ 1511 if (op == BPF_EXIT) 1512 return true; 1513 if (op == BPF_CALL) { 1514 /* BPF to BPF call will reach here because of marking 1515 * caller saved clobber with DST_OP_NO_MARK for which we 1516 * don't care the register def because they are anyway 1517 * marked as NOT_INIT already. 1518 */ 1519 if (insn->src_reg == BPF_PSEUDO_CALL) 1520 return false; 1521 /* Helper call will reach here because of arg type 1522 * check, conservatively return TRUE. 1523 */ 1524 if (t == SRC_OP) 1525 return true; 1526 1527 return false; 1528 } 1529 } 1530 1531 if (class == BPF_ALU64 || class == BPF_JMP || 1532 /* BPF_END always use BPF_ALU class. */ 1533 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) 1534 return true; 1535 1536 if (class == BPF_ALU || class == BPF_JMP32) 1537 return false; 1538 1539 if (class == BPF_LDX) { 1540 if (t != SRC_OP) 1541 return BPF_SIZE(code) == BPF_DW; 1542 /* LDX source must be ptr. */ 1543 return true; 1544 } 1545 1546 if (class == BPF_STX) { 1547 if (reg->type != SCALAR_VALUE) 1548 return true; 1549 return BPF_SIZE(code) == BPF_DW; 1550 } 1551 1552 if (class == BPF_LD) { 1553 u8 mode = BPF_MODE(code); 1554 1555 /* LD_IMM64 */ 1556 if (mode == BPF_IMM) 1557 return true; 1558 1559 /* Both LD_IND and LD_ABS return 32-bit data. */ 1560 if (t != SRC_OP) 1561 return false; 1562 1563 /* Implicit ctx ptr. */ 1564 if (regno == BPF_REG_6) 1565 return true; 1566 1567 /* Explicit source could be any width. */ 1568 return true; 1569 } 1570 1571 if (class == BPF_ST) 1572 /* The only source register for BPF_ST is a ptr. */ 1573 return true; 1574 1575 /* Conservatively return true at default. */ 1576 return true; 1577 } 1578 1579 /* Return TRUE if INSN doesn't have explicit value define. */ 1580 static bool insn_no_def(struct bpf_insn *insn) 1581 { 1582 u8 class = BPF_CLASS(insn->code); 1583 1584 return (class == BPF_JMP || class == BPF_JMP32 || 1585 class == BPF_STX || class == BPF_ST); 1586 } 1587 1588 /* Return TRUE if INSN has defined any 32-bit value explicitly. 
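 * For example, a 32-bit BPF_ALU BPF_MOV defines only the low 32 bits of its
 * destination, so this returns true; a BPF_ALU64 BPF_MOV defines the full
 * 64-bit register and this returns false.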
 */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	if (insn_no_def(insn))
		return false;

	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{
	s32 def_idx = reg->subreg_def;

	if (def_idx == DEF_NOT_SUBREG)
		return;

	env->insn_aux_data[def_idx - 1].zext_dst = true;
	/* The dst will be zero extended, so won't be sub-register anymore. */
	reg->subreg_def = DEF_NOT_SUBREG;
}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
	struct bpf_reg_state *reg, *regs = state->regs;
	bool rw64;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	reg = &regs[regno];
	rw64 = is_reg64(env, insn, regno, reg, t);
	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (reg->type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno == BPF_REG_FP)
			return 0;

		if (rw64)
			mark_insn_zext(env, reg);

		return mark_reg_read(env, reg, reg->parent,
				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *cur)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_idx_pair *p;

	cnt++;
	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
	if (!p)
		return -ENOMEM;
	p[cnt - 1].idx = env->insn_idx;
	p[cnt - 1].prev_idx = env->prev_insn_idx;
	cur->jmp_history = p;
	cur->jmp_history_cnt = cnt;
	return 0;
}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}

/* For given verifier state backtrack_insn() is called from the last insn to
 * the first insn. Its purpose is to compute a bitmask of registers and
 * stack slots that need precision in the parent verifier state.
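 * For example, if r0 and r6 currently need to be precise, reg_mask is
 * (1u << 0) | (1u << 6) == 0x41; a bit is cleared once the instruction that
 * defined that register's value has been found.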
1693 */ 1694 static int backtrack_insn(struct bpf_verifier_env *env, int idx, 1695 u32 *reg_mask, u64 *stack_mask) 1696 { 1697 const struct bpf_insn_cbs cbs = { 1698 .cb_print = verbose, 1699 .private_data = env, 1700 }; 1701 struct bpf_insn *insn = env->prog->insnsi + idx; 1702 u8 class = BPF_CLASS(insn->code); 1703 u8 opcode = BPF_OP(insn->code); 1704 u8 mode = BPF_MODE(insn->code); 1705 u32 dreg = 1u << insn->dst_reg; 1706 u32 sreg = 1u << insn->src_reg; 1707 u32 spi; 1708 1709 if (insn->code == 0) 1710 return 0; 1711 if (env->log.level & BPF_LOG_LEVEL) { 1712 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); 1713 verbose(env, "%d: ", idx); 1714 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 1715 } 1716 1717 if (class == BPF_ALU || class == BPF_ALU64) { 1718 if (!(*reg_mask & dreg)) 1719 return 0; 1720 if (opcode == BPF_MOV) { 1721 if (BPF_SRC(insn->code) == BPF_X) { 1722 /* dreg = sreg 1723 * dreg needs precision after this insn 1724 * sreg needs precision before this insn 1725 */ 1726 *reg_mask &= ~dreg; 1727 *reg_mask |= sreg; 1728 } else { 1729 /* dreg = K 1730 * dreg needs precision after this insn. 1731 * Corresponding register is already marked 1732 * as precise=true in this verifier state. 1733 * No further markings in parent are necessary 1734 */ 1735 *reg_mask &= ~dreg; 1736 } 1737 } else { 1738 if (BPF_SRC(insn->code) == BPF_X) { 1739 /* dreg += sreg 1740 * both dreg and sreg need precision 1741 * before this insn 1742 */ 1743 *reg_mask |= sreg; 1744 } /* else dreg += K 1745 * dreg still needs precision before this insn 1746 */ 1747 } 1748 } else if (class == BPF_LDX) { 1749 if (!(*reg_mask & dreg)) 1750 return 0; 1751 *reg_mask &= ~dreg; 1752 1753 /* scalars can only be spilled into stack w/o losing precision. 1754 * Load from any other memory can be zero extended. 1755 * The desire to keep that precision is already indicated 1756 * by 'precise' mark in corresponding register of this state. 1757 * No further tracking necessary. 1758 */ 1759 if (insn->src_reg != BPF_REG_FP) 1760 return 0; 1761 if (BPF_SIZE(insn->code) != BPF_DW) 1762 return 0; 1763 1764 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 1765 * that [fp - off] slot contains scalar that needs to be 1766 * tracked with precision 1767 */ 1768 spi = (-insn->off - 1) / BPF_REG_SIZE; 1769 if (spi >= 64) { 1770 verbose(env, "BUG spi %d\n", spi); 1771 WARN_ONCE(1, "verifier backtracking bug"); 1772 return -EFAULT; 1773 } 1774 *stack_mask |= 1ull << spi; 1775 } else if (class == BPF_STX || class == BPF_ST) { 1776 if (*reg_mask & dreg) 1777 /* stx & st shouldn't be using _scalar_ dst_reg 1778 * to access memory. It means backtracking 1779 * encountered a case of pointer subtraction. 
1780 */ 1781 return -ENOTSUPP; 1782 /* scalars can only be spilled into stack */ 1783 if (insn->dst_reg != BPF_REG_FP) 1784 return 0; 1785 if (BPF_SIZE(insn->code) != BPF_DW) 1786 return 0; 1787 spi = (-insn->off - 1) / BPF_REG_SIZE; 1788 if (spi >= 64) { 1789 verbose(env, "BUG spi %d\n", spi); 1790 WARN_ONCE(1, "verifier backtracking bug"); 1791 return -EFAULT; 1792 } 1793 if (!(*stack_mask & (1ull << spi))) 1794 return 0; 1795 *stack_mask &= ~(1ull << spi); 1796 if (class == BPF_STX) 1797 *reg_mask |= sreg; 1798 } else if (class == BPF_JMP || class == BPF_JMP32) { 1799 if (opcode == BPF_CALL) { 1800 if (insn->src_reg == BPF_PSEUDO_CALL) 1801 return -ENOTSUPP; 1802 /* regular helper call sets R0 */ 1803 *reg_mask &= ~1; 1804 if (*reg_mask & 0x3f) { 1805 /* if backtracing was looking for registers R1-R5 1806 * they should have been found already. 1807 */ 1808 verbose(env, "BUG regs %x\n", *reg_mask); 1809 WARN_ONCE(1, "verifier backtracking bug"); 1810 return -EFAULT; 1811 } 1812 } else if (opcode == BPF_EXIT) { 1813 return -ENOTSUPP; 1814 } 1815 } else if (class == BPF_LD) { 1816 if (!(*reg_mask & dreg)) 1817 return 0; 1818 *reg_mask &= ~dreg; 1819 /* It's ld_imm64 or ld_abs or ld_ind. 1820 * For ld_imm64 no further tracking of precision 1821 * into parent is necessary 1822 */ 1823 if (mode == BPF_IND || mode == BPF_ABS) 1824 /* to be analyzed */ 1825 return -ENOTSUPP; 1826 } 1827 return 0; 1828 } 1829 1830 /* the scalar precision tracking algorithm: 1831 * . at the start all registers have precise=false. 1832 * . scalar ranges are tracked as normal through alu and jmp insns. 1833 * . once precise value of the scalar register is used in: 1834 * . ptr + scalar alu 1835 * . if (scalar cond K|scalar) 1836 * . helper_call(.., scalar, ...) where ARG_CONST is expected 1837 * backtrack through the verifier states and mark all registers and 1838 * stack slots with spilled constants that these scalar regisers 1839 * should be precise. 1840 * . during state pruning two registers (or spilled stack slots) 1841 * are equivalent if both are not precise. 1842 * 1843 * Note the verifier cannot simply walk register parentage chain, 1844 * since many different registers and stack slots could have been 1845 * used to compute single precise scalar. 1846 * 1847 * The approach of starting with precise=true for all registers and then 1848 * backtrack to mark a register as not precise when the verifier detects 1849 * that program doesn't care about specific value (e.g., when helper 1850 * takes register as ARG_ANYTHING parameter) is not safe. 1851 * 1852 * It's ok to walk single parentage chain of the verifier states. 1853 * It's possible that this backtracking will go all the way till 1st insn. 1854 * All other branches will be explored for needing precision later. 1855 * 1856 * The backtracking needs to deal with cases like: 1857 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) 1858 * r9 -= r8 1859 * r5 = r9 1860 * if r5 > 0x79f goto pc+7 1861 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) 1862 * r5 += 1 1863 * ... 1864 * call bpf_perf_event_output#25 1865 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO 1866 * 1867 * and this case: 1868 * r6 = 1 1869 * call foo // uses callee's r6 inside to compute r0 1870 * r0 += r6 1871 * if r0 == 0 goto 1872 * 1873 * to track above reg_mask/stack_mask needs to be independent for each frame. 
1874 * 1875 * Also if parent's curframe > frame where backtracking started, 1876 * the verifier need to mark registers in both frames, otherwise callees 1877 * may incorrectly prune callers. This is similar to 1878 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") 1879 * 1880 * For now backtracking falls back into conservative marking. 1881 */ 1882 static void mark_all_scalars_precise(struct bpf_verifier_env *env, 1883 struct bpf_verifier_state *st) 1884 { 1885 struct bpf_func_state *func; 1886 struct bpf_reg_state *reg; 1887 int i, j; 1888 1889 /* big hammer: mark all scalars precise in this path. 1890 * pop_stack may still get !precise scalars. 1891 */ 1892 for (; st; st = st->parent) 1893 for (i = 0; i <= st->curframe; i++) { 1894 func = st->frame[i]; 1895 for (j = 0; j < BPF_REG_FP; j++) { 1896 reg = &func->regs[j]; 1897 if (reg->type != SCALAR_VALUE) 1898 continue; 1899 reg->precise = true; 1900 } 1901 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 1902 if (func->stack[j].slot_type[0] != STACK_SPILL) 1903 continue; 1904 reg = &func->stack[j].spilled_ptr; 1905 if (reg->type != SCALAR_VALUE) 1906 continue; 1907 reg->precise = true; 1908 } 1909 } 1910 } 1911 1912 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, 1913 int spi) 1914 { 1915 struct bpf_verifier_state *st = env->cur_state; 1916 int first_idx = st->first_insn_idx; 1917 int last_idx = env->insn_idx; 1918 struct bpf_func_state *func; 1919 struct bpf_reg_state *reg; 1920 u32 reg_mask = regno >= 0 ? 1u << regno : 0; 1921 u64 stack_mask = spi >= 0 ? 1ull << spi : 0; 1922 bool skip_first = true; 1923 bool new_marks = false; 1924 int i, err; 1925 1926 if (!env->allow_ptr_leaks) 1927 /* backtracking is root only for now */ 1928 return 0; 1929 1930 func = st->frame[st->curframe]; 1931 if (regno >= 0) { 1932 reg = &func->regs[regno]; 1933 if (reg->type != SCALAR_VALUE) { 1934 WARN_ONCE(1, "backtracing misuse"); 1935 return -EFAULT; 1936 } 1937 if (!reg->precise) 1938 new_marks = true; 1939 else 1940 reg_mask = 0; 1941 reg->precise = true; 1942 } 1943 1944 while (spi >= 0) { 1945 if (func->stack[spi].slot_type[0] != STACK_SPILL) { 1946 stack_mask = 0; 1947 break; 1948 } 1949 reg = &func->stack[spi].spilled_ptr; 1950 if (reg->type != SCALAR_VALUE) { 1951 stack_mask = 0; 1952 break; 1953 } 1954 if (!reg->precise) 1955 new_marks = true; 1956 else 1957 stack_mask = 0; 1958 reg->precise = true; 1959 break; 1960 } 1961 1962 if (!new_marks) 1963 return 0; 1964 if (!reg_mask && !stack_mask) 1965 return 0; 1966 for (;;) { 1967 DECLARE_BITMAP(mask, 64); 1968 u32 history = st->jmp_history_cnt; 1969 1970 if (env->log.level & BPF_LOG_LEVEL) 1971 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); 1972 for (i = last_idx;;) { 1973 if (skip_first) { 1974 err = 0; 1975 skip_first = false; 1976 } else { 1977 err = backtrack_insn(env, i, ®_mask, &stack_mask); 1978 } 1979 if (err == -ENOTSUPP) { 1980 mark_all_scalars_precise(env, st); 1981 return 0; 1982 } else if (err) { 1983 return err; 1984 } 1985 if (!reg_mask && !stack_mask) 1986 /* Found assignment(s) into tracked register in this state. 1987 * Since this state is already marked, just return. 1988 * Nothing to be tracked further in the parent state. 
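 *
 * For instance (editor's illustration with made-up insns): if precision
 * was requested for r5 and this state contains
 *   20: r5 = 7
 *   21: if r5 > 0x79f goto pc+4
 * then backtracking insn 20 ('dreg = K') clears r5 from reg_mask; with
 * both masks empty the constant is known to originate in this state and
 * the parent does not need any extra precision marks.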
1989 */ 1990 return 0; 1991 if (i == first_idx) 1992 break; 1993 i = get_prev_insn_idx(st, i, &history); 1994 if (i >= env->prog->len) { 1995 /* This can happen if backtracking reached insn 0 1996 * and there are still reg_mask or stack_mask 1997 * to backtrack. 1998 * It means the backtracking missed the spot where 1999 * particular register was initialized with a constant. 2000 */ 2001 verbose(env, "BUG backtracking idx %d\n", i); 2002 WARN_ONCE(1, "verifier backtracking bug"); 2003 return -EFAULT; 2004 } 2005 } 2006 st = st->parent; 2007 if (!st) 2008 break; 2009 2010 new_marks = false; 2011 func = st->frame[st->curframe]; 2012 bitmap_from_u64(mask, reg_mask); 2013 for_each_set_bit(i, mask, 32) { 2014 reg = &func->regs[i]; 2015 if (reg->type != SCALAR_VALUE) { 2016 reg_mask &= ~(1u << i); 2017 continue; 2018 } 2019 if (!reg->precise) 2020 new_marks = true; 2021 reg->precise = true; 2022 } 2023 2024 bitmap_from_u64(mask, stack_mask); 2025 for_each_set_bit(i, mask, 64) { 2026 if (i >= func->allocated_stack / BPF_REG_SIZE) { 2027 /* the sequence of instructions: 2028 * 2: (bf) r3 = r10 2029 * 3: (7b) *(u64 *)(r3 -8) = r0 2030 * 4: (79) r4 = *(u64 *)(r10 -8) 2031 * doesn't contain jmps. It's backtracked 2032 * as a single block. 2033 * During backtracking insn 3 is not recognized as 2034 * stack access, so at the end of backtracking 2035 * stack slot fp-8 is still marked in stack_mask. 2036 * However the parent state may not have accessed 2037 * fp-8 and it's "unallocated" stack space. 2038 * In such case fallback to conservative. 2039 */ 2040 mark_all_scalars_precise(env, st); 2041 return 0; 2042 } 2043 2044 if (func->stack[i].slot_type[0] != STACK_SPILL) { 2045 stack_mask &= ~(1ull << i); 2046 continue; 2047 } 2048 reg = &func->stack[i].spilled_ptr; 2049 if (reg->type != SCALAR_VALUE) { 2050 stack_mask &= ~(1ull << i); 2051 continue; 2052 } 2053 if (!reg->precise) 2054 new_marks = true; 2055 reg->precise = true; 2056 } 2057 if (env->log.level & BPF_LOG_LEVEL) { 2058 print_verifier_state(env, func); 2059 verbose(env, "parent %s regs=%x stack=%llx marks\n", 2060 new_marks ? "didn't have" : "already had", 2061 reg_mask, stack_mask); 2062 } 2063 2064 if (!reg_mask && !stack_mask) 2065 break; 2066 if (!new_marks) 2067 break; 2068 2069 last_idx = st->last_insn_idx; 2070 first_idx = st->first_insn_idx; 2071 } 2072 return 0; 2073 } 2074 2075 static int mark_chain_precision(struct bpf_verifier_env *env, int regno) 2076 { 2077 return __mark_chain_precision(env, regno, -1); 2078 } 2079 2080 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) 2081 { 2082 return __mark_chain_precision(env, -1, spi); 2083 } 2084 2085 static bool is_spillable_regtype(enum bpf_reg_type type) 2086 { 2087 switch (type) { 2088 case PTR_TO_MAP_VALUE: 2089 case PTR_TO_MAP_VALUE_OR_NULL: 2090 case PTR_TO_STACK: 2091 case PTR_TO_CTX: 2092 case PTR_TO_PACKET: 2093 case PTR_TO_PACKET_META: 2094 case PTR_TO_PACKET_END: 2095 case PTR_TO_FLOW_KEYS: 2096 case CONST_PTR_TO_MAP: 2097 case PTR_TO_SOCKET: 2098 case PTR_TO_SOCKET_OR_NULL: 2099 case PTR_TO_SOCK_COMMON: 2100 case PTR_TO_SOCK_COMMON_OR_NULL: 2101 case PTR_TO_TCP_SOCK: 2102 case PTR_TO_TCP_SOCK_OR_NULL: 2103 case PTR_TO_XDP_SOCK: 2104 case PTR_TO_BTF_ID: 2105 return true; 2106 default: 2107 return false; 2108 } 2109 } 2110 2111 /* Does this register contain a constant zero? 
 */
static bool register_is_null(struct bpf_reg_state *reg)
{
        return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}

static bool register_is_const(struct bpf_reg_state *reg)
{
        return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
}

static void save_register_state(struct bpf_func_state *state,
                                int spi, struct bpf_reg_state *reg)
{
        int i;

        state->stack[spi].spilled_ptr = *reg;
        state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

        for (i = 0; i < BPF_REG_SIZE; i++)
                state->stack[spi].slot_type[i] = STACK_SPILL;
}

/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct bpf_verifier_env *env,
                             struct bpf_func_state *state, /* func where register points to */
                             int off, int size, int value_regno, int insn_idx)
{
        struct bpf_func_state *cur; /* state of the current function */
        int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
        u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
        struct bpf_reg_state *reg = NULL;

        err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
                                 state->acquired_refs, true);
        if (err)
                return err;
        /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
         * so it's aligned access and [off, off + size) are within stack limits
         */
        if (!env->allow_ptr_leaks &&
            state->stack[spi].slot_type[0] == STACK_SPILL &&
            size != BPF_REG_SIZE) {
                verbose(env, "attempt to corrupt spilled pointer on stack\n");
                return -EACCES;
        }

        cur = env->cur_state->frame[env->cur_state->curframe];
        if (value_regno >= 0)
                reg = &cur->regs[value_regno];

        if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
            !register_is_null(reg) && env->allow_ptr_leaks) {
                if (dst_reg != BPF_REG_FP) {
                        /* The backtracking logic can only recognize explicit
                         * stack slot address like [fp - 8]. Other spill of
                         * scalar via different register has to be conservative.
                         * Backtrack from here and mark all registers as precise
                         * that contributed into 'reg' being a constant.
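                         *
                         * Editor's sketch of the problematic pattern (made-up
                         * program):
                         *   r3 = r10
                         *   r3 += -8
                         *   r0 = 5
                         *   *(u64 *)(r3 + 0) = r0   // spill via r3, not via r10
                         * backtrack_insn() only treats stores with
                         * dst_reg == BPF_REG_FP as stack spills, so it could
                         * never connect this slot back to r0.  Marking the
                         * chain precise eagerly here keeps later state pruning
                         * sound.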
2172 */ 2173 err = mark_chain_precision(env, value_regno); 2174 if (err) 2175 return err; 2176 } 2177 save_register_state(state, spi, reg); 2178 } else if (reg && is_spillable_regtype(reg->type)) { 2179 /* register containing pointer is being spilled into stack */ 2180 if (size != BPF_REG_SIZE) { 2181 verbose_linfo(env, insn_idx, "; "); 2182 verbose(env, "invalid size of register spill\n"); 2183 return -EACCES; 2184 } 2185 2186 if (state != cur && reg->type == PTR_TO_STACK) { 2187 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 2188 return -EINVAL; 2189 } 2190 2191 if (!env->allow_ptr_leaks) { 2192 bool sanitize = false; 2193 2194 if (state->stack[spi].slot_type[0] == STACK_SPILL && 2195 register_is_const(&state->stack[spi].spilled_ptr)) 2196 sanitize = true; 2197 for (i = 0; i < BPF_REG_SIZE; i++) 2198 if (state->stack[spi].slot_type[i] == STACK_MISC) { 2199 sanitize = true; 2200 break; 2201 } 2202 if (sanitize) { 2203 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; 2204 int soff = (-spi - 1) * BPF_REG_SIZE; 2205 2206 /* detected reuse of integer stack slot with a pointer 2207 * which means either llvm is reusing stack slot or 2208 * an attacker is trying to exploit CVE-2018-3639 2209 * (speculative store bypass) 2210 * Have to sanitize that slot with preemptive 2211 * store of zero. 2212 */ 2213 if (*poff && *poff != soff) { 2214 /* disallow programs where single insn stores 2215 * into two different stack slots, since verifier 2216 * cannot sanitize them 2217 */ 2218 verbose(env, 2219 "insn %d cannot access two stack slots fp%d and fp%d", 2220 insn_idx, *poff, soff); 2221 return -EINVAL; 2222 } 2223 *poff = soff; 2224 } 2225 } 2226 save_register_state(state, spi, reg); 2227 } else { 2228 u8 type = STACK_MISC; 2229 2230 /* regular write of data into stack destroys any spilled ptr */ 2231 state->stack[spi].spilled_ptr.type = NOT_INIT; 2232 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ 2233 if (state->stack[spi].slot_type[0] == STACK_SPILL) 2234 for (i = 0; i < BPF_REG_SIZE; i++) 2235 state->stack[spi].slot_type[i] = STACK_MISC; 2236 2237 /* only mark the slot as written if all 8 bytes were written 2238 * otherwise read propagation may incorrectly stop too soon 2239 * when stack slots are partially written. 2240 * This heuristic means that read propagation will be 2241 * conservative, since it will add reg_live_read marks 2242 * to stack slots all the way to first state when programs 2243 * writes+reads less than 8 bytes 2244 */ 2245 if (size == BPF_REG_SIZE) 2246 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 2247 2248 /* when we zero initialize stack slots mark them as such */ 2249 if (reg && register_is_null(reg)) { 2250 /* backtracking doesn't work for STACK_ZERO yet. */ 2251 err = mark_chain_precision(env, value_regno); 2252 if (err) 2253 return err; 2254 type = STACK_ZERO; 2255 } 2256 2257 /* Mark slots affected by this stack write. 
*/ 2258 for (i = 0; i < size; i++) 2259 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 2260 type; 2261 } 2262 return 0; 2263 } 2264 2265 static int check_stack_read(struct bpf_verifier_env *env, 2266 struct bpf_func_state *reg_state /* func where register points to */, 2267 int off, int size, int value_regno) 2268 { 2269 struct bpf_verifier_state *vstate = env->cur_state; 2270 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2271 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; 2272 struct bpf_reg_state *reg; 2273 u8 *stype; 2274 2275 if (reg_state->allocated_stack <= slot) { 2276 verbose(env, "invalid read from stack off %d+0 size %d\n", 2277 off, size); 2278 return -EACCES; 2279 } 2280 stype = reg_state->stack[spi].slot_type; 2281 reg = ®_state->stack[spi].spilled_ptr; 2282 2283 if (stype[0] == STACK_SPILL) { 2284 if (size != BPF_REG_SIZE) { 2285 if (reg->type != SCALAR_VALUE) { 2286 verbose_linfo(env, env->insn_idx, "; "); 2287 verbose(env, "invalid size of register fill\n"); 2288 return -EACCES; 2289 } 2290 if (value_regno >= 0) { 2291 mark_reg_unknown(env, state->regs, value_regno); 2292 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2293 } 2294 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2295 return 0; 2296 } 2297 for (i = 1; i < BPF_REG_SIZE; i++) { 2298 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { 2299 verbose(env, "corrupted spill memory\n"); 2300 return -EACCES; 2301 } 2302 } 2303 2304 if (value_regno >= 0) { 2305 /* restore register state from stack */ 2306 state->regs[value_regno] = *reg; 2307 /* mark reg as written since spilled pointer state likely 2308 * has its liveness marks cleared by is_state_visited() 2309 * which resets stack/reg liveness for state transitions 2310 */ 2311 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2312 } 2313 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2314 } else { 2315 int zeros = 0; 2316 2317 for (i = 0; i < size; i++) { 2318 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) 2319 continue; 2320 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { 2321 zeros++; 2322 continue; 2323 } 2324 verbose(env, "invalid read from stack off %d+%d size %d\n", 2325 off, i, size); 2326 return -EACCES; 2327 } 2328 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2329 if (value_regno >= 0) { 2330 if (zeros == size) { 2331 /* any size read into register is zero extended, 2332 * so the whole register == const_zero 2333 */ 2334 __mark_reg_const_zero(&state->regs[value_regno]); 2335 /* backtracking doesn't support STACK_ZERO yet, 2336 * so mark it precise here, so that later 2337 * backtracking can stop here. 2338 * Backtracking may not need this if this register 2339 * doesn't participate in pointer adjustment. 2340 * Forward propagation of precise flag is not 2341 * necessary either. This mark is only to stop 2342 * backtracking. Any register that contributed 2343 * to const 0 was marked precise before spill. 2344 */ 2345 state->regs[value_regno].precise = true; 2346 } else { 2347 /* have read misc data from the stack */ 2348 mark_reg_unknown(env, state->regs, value_regno); 2349 } 2350 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2351 } 2352 } 2353 return 0; 2354 } 2355 2356 static int check_stack_access(struct bpf_verifier_env *env, 2357 const struct bpf_reg_state *reg, 2358 int off, int size) 2359 { 2360 /* Stack accesses must be at a fixed offset, so that we 2361 * can determine what type of data were returned. See 2362 * check_stack_read(). 
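 *
 * A rejected pattern might look like (editor's illustration):
 *   r2 = r10
 *   r2 += r1              // r1 is an unknown scalar
 *   r3 = *(u64 *)(r2 - 8)
 * Here r2's var_off is no longer constant, so the check below reports
 * "variable stack access" rather than guessing which slot is read.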
2363 */ 2364 if (!tnum_is_const(reg->var_off)) { 2365 char tn_buf[48]; 2366 2367 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2368 verbose(env, "variable stack access var_off=%s off=%d size=%d\n", 2369 tn_buf, off, size); 2370 return -EACCES; 2371 } 2372 2373 if (off >= 0 || off < -MAX_BPF_STACK) { 2374 verbose(env, "invalid stack off=%d size=%d\n", off, size); 2375 return -EACCES; 2376 } 2377 2378 return 0; 2379 } 2380 2381 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 2382 int off, int size, enum bpf_access_type type) 2383 { 2384 struct bpf_reg_state *regs = cur_regs(env); 2385 struct bpf_map *map = regs[regno].map_ptr; 2386 u32 cap = bpf_map_flags_to_cap(map); 2387 2388 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 2389 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 2390 map->value_size, off, size); 2391 return -EACCES; 2392 } 2393 2394 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 2395 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 2396 map->value_size, off, size); 2397 return -EACCES; 2398 } 2399 2400 return 0; 2401 } 2402 2403 /* check read/write into map element returned by bpf_map_lookup_elem() */ 2404 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 2405 int size, bool zero_size_allowed) 2406 { 2407 struct bpf_reg_state *regs = cur_regs(env); 2408 struct bpf_map *map = regs[regno].map_ptr; 2409 2410 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 2411 off + size > map->value_size) { 2412 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 2413 map->value_size, off, size); 2414 return -EACCES; 2415 } 2416 return 0; 2417 } 2418 2419 /* check read/write into a map element with possible variable offset */ 2420 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 2421 int off, int size, bool zero_size_allowed) 2422 { 2423 struct bpf_verifier_state *vstate = env->cur_state; 2424 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2425 struct bpf_reg_state *reg = &state->regs[regno]; 2426 int err; 2427 2428 /* We may have adjusted the register to this map value, so we 2429 * need to try adding each of min_value and max_value to off 2430 * to make sure our theoretical access will be safe. 2431 */ 2432 if (env->log.level & BPF_LOG_LEVEL) 2433 print_verifier_state(env, state); 2434 2435 /* The minimum value is only important with signed 2436 * comparisons where we can't assume the floor of a 2437 * value is 0. If we are using signed variables for our 2438 * index'es we need to make sure that whatever we use 2439 * will have a set floor within our range. 2440 */ 2441 if (reg->smin_value < 0 && 2442 (reg->smin_value == S64_MIN || 2443 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 2444 reg->smin_value + off < 0)) { 2445 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2446 regno); 2447 return -EACCES; 2448 } 2449 err = __check_map_access(env, regno, reg->smin_value + off, size, 2450 zero_size_allowed); 2451 if (err) { 2452 verbose(env, "R%d min value is outside of the array range\n", 2453 regno); 2454 return err; 2455 } 2456 2457 /* If we haven't set a max value then we need to bail since we can't be 2458 * sure we won't do bad things. 2459 * If reg->umax_value + off could overflow, treat that as unbounded too. 
2460 */ 2461 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 2462 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", 2463 regno); 2464 return -EACCES; 2465 } 2466 err = __check_map_access(env, regno, reg->umax_value + off, size, 2467 zero_size_allowed); 2468 if (err) 2469 verbose(env, "R%d max value is outside of the array range\n", 2470 regno); 2471 2472 if (map_value_has_spin_lock(reg->map_ptr)) { 2473 u32 lock = reg->map_ptr->spin_lock_off; 2474 2475 /* if any part of struct bpf_spin_lock can be touched by 2476 * load/store reject this program. 2477 * To check that [x1, x2) overlaps with [y1, y2) 2478 * it is sufficient to check x1 < y2 && y1 < x2. 2479 */ 2480 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && 2481 lock < reg->umax_value + off + size) { 2482 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); 2483 return -EACCES; 2484 } 2485 } 2486 return err; 2487 } 2488 2489 #define MAX_PACKET_OFF 0xffff 2490 2491 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 2492 const struct bpf_call_arg_meta *meta, 2493 enum bpf_access_type t) 2494 { 2495 switch (env->prog->type) { 2496 /* Program types only with direct read access go here! */ 2497 case BPF_PROG_TYPE_LWT_IN: 2498 case BPF_PROG_TYPE_LWT_OUT: 2499 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2500 case BPF_PROG_TYPE_SK_REUSEPORT: 2501 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2502 case BPF_PROG_TYPE_CGROUP_SKB: 2503 if (t == BPF_WRITE) 2504 return false; 2505 /* fallthrough */ 2506 2507 /* Program types with direct read + write access go here! */ 2508 case BPF_PROG_TYPE_SCHED_CLS: 2509 case BPF_PROG_TYPE_SCHED_ACT: 2510 case BPF_PROG_TYPE_XDP: 2511 case BPF_PROG_TYPE_LWT_XMIT: 2512 case BPF_PROG_TYPE_SK_SKB: 2513 case BPF_PROG_TYPE_SK_MSG: 2514 if (meta) 2515 return meta->pkt_access; 2516 2517 env->seen_direct_write = true; 2518 return true; 2519 2520 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2521 if (t == BPF_WRITE) 2522 env->seen_direct_write = true; 2523 2524 return true; 2525 2526 default: 2527 return false; 2528 } 2529 } 2530 2531 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, 2532 int off, int size, bool zero_size_allowed) 2533 { 2534 struct bpf_reg_state *regs = cur_regs(env); 2535 struct bpf_reg_state *reg = ®s[regno]; 2536 2537 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 2538 (u64)off + size > reg->range) { 2539 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 2540 off, size, regno, reg->id, reg->off, reg->range); 2541 return -EACCES; 2542 } 2543 return 0; 2544 } 2545 2546 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 2547 int size, bool zero_size_allowed) 2548 { 2549 struct bpf_reg_state *regs = cur_regs(env); 2550 struct bpf_reg_state *reg = ®s[regno]; 2551 int err; 2552 2553 /* We may have added a variable offset to the packet pointer; but any 2554 * reg->range we have comes after that. We are only checking the fixed 2555 * offset. 2556 */ 2557 2558 /* We don't allow negative numbers, because we aren't tracking enough 2559 * detail to prove they're safe. 
2560 */ 2561 if (reg->smin_value < 0) { 2562 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2563 regno); 2564 return -EACCES; 2565 } 2566 err = __check_packet_access(env, regno, off, size, zero_size_allowed); 2567 if (err) { 2568 verbose(env, "R%d offset is outside of the packet\n", regno); 2569 return err; 2570 } 2571 2572 /* __check_packet_access has made sure "off + size - 1" is within u16. 2573 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, 2574 * otherwise find_good_pkt_pointers would have refused to set range info 2575 * that __check_packet_access would have rejected this pkt access. 2576 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. 2577 */ 2578 env->prog->aux->max_pkt_offset = 2579 max_t(u32, env->prog->aux->max_pkt_offset, 2580 off + reg->umax_value + size - 1); 2581 2582 return err; 2583 } 2584 2585 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ 2586 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, 2587 enum bpf_access_type t, enum bpf_reg_type *reg_type, 2588 u32 *btf_id) 2589 { 2590 struct bpf_insn_access_aux info = { 2591 .reg_type = *reg_type, 2592 .log = &env->log, 2593 }; 2594 2595 if (env->ops->is_valid_access && 2596 env->ops->is_valid_access(off, size, t, env->prog, &info)) { 2597 /* A non zero info.ctx_field_size indicates that this field is a 2598 * candidate for later verifier transformation to load the whole 2599 * field and then apply a mask when accessed with a narrower 2600 * access than actual ctx access size. A zero info.ctx_field_size 2601 * will only allow for whole field access and rejects any other 2602 * type of narrower access. 2603 */ 2604 *reg_type = info.reg_type; 2605 2606 if (*reg_type == PTR_TO_BTF_ID) 2607 *btf_id = info.btf_id; 2608 else 2609 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 2610 /* remember the offset of last byte accessed in ctx */ 2611 if (env->prog->aux->max_ctx_offset < off + size) 2612 env->prog->aux->max_ctx_offset = off + size; 2613 return 0; 2614 } 2615 2616 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 2617 return -EACCES; 2618 } 2619 2620 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, 2621 int size) 2622 { 2623 if (size < 0 || off < 0 || 2624 (u64)off + size > sizeof(struct bpf_flow_keys)) { 2625 verbose(env, "invalid access to flow keys off=%d size=%d\n", 2626 off, size); 2627 return -EACCES; 2628 } 2629 return 0; 2630 } 2631 2632 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, 2633 u32 regno, int off, int size, 2634 enum bpf_access_type t) 2635 { 2636 struct bpf_reg_state *regs = cur_regs(env); 2637 struct bpf_reg_state *reg = ®s[regno]; 2638 struct bpf_insn_access_aux info = {}; 2639 bool valid; 2640 2641 if (reg->smin_value < 0) { 2642 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2643 regno); 2644 return -EACCES; 2645 } 2646 2647 switch (reg->type) { 2648 case PTR_TO_SOCK_COMMON: 2649 valid = bpf_sock_common_is_valid_access(off, size, t, &info); 2650 break; 2651 case PTR_TO_SOCKET: 2652 valid = bpf_sock_is_valid_access(off, size, t, &info); 2653 break; 2654 case PTR_TO_TCP_SOCK: 2655 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); 2656 break; 2657 case PTR_TO_XDP_SOCK: 2658 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); 2659 break; 2660 default: 2661 valid = false; 2662 } 2663 
2664 2665 if (valid) { 2666 env->insn_aux_data[insn_idx].ctx_field_size = 2667 info.ctx_field_size; 2668 return 0; 2669 } 2670 2671 verbose(env, "R%d invalid %s access off=%d size=%d\n", 2672 regno, reg_type_str[reg->type], off, size); 2673 2674 return -EACCES; 2675 } 2676 2677 static bool __is_pointer_value(bool allow_ptr_leaks, 2678 const struct bpf_reg_state *reg) 2679 { 2680 if (allow_ptr_leaks) 2681 return false; 2682 2683 return reg->type != SCALAR_VALUE; 2684 } 2685 2686 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 2687 { 2688 return cur_regs(env) + regno; 2689 } 2690 2691 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 2692 { 2693 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); 2694 } 2695 2696 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 2697 { 2698 const struct bpf_reg_state *reg = reg_state(env, regno); 2699 2700 return reg->type == PTR_TO_CTX; 2701 } 2702 2703 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) 2704 { 2705 const struct bpf_reg_state *reg = reg_state(env, regno); 2706 2707 return type_is_sk_pointer(reg->type); 2708 } 2709 2710 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 2711 { 2712 const struct bpf_reg_state *reg = reg_state(env, regno); 2713 2714 return type_is_pkt_pointer(reg->type); 2715 } 2716 2717 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) 2718 { 2719 const struct bpf_reg_state *reg = reg_state(env, regno); 2720 2721 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ 2722 return reg->type == PTR_TO_FLOW_KEYS; 2723 } 2724 2725 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 2726 const struct bpf_reg_state *reg, 2727 int off, int size, bool strict) 2728 { 2729 struct tnum reg_off; 2730 int ip_align; 2731 2732 /* Byte size accesses are always allowed. */ 2733 if (!strict || size == 1) 2734 return 0; 2735 2736 /* For platforms that do not have a Kconfig enabling 2737 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 2738 * NET_IP_ALIGN is universally set to '2'. And on platforms 2739 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 2740 * to this code only in strict mode where we want to emulate 2741 * the NET_IP_ALIGN==2 checking. Therefore use an 2742 * unconditional IP align value of '2'. 2743 */ 2744 ip_align = 2; 2745 2746 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 2747 if (!tnum_is_aligned(reg_off, size)) { 2748 char tn_buf[48]; 2749 2750 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2751 verbose(env, 2752 "misaligned packet access off %d+%s+%d+%d size %d\n", 2753 ip_align, tn_buf, reg->off, off, size); 2754 return -EACCES; 2755 } 2756 2757 return 0; 2758 } 2759 2760 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 2761 const struct bpf_reg_state *reg, 2762 const char *pointer_desc, 2763 int off, int size, bool strict) 2764 { 2765 struct tnum reg_off; 2766 2767 /* Byte size accesses are always allowed. 
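 *
 * Editor's illustration: with strict alignment in force, a 4-byte load
 * like
 *   r2 = *(u32 *)(r1 + 1)    // r1 is e.g. a map value pointer
 * fails the tnum_is_aligned() test below because offset 1 is not a
 * multiple of the access size, while
 *   r2 = *(u8 *)(r1 + 1)
 * is accepted since a single byte can never be misaligned.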
*/ 2768 if (!strict || size == 1) 2769 return 0; 2770 2771 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 2772 if (!tnum_is_aligned(reg_off, size)) { 2773 char tn_buf[48]; 2774 2775 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2776 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 2777 pointer_desc, tn_buf, reg->off, off, size); 2778 return -EACCES; 2779 } 2780 2781 return 0; 2782 } 2783 2784 static int check_ptr_alignment(struct bpf_verifier_env *env, 2785 const struct bpf_reg_state *reg, int off, 2786 int size, bool strict_alignment_once) 2787 { 2788 bool strict = env->strict_alignment || strict_alignment_once; 2789 const char *pointer_desc = ""; 2790 2791 switch (reg->type) { 2792 case PTR_TO_PACKET: 2793 case PTR_TO_PACKET_META: 2794 /* Special case, because of NET_IP_ALIGN. Given metadata sits 2795 * right in front, treat it the very same way. 2796 */ 2797 return check_pkt_ptr_alignment(env, reg, off, size, strict); 2798 case PTR_TO_FLOW_KEYS: 2799 pointer_desc = "flow keys "; 2800 break; 2801 case PTR_TO_MAP_VALUE: 2802 pointer_desc = "value "; 2803 break; 2804 case PTR_TO_CTX: 2805 pointer_desc = "context "; 2806 break; 2807 case PTR_TO_STACK: 2808 pointer_desc = "stack "; 2809 /* The stack spill tracking logic in check_stack_write() 2810 * and check_stack_read() relies on stack accesses being 2811 * aligned. 2812 */ 2813 strict = true; 2814 break; 2815 case PTR_TO_SOCKET: 2816 pointer_desc = "sock "; 2817 break; 2818 case PTR_TO_SOCK_COMMON: 2819 pointer_desc = "sock_common "; 2820 break; 2821 case PTR_TO_TCP_SOCK: 2822 pointer_desc = "tcp_sock "; 2823 break; 2824 case PTR_TO_XDP_SOCK: 2825 pointer_desc = "xdp_sock "; 2826 break; 2827 default: 2828 break; 2829 } 2830 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 2831 strict); 2832 } 2833 2834 static int update_stack_depth(struct bpf_verifier_env *env, 2835 const struct bpf_func_state *func, 2836 int off) 2837 { 2838 u16 stack = env->subprog_info[func->subprogno].stack_depth; 2839 2840 if (stack >= -off) 2841 return 0; 2842 2843 /* update known max for given subprogram */ 2844 env->subprog_info[func->subprogno].stack_depth = -off; 2845 return 0; 2846 } 2847 2848 /* starting from main bpf function walk all instructions of the function 2849 * and recursively walk all callees that given function can call. 2850 * Ignore jump and exit insns. 2851 * Since recursion is prevented by check_cfg() this algorithm 2852 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 2853 */ 2854 static int check_max_stack_depth(struct bpf_verifier_env *env) 2855 { 2856 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; 2857 struct bpf_subprog_info *subprog = env->subprog_info; 2858 struct bpf_insn *insn = env->prog->insnsi; 2859 int ret_insn[MAX_CALL_FRAMES]; 2860 int ret_prog[MAX_CALL_FRAMES]; 2861 2862 process_func: 2863 /* round up to 32-bytes, since this is granularity 2864 * of interpreter stack size 2865 */ 2866 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 2867 if (depth > MAX_BPF_STACK) { 2868 verbose(env, "combined stack size of %d calls is %d. 
Too large\n", 2869 frame + 1, depth); 2870 return -EACCES; 2871 } 2872 continue_func: 2873 subprog_end = subprog[idx + 1].start; 2874 for (; i < subprog_end; i++) { 2875 if (insn[i].code != (BPF_JMP | BPF_CALL)) 2876 continue; 2877 if (insn[i].src_reg != BPF_PSEUDO_CALL) 2878 continue; 2879 /* remember insn and function to return to */ 2880 ret_insn[frame] = i + 1; 2881 ret_prog[frame] = idx; 2882 2883 /* find the callee */ 2884 i = i + insn[i].imm + 1; 2885 idx = find_subprog(env, i); 2886 if (idx < 0) { 2887 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 2888 i); 2889 return -EFAULT; 2890 } 2891 frame++; 2892 if (frame >= MAX_CALL_FRAMES) { 2893 verbose(env, "the call stack of %d frames is too deep !\n", 2894 frame); 2895 return -E2BIG; 2896 } 2897 goto process_func; 2898 } 2899 /* end of for() loop means the last insn of the 'subprog' 2900 * was reached. Doesn't matter whether it was JA or EXIT 2901 */ 2902 if (frame == 0) 2903 return 0; 2904 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 2905 frame--; 2906 i = ret_insn[frame]; 2907 idx = ret_prog[frame]; 2908 goto continue_func; 2909 } 2910 2911 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 2912 static int get_callee_stack_depth(struct bpf_verifier_env *env, 2913 const struct bpf_insn *insn, int idx) 2914 { 2915 int start = idx + insn->imm + 1, subprog; 2916 2917 subprog = find_subprog(env, start); 2918 if (subprog < 0) { 2919 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 2920 start); 2921 return -EFAULT; 2922 } 2923 return env->subprog_info[subprog].stack_depth; 2924 } 2925 #endif 2926 2927 int check_ctx_reg(struct bpf_verifier_env *env, 2928 const struct bpf_reg_state *reg, int regno) 2929 { 2930 /* Access to ctx or passing it to a helper is only allowed in 2931 * its original, unmodified form. 
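 *
 * E.g. (editor's illustration):
 *   r1 += 8
 *   r2 = *(u32 *)(r1 + 0)
 * is rejected ("dereference of modified ctx ptr"), while the equivalent
 *   r2 = *(u32 *)(r1 + 8)
 * is fine: ctx accesses are rewritten per field, so the field offset
 * has to be encoded in the load/store instruction itself.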
2932 */ 2933 2934 if (reg->off) { 2935 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", 2936 regno, reg->off); 2937 return -EACCES; 2938 } 2939 2940 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 2941 char tn_buf[48]; 2942 2943 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2944 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); 2945 return -EACCES; 2946 } 2947 2948 return 0; 2949 } 2950 2951 static int check_tp_buffer_access(struct bpf_verifier_env *env, 2952 const struct bpf_reg_state *reg, 2953 int regno, int off, int size) 2954 { 2955 if (off < 0) { 2956 verbose(env, 2957 "R%d invalid tracepoint buffer access: off=%d, size=%d", 2958 regno, off, size); 2959 return -EACCES; 2960 } 2961 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 2962 char tn_buf[48]; 2963 2964 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2965 verbose(env, 2966 "R%d invalid variable buffer offset: off=%d, var_off=%s", 2967 regno, off, tn_buf); 2968 return -EACCES; 2969 } 2970 if (off + size > env->prog->aux->max_tp_access) 2971 env->prog->aux->max_tp_access = off + size; 2972 2973 return 0; 2974 } 2975 2976 /* BPF architecture zero extends alu32 ops into 64-bit registesr */ 2977 static void zext_32_to_64(struct bpf_reg_state *reg) 2978 { 2979 reg->var_off = tnum_subreg(reg->var_off); 2980 __reg_assign_32_into_64(reg); 2981 } 2982 2983 /* truncate register to smaller size (in bytes) 2984 * must be called with size < BPF_REG_SIZE 2985 */ 2986 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 2987 { 2988 u64 mask; 2989 2990 /* clear high bits in bit representation */ 2991 reg->var_off = tnum_cast(reg->var_off, size); 2992 2993 /* fix arithmetic bounds */ 2994 mask = ((u64)1 << (size * 8)) - 1; 2995 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 2996 reg->umin_value &= mask; 2997 reg->umax_value &= mask; 2998 } else { 2999 reg->umin_value = 0; 3000 reg->umax_value = mask; 3001 } 3002 reg->smin_value = reg->umin_value; 3003 reg->smax_value = reg->umax_value; 3004 3005 /* If size is smaller than 32bit register the 32bit register 3006 * values are also truncated so we push 64-bit bounds into 3007 * 32-bit bounds. Above were truncated < 32-bits already. 
3008 */ 3009 if (size >= 4) 3010 return; 3011 __reg_combine_64_into_32(reg); 3012 } 3013 3014 static bool bpf_map_is_rdonly(const struct bpf_map *map) 3015 { 3016 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; 3017 } 3018 3019 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) 3020 { 3021 void *ptr; 3022 u64 addr; 3023 int err; 3024 3025 err = map->ops->map_direct_value_addr(map, &addr, off); 3026 if (err) 3027 return err; 3028 ptr = (void *)(long)addr + off; 3029 3030 switch (size) { 3031 case sizeof(u8): 3032 *val = (u64)*(u8 *)ptr; 3033 break; 3034 case sizeof(u16): 3035 *val = (u64)*(u16 *)ptr; 3036 break; 3037 case sizeof(u32): 3038 *val = (u64)*(u32 *)ptr; 3039 break; 3040 case sizeof(u64): 3041 *val = *(u64 *)ptr; 3042 break; 3043 default: 3044 return -EINVAL; 3045 } 3046 return 0; 3047 } 3048 3049 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 3050 struct bpf_reg_state *regs, 3051 int regno, int off, int size, 3052 enum bpf_access_type atype, 3053 int value_regno) 3054 { 3055 struct bpf_reg_state *reg = regs + regno; 3056 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id); 3057 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off); 3058 u32 btf_id; 3059 int ret; 3060 3061 if (off < 0) { 3062 verbose(env, 3063 "R%d is ptr_%s invalid negative access: off=%d\n", 3064 regno, tname, off); 3065 return -EACCES; 3066 } 3067 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3068 char tn_buf[48]; 3069 3070 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3071 verbose(env, 3072 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 3073 regno, tname, off, tn_buf); 3074 return -EACCES; 3075 } 3076 3077 if (env->ops->btf_struct_access) { 3078 ret = env->ops->btf_struct_access(&env->log, t, off, size, 3079 atype, &btf_id); 3080 } else { 3081 if (atype != BPF_READ) { 3082 verbose(env, "only read is supported\n"); 3083 return -EACCES; 3084 } 3085 3086 ret = btf_struct_access(&env->log, t, off, size, atype, 3087 &btf_id); 3088 } 3089 3090 if (ret < 0) 3091 return ret; 3092 3093 if (atype == BPF_READ) { 3094 if (ret == SCALAR_VALUE) { 3095 mark_reg_unknown(env, regs, value_regno); 3096 return 0; 3097 } 3098 mark_reg_known_zero(env, regs, value_regno); 3099 regs[value_regno].type = PTR_TO_BTF_ID; 3100 regs[value_regno].btf_id = btf_id; 3101 } 3102 3103 return 0; 3104 } 3105 3106 /* check whether memory at (regno + off) is accessible for t = (read | write) 3107 * if t==write, value_regno is a register which value is stored into memory 3108 * if t==read, value_regno is a register which will receive the value from memory 3109 * if t==write && value_regno==-1, some unknown value is stored into memory 3110 * if t==read && value_regno==-1, don't care what we read from memory 3111 */ 3112 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 3113 int off, int bpf_size, enum bpf_access_type t, 3114 int value_regno, bool strict_alignment_once) 3115 { 3116 struct bpf_reg_state *regs = cur_regs(env); 3117 struct bpf_reg_state *reg = regs + regno; 3118 struct bpf_func_state *state; 3119 int size, err = 0; 3120 3121 size = bpf_size_to_bytes(bpf_size); 3122 if (size < 0) 3123 return size; 3124 3125 /* alignment checks will add in reg->off themselves */ 3126 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 3127 if (err) 3128 return err; 3129 3130 /* for access checks, reg->off is just part of off */ 3131 off += reg->off; 3132 3133 if (reg->type == 
PTR_TO_MAP_VALUE) {
                if (t == BPF_WRITE && value_regno >= 0 &&
                    is_pointer_value(env, value_regno)) {
                        verbose(env, "R%d leaks addr into map\n", value_regno);
                        return -EACCES;
                }
                err = check_map_access_type(env, regno, off, size, t);
                if (err)
                        return err;
                err = check_map_access(env, regno, off, size, false);
                if (!err && t == BPF_READ && value_regno >= 0) {
                        struct bpf_map *map = reg->map_ptr;

                        /* if map is read-only, track its contents as scalars */
                        if (tnum_is_const(reg->var_off) &&
                            bpf_map_is_rdonly(map) &&
                            map->ops->map_direct_value_addr) {
                                int map_off = off + reg->var_off.value;
                                u64 val = 0;

                                err = bpf_map_direct_read(map, map_off, size,
                                                          &val);
                                if (err)
                                        return err;

                                regs[value_regno].type = SCALAR_VALUE;
                                __mark_reg_known(&regs[value_regno], val);
                        } else {
                                mark_reg_unknown(env, regs, value_regno);
                        }
                }
        } else if (reg->type == PTR_TO_CTX) {
                enum bpf_reg_type reg_type = SCALAR_VALUE;
                u32 btf_id = 0;

                if (t == BPF_WRITE && value_regno >= 0 &&
                    is_pointer_value(env, value_regno)) {
                        verbose(env, "R%d leaks addr into ctx\n", value_regno);
                        return -EACCES;
                }

                err = check_ctx_reg(env, reg, regno);
                if (err < 0)
                        return err;

                err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
                if (err)
                        verbose_linfo(env, insn_idx, "; ");
                if (!err && t == BPF_READ && value_regno >= 0) {
                        /* ctx access returns either a scalar, or a
                         * PTR_TO_PACKET[_META,_END]. In the latter
                         * case, we know the offset is zero.
                         */
                        if (reg_type == SCALAR_VALUE) {
                                mark_reg_unknown(env, regs, value_regno);
                        } else {
                                mark_reg_known_zero(env, regs,
                                                    value_regno);
                                if (reg_type_may_be_null(reg_type))
                                        regs[value_regno].id = ++env->id_gen;
                                /* A load of a ctx field could have a different
                                 * actual load size from the one encoded in the
                                 * insn. When the dst is PTR, it is for sure not
                                 * a sub-register.
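                                 *
                                 * Editor's illustration: a narrow 1-byte read
                                 * of a 4-byte ctx field, e.g.
                                 *   r2 = *(u8 *)(r1 + offsetof(struct __sk_buff, mark))
                                 * is later rewritten (using the ctx_field_size
                                 * recorded in check_ctx_access()) into a
                                 * full-width load plus shift/mask, so the size
                                 * seen at runtime differs from the one encoded
                                 * here.  Pointer results such as PTR_TO_PACKET
                                 * are always full 64-bit values, hence
                                 * DEF_NOT_SUBREG.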
3197 */ 3198 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 3199 if (reg_type == PTR_TO_BTF_ID) 3200 regs[value_regno].btf_id = btf_id; 3201 } 3202 regs[value_regno].type = reg_type; 3203 } 3204 3205 } else if (reg->type == PTR_TO_STACK) { 3206 off += reg->var_off.value; 3207 err = check_stack_access(env, reg, off, size); 3208 if (err) 3209 return err; 3210 3211 state = func(env, reg); 3212 err = update_stack_depth(env, state, off); 3213 if (err) 3214 return err; 3215 3216 if (t == BPF_WRITE) 3217 err = check_stack_write(env, state, off, size, 3218 value_regno, insn_idx); 3219 else 3220 err = check_stack_read(env, state, off, size, 3221 value_regno); 3222 } else if (reg_is_pkt_pointer(reg)) { 3223 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 3224 verbose(env, "cannot write into packet\n"); 3225 return -EACCES; 3226 } 3227 if (t == BPF_WRITE && value_regno >= 0 && 3228 is_pointer_value(env, value_regno)) { 3229 verbose(env, "R%d leaks addr into packet\n", 3230 value_regno); 3231 return -EACCES; 3232 } 3233 err = check_packet_access(env, regno, off, size, false); 3234 if (!err && t == BPF_READ && value_regno >= 0) 3235 mark_reg_unknown(env, regs, value_regno); 3236 } else if (reg->type == PTR_TO_FLOW_KEYS) { 3237 if (t == BPF_WRITE && value_regno >= 0 && 3238 is_pointer_value(env, value_regno)) { 3239 verbose(env, "R%d leaks addr into flow keys\n", 3240 value_regno); 3241 return -EACCES; 3242 } 3243 3244 err = check_flow_keys_access(env, off, size); 3245 if (!err && t == BPF_READ && value_regno >= 0) 3246 mark_reg_unknown(env, regs, value_regno); 3247 } else if (type_is_sk_pointer(reg->type)) { 3248 if (t == BPF_WRITE) { 3249 verbose(env, "R%d cannot write into %s\n", 3250 regno, reg_type_str[reg->type]); 3251 return -EACCES; 3252 } 3253 err = check_sock_access(env, insn_idx, regno, off, size, t); 3254 if (!err && value_regno >= 0) 3255 mark_reg_unknown(env, regs, value_regno); 3256 } else if (reg->type == PTR_TO_TP_BUFFER) { 3257 err = check_tp_buffer_access(env, reg, regno, off, size); 3258 if (!err && t == BPF_READ && value_regno >= 0) 3259 mark_reg_unknown(env, regs, value_regno); 3260 } else if (reg->type == PTR_TO_BTF_ID) { 3261 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 3262 value_regno); 3263 } else { 3264 verbose(env, "R%d invalid mem access '%s'\n", regno, 3265 reg_type_str[reg->type]); 3266 return -EACCES; 3267 } 3268 3269 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 3270 regs[value_regno].type == SCALAR_VALUE) { 3271 /* b/h/w load zero-extends, mark upper bits as known 0 */ 3272 coerce_reg_to_size(®s[value_regno], size); 3273 } 3274 return err; 3275 } 3276 3277 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 3278 { 3279 int err; 3280 3281 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || 3282 insn->imm != 0) { 3283 verbose(env, "BPF_XADD uses reserved fields\n"); 3284 return -EINVAL; 3285 } 3286 3287 /* check src1 operand */ 3288 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3289 if (err) 3290 return err; 3291 3292 /* check src2 operand */ 3293 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3294 if (err) 3295 return err; 3296 3297 if (is_pointer_value(env, insn->src_reg)) { 3298 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 3299 return -EACCES; 3300 } 3301 3302 if (is_ctx_reg(env, insn->dst_reg) || 3303 is_pkt_reg(env, insn->dst_reg) || 3304 is_flow_key_reg(env, insn->dst_reg) || 3305 is_sk_reg(env, insn->dst_reg)) { 3306 
verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", 3307 insn->dst_reg, 3308 reg_type_str[reg_state(env, insn->dst_reg)->type]); 3309 return -EACCES; 3310 } 3311 3312 /* check whether atomic_add can read the memory */ 3313 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3314 BPF_SIZE(insn->code), BPF_READ, -1, true); 3315 if (err) 3316 return err; 3317 3318 /* check whether atomic_add can write into the same memory */ 3319 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3320 BPF_SIZE(insn->code), BPF_WRITE, -1, true); 3321 } 3322 3323 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno, 3324 int off, int access_size, 3325 bool zero_size_allowed) 3326 { 3327 struct bpf_reg_state *reg = reg_state(env, regno); 3328 3329 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 3330 access_size < 0 || (access_size == 0 && !zero_size_allowed)) { 3331 if (tnum_is_const(reg->var_off)) { 3332 verbose(env, "invalid stack type R%d off=%d access_size=%d\n", 3333 regno, off, access_size); 3334 } else { 3335 char tn_buf[48]; 3336 3337 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3338 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n", 3339 regno, tn_buf, access_size); 3340 } 3341 return -EACCES; 3342 } 3343 return 0; 3344 } 3345 3346 /* when register 'regno' is passed into function that will read 'access_size' 3347 * bytes from that pointer, make sure that it's within stack boundary 3348 * and all elements of stack are initialized. 3349 * Unlike most pointer bounds-checking functions, this one doesn't take an 3350 * 'off' argument, so it has to add in reg->off itself. 3351 */ 3352 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 3353 int access_size, bool zero_size_allowed, 3354 struct bpf_call_arg_meta *meta) 3355 { 3356 struct bpf_reg_state *reg = reg_state(env, regno); 3357 struct bpf_func_state *state = func(env, reg); 3358 int err, min_off, max_off, i, j, slot, spi; 3359 3360 if (reg->type != PTR_TO_STACK) { 3361 /* Allow zero-byte read from NULL, regardless of pointer type */ 3362 if (zero_size_allowed && access_size == 0 && 3363 register_is_null(reg)) 3364 return 0; 3365 3366 verbose(env, "R%d type=%s expected=%s\n", regno, 3367 reg_type_str[reg->type], 3368 reg_type_str[PTR_TO_STACK]); 3369 return -EACCES; 3370 } 3371 3372 if (tnum_is_const(reg->var_off)) { 3373 min_off = max_off = reg->var_off.value + reg->off; 3374 err = __check_stack_boundary(env, regno, min_off, access_size, 3375 zero_size_allowed); 3376 if (err) 3377 return err; 3378 } else { 3379 /* Variable offset is prohibited for unprivileged mode for 3380 * simplicity since it requires corresponding support in 3381 * Spectre masking for stack ALU. 3382 * See also retrieve_ptr_limit(). 3383 */ 3384 if (!env->allow_ptr_leaks) { 3385 char tn_buf[48]; 3386 3387 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3388 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", 3389 regno, tn_buf); 3390 return -EACCES; 3391 } 3392 /* Only initialized buffer on stack is allowed to be accessed 3393 * with variable offset. With uninitialized buffer it's hard to 3394 * guarantee that whole memory is marked as initialized on 3395 * helper return since specific bounds are unknown what may 3396 * cause uninitialized stack leaking. 
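 *
 * Editor's illustration (made-up offsets): for a raw_mode helper whose
 * destination argument is ARG_PTR_TO_UNINIT_MEM, e.g.
 *   r1 = r10
 *   r1 += r7                  // r7 known to be in [-64, -16]
 *   r2 = 8
 *   call <probe_read-style helper>
 * the exact destination slots cannot be pinned down, so meta is cleared
 * below and every byte in fp-64..fp-9 must already be initialized
 * (STACK_MISC, STACK_ZERO or a spilled register) for the call to be
 * accepted.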
3397 */ 3398 if (meta && meta->raw_mode) 3399 meta = NULL; 3400 3401 if (reg->smax_value >= BPF_MAX_VAR_OFF || 3402 reg->smax_value <= -BPF_MAX_VAR_OFF) { 3403 verbose(env, "R%d unbounded indirect variable offset stack access\n", 3404 regno); 3405 return -EACCES; 3406 } 3407 min_off = reg->smin_value + reg->off; 3408 max_off = reg->smax_value + reg->off; 3409 err = __check_stack_boundary(env, regno, min_off, access_size, 3410 zero_size_allowed); 3411 if (err) { 3412 verbose(env, "R%d min value is outside of stack bound\n", 3413 regno); 3414 return err; 3415 } 3416 err = __check_stack_boundary(env, regno, max_off, access_size, 3417 zero_size_allowed); 3418 if (err) { 3419 verbose(env, "R%d max value is outside of stack bound\n", 3420 regno); 3421 return err; 3422 } 3423 } 3424 3425 if (meta && meta->raw_mode) { 3426 meta->access_size = access_size; 3427 meta->regno = regno; 3428 return 0; 3429 } 3430 3431 for (i = min_off; i < max_off + access_size; i++) { 3432 u8 *stype; 3433 3434 slot = -i - 1; 3435 spi = slot / BPF_REG_SIZE; 3436 if (state->allocated_stack <= slot) 3437 goto err; 3438 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 3439 if (*stype == STACK_MISC) 3440 goto mark; 3441 if (*stype == STACK_ZERO) { 3442 /* helper can write anything into the stack */ 3443 *stype = STACK_MISC; 3444 goto mark; 3445 } 3446 if (state->stack[spi].slot_type[0] == STACK_SPILL && 3447 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { 3448 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 3449 for (j = 0; j < BPF_REG_SIZE; j++) 3450 state->stack[spi].slot_type[j] = STACK_MISC; 3451 goto mark; 3452 } 3453 3454 err: 3455 if (tnum_is_const(reg->var_off)) { 3456 verbose(env, "invalid indirect read from stack off %d+%d size %d\n", 3457 min_off, i - min_off, access_size); 3458 } else { 3459 char tn_buf[48]; 3460 3461 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3462 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n", 3463 tn_buf, i - min_off, access_size); 3464 } 3465 return -EACCES; 3466 mark: 3467 /* reading any byte out of 8-byte 'spill_slot' will cause 3468 * the whole slot to be marked as 'read' 3469 */ 3470 mark_reg_read(env, &state->stack[spi].spilled_ptr, 3471 state->stack[spi].spilled_ptr.parent, 3472 REG_LIVE_READ64); 3473 } 3474 return update_stack_depth(env, state, min_off); 3475 } 3476 3477 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 3478 int access_size, bool zero_size_allowed, 3479 struct bpf_call_arg_meta *meta) 3480 { 3481 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 3482 3483 switch (reg->type) { 3484 case PTR_TO_PACKET: 3485 case PTR_TO_PACKET_META: 3486 return check_packet_access(env, regno, reg->off, access_size, 3487 zero_size_allowed); 3488 case PTR_TO_MAP_VALUE: 3489 if (check_map_access_type(env, regno, reg->off, access_size, 3490 meta && meta->raw_mode ? BPF_WRITE : 3491 BPF_READ)) 3492 return -EACCES; 3493 return check_map_access(env, regno, reg->off, access_size, 3494 zero_size_allowed); 3495 default: /* scalar_value|ptr_to_stack or invalid ptr */ 3496 return check_stack_boundary(env, regno, access_size, 3497 zero_size_allowed, meta); 3498 } 3499 } 3500 3501 /* Implementation details: 3502 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL 3503 * Two bpf_map_lookups (even with the same key) will have different reg->id. 
3504 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after 3505 * value_or_null->value transition, since the verifier only cares about 3506 * the range of access to valid map value pointer and doesn't care about actual 3507 * address of the map element. 3508 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 3509 * reg->id > 0 after value_or_null->value transition. By doing so 3510 * two bpf_map_lookups will be considered two different pointers that 3511 * point to different bpf_spin_locks. 3512 * The verifier allows taking only one bpf_spin_lock at a time to avoid 3513 * dead-locks. 3514 * Since only one bpf_spin_lock is allowed the checks are simpler than 3515 * reg_is_refcounted() logic. The verifier needs to remember only 3516 * one spin_lock instead of array of acquired_refs. 3517 * cur_state->active_spin_lock remembers which map value element got locked 3518 * and clears it after bpf_spin_unlock. 3519 */ 3520 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 3521 bool is_lock) 3522 { 3523 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 3524 struct bpf_verifier_state *cur = env->cur_state; 3525 bool is_const = tnum_is_const(reg->var_off); 3526 struct bpf_map *map = reg->map_ptr; 3527 u64 val = reg->var_off.value; 3528 3529 if (reg->type != PTR_TO_MAP_VALUE) { 3530 verbose(env, "R%d is not a pointer to map_value\n", regno); 3531 return -EINVAL; 3532 } 3533 if (!is_const) { 3534 verbose(env, 3535 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", 3536 regno); 3537 return -EINVAL; 3538 } 3539 if (!map->btf) { 3540 verbose(env, 3541 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 3542 map->name); 3543 return -EINVAL; 3544 } 3545 if (!map_value_has_spin_lock(map)) { 3546 if (map->spin_lock_off == -E2BIG) 3547 verbose(env, 3548 "map '%s' has more than one 'struct bpf_spin_lock'\n", 3549 map->name); 3550 else if (map->spin_lock_off == -ENOENT) 3551 verbose(env, 3552 "map '%s' doesn't have 'struct bpf_spin_lock'\n", 3553 map->name); 3554 else 3555 verbose(env, 3556 "map '%s' is not a struct type or bpf_spin_lock is mangled\n", 3557 map->name); 3558 return -EINVAL; 3559 } 3560 if (map->spin_lock_off != val + reg->off) { 3561 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", 3562 val + reg->off); 3563 return -EINVAL; 3564 } 3565 if (is_lock) { 3566 if (cur->active_spin_lock) { 3567 verbose(env, 3568 "Locking two bpf_spin_locks are not allowed\n"); 3569 return -EINVAL; 3570 } 3571 cur->active_spin_lock = reg->id; 3572 } else { 3573 if (!cur->active_spin_lock) { 3574 verbose(env, "bpf_spin_unlock without taking a lock\n"); 3575 return -EINVAL; 3576 } 3577 if (cur->active_spin_lock != reg->id) { 3578 verbose(env, "bpf_spin_unlock of different lock\n"); 3579 return -EINVAL; 3580 } 3581 cur->active_spin_lock = 0; 3582 } 3583 return 0; 3584 } 3585 3586 static bool arg_type_is_mem_ptr(enum bpf_arg_type type) 3587 { 3588 return type == ARG_PTR_TO_MEM || 3589 type == ARG_PTR_TO_MEM_OR_NULL || 3590 type == ARG_PTR_TO_UNINIT_MEM; 3591 } 3592 3593 static bool arg_type_is_mem_size(enum bpf_arg_type type) 3594 { 3595 return type == ARG_CONST_SIZE || 3596 type == ARG_CONST_SIZE_OR_ZERO; 3597 } 3598 3599 static bool arg_type_is_int_ptr(enum bpf_arg_type type) 3600 { 3601 return type == ARG_PTR_TO_INT || 3602 type == ARG_PTR_TO_LONG; 3603 } 3604 3605 static int int_ptr_type_to_size(enum bpf_arg_type type) 3606 { 3607 if (type == ARG_PTR_TO_INT) 3608 return sizeof(u32); 3609 
3609 else if (type == ARG_PTR_TO_LONG)
3610 return sizeof(u64);
3611
3612 return -EINVAL;
3613 }
3614
3615 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
3616 enum bpf_arg_type arg_type,
3617 struct bpf_call_arg_meta *meta)
3618 {
3619 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3620 enum bpf_reg_type expected_type, type = reg->type;
3621 int err = 0;
3622
3623 if (arg_type == ARG_DONTCARE)
3624 return 0;
3625
3626 err = check_reg_arg(env, regno, SRC_OP);
3627 if (err)
3628 return err;
3629
3630 if (arg_type == ARG_ANYTHING) {
3631 if (is_pointer_value(env, regno)) {
3632 verbose(env, "R%d leaks addr into helper function\n",
3633 regno);
3634 return -EACCES;
3635 }
3636 return 0;
3637 }
3638
3639 if (type_is_pkt_pointer(type) &&
3640 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
3641 verbose(env, "helper access to the packet is not allowed\n");
3642 return -EACCES;
3643 }
3644
3645 if (arg_type == ARG_PTR_TO_MAP_KEY ||
3646 arg_type == ARG_PTR_TO_MAP_VALUE ||
3647 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3648 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
3649 expected_type = PTR_TO_STACK;
3650 if (register_is_null(reg) &&
3651 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3652 /* final test in check_stack_boundary() */;
3653 else if (!type_is_pkt_pointer(type) &&
3654 type != PTR_TO_MAP_VALUE &&
3655 type != expected_type)
3656 goto err_type;
3657 } else if (arg_type == ARG_CONST_SIZE ||
3658 arg_type == ARG_CONST_SIZE_OR_ZERO) {
3659 expected_type = SCALAR_VALUE;
3660 if (type != expected_type)
3661 goto err_type;
3662 } else if (arg_type == ARG_CONST_MAP_PTR) {
3663 expected_type = CONST_PTR_TO_MAP;
3664 if (type != expected_type)
3665 goto err_type;
3666 } else if (arg_type == ARG_PTR_TO_CTX ||
3667 arg_type == ARG_PTR_TO_CTX_OR_NULL) {
3668 expected_type = PTR_TO_CTX;
3669 if (!(register_is_null(reg) &&
3670 arg_type == ARG_PTR_TO_CTX_OR_NULL)) {
3671 if (type != expected_type)
3672 goto err_type;
3673 err = check_ctx_reg(env, reg, regno);
3674 if (err < 0)
3675 return err;
3676 }
3677 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
3678 expected_type = PTR_TO_SOCK_COMMON;
3679 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3680 if (!type_is_sk_pointer(type))
3681 goto err_type;
3682 if (reg->ref_obj_id) {
3683 if (meta->ref_obj_id) {
3684 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3685 regno, reg->ref_obj_id,
3686 meta->ref_obj_id);
3687 return -EFAULT;
3688 }
3689 meta->ref_obj_id = reg->ref_obj_id;
3690 }
3691 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3692 expected_type = PTR_TO_SOCKET;
3693 if (type != expected_type)
3694 goto err_type;
3695 } else if (arg_type == ARG_PTR_TO_BTF_ID) {
3696 expected_type = PTR_TO_BTF_ID;
3697 if (type != expected_type)
3698 goto err_type;
3699 if (reg->btf_id != meta->btf_id) {
3700 verbose(env, "Helper has type %s got %s in R%d\n",
3701 kernel_type_name(meta->btf_id),
3702 kernel_type_name(reg->btf_id), regno);
3703
3704 return -EACCES;
3705 }
3706 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
3707 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3708 regno);
3709 return -EACCES;
3710 }
3711 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3712 if (meta->func_id == BPF_FUNC_spin_lock) {
3713 if (process_spin_lock(env, regno, true))
3714 return -EACCES;
3715 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3716 if (process_spin_lock(env, regno, false))
3717 return -EACCES;
3718 } else {
3719 verbose(env, "verifier 
internal error\n"); 3720 return -EFAULT; 3721 } 3722 } else if (arg_type_is_mem_ptr(arg_type)) { 3723 expected_type = PTR_TO_STACK; 3724 /* One exception here. In case function allows for NULL to be 3725 * passed in as argument, it's a SCALAR_VALUE type. Final test 3726 * happens during stack boundary checking. 3727 */ 3728 if (register_is_null(reg) && 3729 arg_type == ARG_PTR_TO_MEM_OR_NULL) 3730 /* final test in check_stack_boundary() */; 3731 else if (!type_is_pkt_pointer(type) && 3732 type != PTR_TO_MAP_VALUE && 3733 type != expected_type) 3734 goto err_type; 3735 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; 3736 } else if (arg_type_is_int_ptr(arg_type)) { 3737 expected_type = PTR_TO_STACK; 3738 if (!type_is_pkt_pointer(type) && 3739 type != PTR_TO_MAP_VALUE && 3740 type != expected_type) 3741 goto err_type; 3742 } else { 3743 verbose(env, "unsupported arg_type %d\n", arg_type); 3744 return -EFAULT; 3745 } 3746 3747 if (arg_type == ARG_CONST_MAP_PTR) { 3748 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 3749 meta->map_ptr = reg->map_ptr; 3750 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 3751 /* bpf_map_xxx(..., map_ptr, ..., key) call: 3752 * check that [key, key + map->key_size) are within 3753 * stack limits and initialized 3754 */ 3755 if (!meta->map_ptr) { 3756 /* in function declaration map_ptr must come before 3757 * map_key, so that it's verified and known before 3758 * we have to check map_key here. Otherwise it means 3759 * that kernel subsystem misconfigured verifier 3760 */ 3761 verbose(env, "invalid map_ptr to access map->key\n"); 3762 return -EACCES; 3763 } 3764 err = check_helper_mem_access(env, regno, 3765 meta->map_ptr->key_size, false, 3766 NULL); 3767 } else if (arg_type == ARG_PTR_TO_MAP_VALUE || 3768 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && 3769 !register_is_null(reg)) || 3770 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { 3771 /* bpf_map_xxx(..., map_ptr, ..., value) call: 3772 * check [value, value + map->value_size) validity 3773 */ 3774 if (!meta->map_ptr) { 3775 /* kernel subsystem misconfigured verifier */ 3776 verbose(env, "invalid map_ptr to access map->value\n"); 3777 return -EACCES; 3778 } 3779 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); 3780 err = check_helper_mem_access(env, regno, 3781 meta->map_ptr->value_size, false, 3782 meta); 3783 } else if (arg_type_is_mem_size(arg_type)) { 3784 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 3785 3786 /* This is used to refine r0 return value bounds for helpers 3787 * that enforce this value as an upper bound on return values. 3788 * See do_refine_retval_range() for helpers that can refine 3789 * the return value. C type of helper is u32 so we pull register 3790 * bound from umax_value however, if negative verifier errors 3791 * out. Only upper bounds can be learned because retval is an 3792 * int type and negative retvals are allowed. 3793 */ 3794 meta->msize_max_value = reg->umax_value; 3795 3796 /* The register is SCALAR_VALUE; the access check 3797 * happens using its boundaries. 3798 */ 3799 if (!tnum_is_const(reg->var_off)) 3800 /* For unprivileged variable accesses, disable raw 3801 * mode so that the program is required to 3802 * initialize all the memory that the helper could 3803 * just partially fill up. 
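 * For example, for a helper taking (ARG_PTR_TO_UNINIT_MEM, ARG_CONST_SIZE)
 * arguments such as bpf_probe_read_user(buf, len, ptr) (illustrative case):
 * if 'len' is a bounded but non-constant scalar, meta is cleared here, so
 * the stack area behind 'buf' must already be initialized up to the largest
 * value that 'len' can take.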
3804 */ 3805 meta = NULL; 3806 3807 if (reg->smin_value < 0) { 3808 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 3809 regno); 3810 return -EACCES; 3811 } 3812 3813 if (reg->umin_value == 0) { 3814 err = check_helper_mem_access(env, regno - 1, 0, 3815 zero_size_allowed, 3816 meta); 3817 if (err) 3818 return err; 3819 } 3820 3821 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 3822 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 3823 regno); 3824 return -EACCES; 3825 } 3826 err = check_helper_mem_access(env, regno - 1, 3827 reg->umax_value, 3828 zero_size_allowed, meta); 3829 if (!err) 3830 err = mark_chain_precision(env, regno); 3831 } else if (arg_type_is_int_ptr(arg_type)) { 3832 int size = int_ptr_type_to_size(arg_type); 3833 3834 err = check_helper_mem_access(env, regno, size, false, meta); 3835 if (err) 3836 return err; 3837 err = check_ptr_alignment(env, reg, 0, size, true); 3838 } 3839 3840 return err; 3841 err_type: 3842 verbose(env, "R%d type=%s expected=%s\n", regno, 3843 reg_type_str[type], reg_type_str[expected_type]); 3844 return -EACCES; 3845 } 3846 3847 static int check_map_func_compatibility(struct bpf_verifier_env *env, 3848 struct bpf_map *map, int func_id) 3849 { 3850 if (!map) 3851 return 0; 3852 3853 /* We need a two way check, first is from map perspective ... */ 3854 switch (map->map_type) { 3855 case BPF_MAP_TYPE_PROG_ARRAY: 3856 if (func_id != BPF_FUNC_tail_call) 3857 goto error; 3858 break; 3859 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 3860 if (func_id != BPF_FUNC_perf_event_read && 3861 func_id != BPF_FUNC_perf_event_output && 3862 func_id != BPF_FUNC_skb_output && 3863 func_id != BPF_FUNC_perf_event_read_value && 3864 func_id != BPF_FUNC_xdp_output) 3865 goto error; 3866 break; 3867 case BPF_MAP_TYPE_STACK_TRACE: 3868 if (func_id != BPF_FUNC_get_stackid) 3869 goto error; 3870 break; 3871 case BPF_MAP_TYPE_CGROUP_ARRAY: 3872 if (func_id != BPF_FUNC_skb_under_cgroup && 3873 func_id != BPF_FUNC_current_task_under_cgroup) 3874 goto error; 3875 break; 3876 case BPF_MAP_TYPE_CGROUP_STORAGE: 3877 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 3878 if (func_id != BPF_FUNC_get_local_storage) 3879 goto error; 3880 break; 3881 case BPF_MAP_TYPE_DEVMAP: 3882 case BPF_MAP_TYPE_DEVMAP_HASH: 3883 if (func_id != BPF_FUNC_redirect_map && 3884 func_id != BPF_FUNC_map_lookup_elem) 3885 goto error; 3886 break; 3887 /* Restrict bpf side of cpumap and xskmap, open when use-cases 3888 * appear. 
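 * For example, given the cases below, a program calling
 * bpf_map_lookup_elem() on a BPF_MAP_TYPE_CPUMAP falls through to the
 * error label and is rejected with
 * "cannot pass map_type %d into func %s#%d".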
3889 */ 3890 case BPF_MAP_TYPE_CPUMAP: 3891 if (func_id != BPF_FUNC_redirect_map) 3892 goto error; 3893 break; 3894 case BPF_MAP_TYPE_XSKMAP: 3895 if (func_id != BPF_FUNC_redirect_map && 3896 func_id != BPF_FUNC_map_lookup_elem) 3897 goto error; 3898 break; 3899 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 3900 case BPF_MAP_TYPE_HASH_OF_MAPS: 3901 if (func_id != BPF_FUNC_map_lookup_elem) 3902 goto error; 3903 break; 3904 case BPF_MAP_TYPE_SOCKMAP: 3905 if (func_id != BPF_FUNC_sk_redirect_map && 3906 func_id != BPF_FUNC_sock_map_update && 3907 func_id != BPF_FUNC_map_delete_elem && 3908 func_id != BPF_FUNC_msg_redirect_map && 3909 func_id != BPF_FUNC_sk_select_reuseport) 3910 goto error; 3911 break; 3912 case BPF_MAP_TYPE_SOCKHASH: 3913 if (func_id != BPF_FUNC_sk_redirect_hash && 3914 func_id != BPF_FUNC_sock_hash_update && 3915 func_id != BPF_FUNC_map_delete_elem && 3916 func_id != BPF_FUNC_msg_redirect_hash && 3917 func_id != BPF_FUNC_sk_select_reuseport) 3918 goto error; 3919 break; 3920 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 3921 if (func_id != BPF_FUNC_sk_select_reuseport) 3922 goto error; 3923 break; 3924 case BPF_MAP_TYPE_QUEUE: 3925 case BPF_MAP_TYPE_STACK: 3926 if (func_id != BPF_FUNC_map_peek_elem && 3927 func_id != BPF_FUNC_map_pop_elem && 3928 func_id != BPF_FUNC_map_push_elem) 3929 goto error; 3930 break; 3931 case BPF_MAP_TYPE_SK_STORAGE: 3932 if (func_id != BPF_FUNC_sk_storage_get && 3933 func_id != BPF_FUNC_sk_storage_delete) 3934 goto error; 3935 break; 3936 default: 3937 break; 3938 } 3939 3940 /* ... and second from the function itself. */ 3941 switch (func_id) { 3942 case BPF_FUNC_tail_call: 3943 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 3944 goto error; 3945 if (env->subprog_cnt > 1) { 3946 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); 3947 return -EINVAL; 3948 } 3949 break; 3950 case BPF_FUNC_perf_event_read: 3951 case BPF_FUNC_perf_event_output: 3952 case BPF_FUNC_perf_event_read_value: 3953 case BPF_FUNC_skb_output: 3954 case BPF_FUNC_xdp_output: 3955 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 3956 goto error; 3957 break; 3958 case BPF_FUNC_get_stackid: 3959 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 3960 goto error; 3961 break; 3962 case BPF_FUNC_current_task_under_cgroup: 3963 case BPF_FUNC_skb_under_cgroup: 3964 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 3965 goto error; 3966 break; 3967 case BPF_FUNC_redirect_map: 3968 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 3969 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 3970 map->map_type != BPF_MAP_TYPE_CPUMAP && 3971 map->map_type != BPF_MAP_TYPE_XSKMAP) 3972 goto error; 3973 break; 3974 case BPF_FUNC_sk_redirect_map: 3975 case BPF_FUNC_msg_redirect_map: 3976 case BPF_FUNC_sock_map_update: 3977 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 3978 goto error; 3979 break; 3980 case BPF_FUNC_sk_redirect_hash: 3981 case BPF_FUNC_msg_redirect_hash: 3982 case BPF_FUNC_sock_hash_update: 3983 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 3984 goto error; 3985 break; 3986 case BPF_FUNC_get_local_storage: 3987 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 3988 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 3989 goto error; 3990 break; 3991 case BPF_FUNC_sk_select_reuseport: 3992 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 3993 map->map_type != BPF_MAP_TYPE_SOCKMAP && 3994 map->map_type != BPF_MAP_TYPE_SOCKHASH) 3995 goto error; 3996 break; 3997 case BPF_FUNC_map_peek_elem: 3998 case BPF_FUNC_map_pop_elem: 3999 case BPF_FUNC_map_push_elem: 4000 if (map->map_type != 
BPF_MAP_TYPE_QUEUE && 4001 map->map_type != BPF_MAP_TYPE_STACK) 4002 goto error; 4003 break; 4004 case BPF_FUNC_sk_storage_get: 4005 case BPF_FUNC_sk_storage_delete: 4006 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 4007 goto error; 4008 break; 4009 default: 4010 break; 4011 } 4012 4013 return 0; 4014 error: 4015 verbose(env, "cannot pass map_type %d into func %s#%d\n", 4016 map->map_type, func_id_name(func_id), func_id); 4017 return -EINVAL; 4018 } 4019 4020 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 4021 { 4022 int count = 0; 4023 4024 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 4025 count++; 4026 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 4027 count++; 4028 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 4029 count++; 4030 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 4031 count++; 4032 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 4033 count++; 4034 4035 /* We only support one arg being in raw mode at the moment, 4036 * which is sufficient for the helper functions we have 4037 * right now. 4038 */ 4039 return count <= 1; 4040 } 4041 4042 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, 4043 enum bpf_arg_type arg_next) 4044 { 4045 return (arg_type_is_mem_ptr(arg_curr) && 4046 !arg_type_is_mem_size(arg_next)) || 4047 (!arg_type_is_mem_ptr(arg_curr) && 4048 arg_type_is_mem_size(arg_next)); 4049 } 4050 4051 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 4052 { 4053 /* bpf_xxx(..., buf, len) call will access 'len' 4054 * bytes from memory 'buf'. Both arg types need 4055 * to be paired, so make sure there's no buggy 4056 * helper function specification. 4057 */ 4058 if (arg_type_is_mem_size(fn->arg1_type) || 4059 arg_type_is_mem_ptr(fn->arg5_type) || 4060 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || 4061 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || 4062 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || 4063 check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) 4064 return false; 4065 4066 return true; 4067 } 4068 4069 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) 4070 { 4071 int count = 0; 4072 4073 if (arg_type_may_be_refcounted(fn->arg1_type)) 4074 count++; 4075 if (arg_type_may_be_refcounted(fn->arg2_type)) 4076 count++; 4077 if (arg_type_may_be_refcounted(fn->arg3_type)) 4078 count++; 4079 if (arg_type_may_be_refcounted(fn->arg4_type)) 4080 count++; 4081 if (arg_type_may_be_refcounted(fn->arg5_type)) 4082 count++; 4083 4084 /* A reference acquiring function cannot acquire 4085 * another refcounted ptr. 4086 */ 4087 if (is_acquire_function(func_id) && count) 4088 return false; 4089 4090 /* We only support one arg being unreferenced at the moment, 4091 * which is sufficient for the helper functions we have right now. 4092 */ 4093 return count <= 1; 4094 } 4095 4096 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 4097 { 4098 return check_raw_mode_ok(fn) && 4099 check_arg_pair_ok(fn) && 4100 check_refcount_ok(fn, func_id) ? 0 : -EINVAL; 4101 } 4102 4103 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 4104 * are now invalid, so turn them into unknown SCALAR_VALUE. 
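 * For example, after a program calls bpf_skb_pull_data() any previously
 * derived skb->data / skb->data_end packet pointers may be stale, so every
 * PTR_TO_PACKET[_META,_END] register and spilled slot is scalar-ized here
 * and the program has to re-load and re-check the packet pointers before
 * its next direct packet access.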
4105 */
4106 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
4107 struct bpf_func_state *state)
4108 {
4109 struct bpf_reg_state *regs = state->regs, *reg;
4110 int i;
4111
4112 for (i = 0; i < MAX_BPF_REG; i++)
4113 if (reg_is_pkt_pointer_any(&regs[i]))
4114 mark_reg_unknown(env, regs, i);
4115
4116 bpf_for_each_spilled_reg(i, state, reg) {
4117 if (!reg)
4118 continue;
4119 if (reg_is_pkt_pointer_any(reg))
4120 __mark_reg_unknown(env, reg);
4121 }
4122 }
4123
4124 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
4125 {
4126 struct bpf_verifier_state *vstate = env->cur_state;
4127 int i;
4128
4129 for (i = 0; i <= vstate->curframe; i++)
4130 __clear_all_pkt_pointers(env, vstate->frame[i]);
4131 }
4132
4133 static void release_reg_references(struct bpf_verifier_env *env,
4134 struct bpf_func_state *state,
4135 int ref_obj_id)
4136 {
4137 struct bpf_reg_state *regs = state->regs, *reg;
4138 int i;
4139
4140 for (i = 0; i < MAX_BPF_REG; i++)
4141 if (regs[i].ref_obj_id == ref_obj_id)
4142 mark_reg_unknown(env, regs, i);
4143
4144 bpf_for_each_spilled_reg(i, state, reg) {
4145 if (!reg)
4146 continue;
4147 if (reg->ref_obj_id == ref_obj_id)
4148 __mark_reg_unknown(env, reg);
4149 }
4150 }
4151
4152 /* The pointer with the specified id has released its reference to kernel
4153 * resources. Identify all copies of the same pointer and clear the reference.
4154 */
4155 static int release_reference(struct bpf_verifier_env *env,
4156 int ref_obj_id)
4157 {
4158 struct bpf_verifier_state *vstate = env->cur_state;
4159 int err;
4160 int i;
4161
4162 err = release_reference_state(cur_func(env), ref_obj_id);
4163 if (err)
4164 return err;
4165
4166 for (i = 0; i <= vstate->curframe; i++)
4167 release_reg_references(env, vstate->frame[i], ref_obj_id);
4168
4169 return 0;
4170 }
4171
4172 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
4173 struct bpf_reg_state *regs)
4174 {
4175 int i;
4176
4177 /* after the call registers r0 - r5 were scratched */
4178 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4179 mark_reg_not_init(env, regs, caller_saved[i]);
4180 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4181 }
4182 }
4183
4184 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
4185 int *insn_idx)
4186 {
4187 struct bpf_verifier_state *state = env->cur_state;
4188 struct bpf_func_info_aux *func_info_aux;
4189 struct bpf_func_state *caller, *callee;
4190 int i, err, subprog, target_insn;
4191 bool is_global = false;
4192
4193 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
4194 verbose(env, "the call stack of %d frames is too deep\n",
4195 state->curframe + 2);
4196 return -E2BIG;
4197 }
4198
4199 target_insn = *insn_idx + insn->imm;
4200 subprog = find_subprog(env, target_insn + 1);
4201 if (subprog < 0) {
4202 verbose(env, "verifier bug. No program starts at insn %d\n",
4203 target_insn + 1);
4204 return -EFAULT;
4205 }
4206
4207 caller = state->frame[state->curframe];
4208 if (state->frame[state->curframe + 1]) {
4209 verbose(env, "verifier bug. 
Frame %d already allocated\n", 4210 state->curframe + 1); 4211 return -EFAULT; 4212 } 4213 4214 func_info_aux = env->prog->aux->func_info_aux; 4215 if (func_info_aux) 4216 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 4217 err = btf_check_func_arg_match(env, subprog, caller->regs); 4218 if (err == -EFAULT) 4219 return err; 4220 if (is_global) { 4221 if (err) { 4222 verbose(env, "Caller passes invalid args into func#%d\n", 4223 subprog); 4224 return err; 4225 } else { 4226 if (env->log.level & BPF_LOG_LEVEL) 4227 verbose(env, 4228 "Func#%d is global and valid. Skipping.\n", 4229 subprog); 4230 clear_caller_saved_regs(env, caller->regs); 4231 4232 /* All global functions return SCALAR_VALUE */ 4233 mark_reg_unknown(env, caller->regs, BPF_REG_0); 4234 4235 /* continue with next insn after call */ 4236 return 0; 4237 } 4238 } 4239 4240 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 4241 if (!callee) 4242 return -ENOMEM; 4243 state->frame[state->curframe + 1] = callee; 4244 4245 /* callee cannot access r0, r6 - r9 for reading and has to write 4246 * into its own stack before reading from it. 4247 * callee can read/write into caller's stack 4248 */ 4249 init_func_state(env, callee, 4250 /* remember the callsite, it will be used by bpf_exit */ 4251 *insn_idx /* callsite */, 4252 state->curframe + 1 /* frameno within this callchain */, 4253 subprog /* subprog number within this prog */); 4254 4255 /* Transfer references to the callee */ 4256 err = transfer_reference_state(callee, caller); 4257 if (err) 4258 return err; 4259 4260 /* copy r1 - r5 args that callee can access. The copy includes parent 4261 * pointers, which connects us up to the liveness chain 4262 */ 4263 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 4264 callee->regs[i] = caller->regs[i]; 4265 4266 clear_caller_saved_regs(env, caller->regs); 4267 4268 /* only increment it after check_reg_arg() finished */ 4269 state->curframe++; 4270 4271 /* and go analyze first insn of the callee */ 4272 *insn_idx = target_insn; 4273 4274 if (env->log.level & BPF_LOG_LEVEL) { 4275 verbose(env, "caller:\n"); 4276 print_verifier_state(env, caller); 4277 verbose(env, "callee:\n"); 4278 print_verifier_state(env, callee); 4279 } 4280 return 0; 4281 } 4282 4283 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 4284 { 4285 struct bpf_verifier_state *state = env->cur_state; 4286 struct bpf_func_state *caller, *callee; 4287 struct bpf_reg_state *r0; 4288 int err; 4289 4290 callee = state->frame[state->curframe]; 4291 r0 = &callee->regs[BPF_REG_0]; 4292 if (r0->type == PTR_TO_STACK) { 4293 /* technically it's ok to return caller's stack pointer 4294 * (or caller's caller's pointer) back to the caller, 4295 * since these pointers are valid. 
Only current stack
4296 * pointer will be invalid as soon as function exits,
4297 * but let's be conservative
4298 */
4299 verbose(env, "cannot return stack pointer to the caller\n");
4300 return -EINVAL;
4301 }
4302
4303 state->curframe--;
4304 caller = state->frame[state->curframe];
4305 /* return to the caller whatever r0 had in the callee */
4306 caller->regs[BPF_REG_0] = *r0;
4307
4308 /* Transfer references to the caller */
4309 err = transfer_reference_state(caller, callee);
4310 if (err)
4311 return err;
4312
4313 *insn_idx = callee->callsite + 1;
4314 if (env->log.level & BPF_LOG_LEVEL) {
4315 verbose(env, "returning from callee:\n");
4316 print_verifier_state(env, callee);
4317 verbose(env, "to caller at %d:\n", *insn_idx);
4318 print_verifier_state(env, caller);
4319 }
4320 /* clear everything in the callee */
4321 free_func_state(callee);
4322 state->frame[state->curframe + 1] = NULL;
4323 return 0;
4324 }
4325
4326 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4327 int func_id,
4328 struct bpf_call_arg_meta *meta)
4329 {
4330 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4331
4332 if (ret_type != RET_INTEGER ||
4333 (func_id != BPF_FUNC_get_stack &&
4334 func_id != BPF_FUNC_probe_read_str))
4335 return;
4336
4337 ret_reg->smax_value = meta->msize_max_value;
4338 ret_reg->s32_max_value = meta->msize_max_value;
4339 __reg_deduce_bounds(ret_reg);
4340 __reg_bound_offset(ret_reg);
4341 __update_reg_bounds(ret_reg);
4342 }
4343
4344 static int
4345 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4346 int func_id, int insn_idx)
4347 {
4348 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4349 struct bpf_map *map = meta->map_ptr;
4350
4351 if (func_id != BPF_FUNC_tail_call &&
4352 func_id != BPF_FUNC_map_lookup_elem &&
4353 func_id != BPF_FUNC_map_update_elem &&
4354 func_id != BPF_FUNC_map_delete_elem &&
4355 func_id != BPF_FUNC_map_push_elem &&
4356 func_id != BPF_FUNC_map_pop_elem &&
4357 func_id != BPF_FUNC_map_peek_elem)
4358 return 0;
4359
4360 if (map == NULL) {
4361 verbose(env, "kernel subsystem misconfigured verifier\n");
4362 return -EINVAL;
4363 }
4364
4365 /* In case of read-only, some additional restrictions
4366 * need to be applied in order to prevent altering the
4367 * state of the map from program side.
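 * For example, for a BPF_F_RDONLY_PROG array map a lookup is still fine,
 * but bpf_map_update_elem(), bpf_map_delete_elem(), bpf_map_push_elem()
 * or bpf_map_pop_elem() on it is rejected below with
 * "write into map forbidden".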
4368 */
4369 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
4370 (func_id == BPF_FUNC_map_delete_elem ||
4371 func_id == BPF_FUNC_map_update_elem ||
4372 func_id == BPF_FUNC_map_push_elem ||
4373 func_id == BPF_FUNC_map_pop_elem)) {
4374 verbose(env, "write into map forbidden\n");
4375 return -EACCES;
4376 }
4377
4378 if (!BPF_MAP_PTR(aux->map_ptr_state))
4379 bpf_map_ptr_store(aux, meta->map_ptr,
4380 meta->map_ptr->unpriv_array);
4381 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
4382 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
4383 meta->map_ptr->unpriv_array);
4384 return 0;
4385 }
4386
4387 static int
4388 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4389 int func_id, int insn_idx)
4390 {
4391 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4392 struct bpf_reg_state *regs = cur_regs(env), *reg;
4393 struct bpf_map *map = meta->map_ptr;
4394 struct tnum range;
4395 u64 val;
4396 int err;
4397
4398 if (func_id != BPF_FUNC_tail_call)
4399 return 0;
4400 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4401 verbose(env, "kernel subsystem misconfigured verifier\n");
4402 return -EINVAL;
4403 }
4404
4405 range = tnum_range(0, map->max_entries - 1);
4406 reg = &regs[BPF_REG_3];
4407
4408 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
4409 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4410 return 0;
4411 }
4412
4413 err = mark_chain_precision(env, BPF_REG_3);
4414 if (err)
4415 return err;
4416
4417 val = reg->var_off.value;
4418 if (bpf_map_key_unseen(aux))
4419 bpf_map_key_store(aux, val);
4420 else if (!bpf_map_key_poisoned(aux) &&
4421 bpf_map_key_immediate(aux) != val)
4422 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4423 return 0;
4424 }
4425
4426 static int check_reference_leak(struct bpf_verifier_env *env)
4427 {
4428 struct bpf_func_state *state = cur_func(env);
4429 int i;
4430
4431 for (i = 0; i < state->acquired_refs; i++) {
4432 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
4433 state->refs[i].id, state->refs[i].insn_idx);
4434 }
4435 return state->acquired_refs ? -EINVAL : 0;
4436 }
4437
4438 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
4439 {
4440 const struct bpf_func_proto *fn = NULL;
4441 struct bpf_reg_state *regs;
4442 struct bpf_call_arg_meta meta;
4443 bool changes_data;
4444 int i, err;
4445
4446 /* find function prototype */
4447 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
4448 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
4449 func_id);
4450 return -EINVAL;
4451 }
4452
4453 if (env->ops->get_func_proto)
4454 fn = env->ops->get_func_proto(func_id, env->prog);
4455 if (!fn) {
4456 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
4457 func_id);
4458 return -EINVAL;
4459 }
4460
4461 /* eBPF programs must be GPL compatible to use GPL-ed functions */
4462 if (!env->prog->gpl_compatible && fn->gpl_only) {
4463 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
4464 return -EINVAL;
4465 }
4466
4467 /* With LD_ABS/IND some JITs save/restore skb from r1. 
*/
4468 changes_data = bpf_helper_changes_pkt_data(fn->func);
4469 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
4470 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
4471 func_id_name(func_id), func_id);
4472 return -EINVAL;
4473 }
4474
4475 memset(&meta, 0, sizeof(meta));
4476 meta.pkt_access = fn->pkt_access;
4477
4478 err = check_func_proto(fn, func_id);
4479 if (err) {
4480 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
4481 func_id_name(func_id), func_id);
4482 return err;
4483 }
4484
4485 meta.func_id = func_id;
4486 /* check args */
4487 for (i = 0; i < 5; i++) {
4488 err = btf_resolve_helper_id(&env->log, fn, i);
4489 if (err > 0)
4490 meta.btf_id = err;
4491 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
4492 if (err)
4493 return err;
4494 }
4495
4496 err = record_func_map(env, &meta, func_id, insn_idx);
4497 if (err)
4498 return err;
4499
4500 err = record_func_key(env, &meta, func_id, insn_idx);
4501 if (err)
4502 return err;
4503
4504 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4505 * is inferred from register state.
4506 */
4507 for (i = 0; i < meta.access_size; i++) {
4508 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
4509 BPF_WRITE, -1, false);
4510 if (err)
4511 return err;
4512 }
4513
4514 if (func_id == BPF_FUNC_tail_call) {
4515 err = check_reference_leak(env);
4516 if (err) {
4517 verbose(env, "tail_call would lead to reference leak\n");
4518 return err;
4519 }
4520 } else if (is_release_function(func_id)) {
4521 err = release_reference(env, meta.ref_obj_id);
4522 if (err) {
4523 verbose(env, "func %s#%d reference has not been acquired before\n",
4524 func_id_name(func_id), func_id);
4525 return err;
4526 }
4527 }
4528
4529 regs = cur_regs(env);
4530
4531 /* check that flags argument in get_local_storage(map, flags) is 0,
4532 * this is required because get_local_storage() can't return an error.
4533 */
4534 if (func_id == BPF_FUNC_get_local_storage &&
4535 !register_is_null(&regs[BPF_REG_2])) {
4536 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4537 return -EINVAL;
4538 }
4539
4540 /* reset caller saved regs */
4541 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4542 mark_reg_not_init(env, regs, caller_saved[i]);
4543 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4544 }
4545
4546 /* helper call returns 64-bit value. 
*/ 4547 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 4548 4549 /* update return register (already marked as written above) */ 4550 if (fn->ret_type == RET_INTEGER) { 4551 /* sets type to SCALAR_VALUE */ 4552 mark_reg_unknown(env, regs, BPF_REG_0); 4553 } else if (fn->ret_type == RET_VOID) { 4554 regs[BPF_REG_0].type = NOT_INIT; 4555 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || 4556 fn->ret_type == RET_PTR_TO_MAP_VALUE) { 4557 /* There is no offset yet applied, variable or fixed */ 4558 mark_reg_known_zero(env, regs, BPF_REG_0); 4559 /* remember map_ptr, so that check_map_access() 4560 * can check 'value_size' boundary of memory access 4561 * to map element returned from bpf_map_lookup_elem() 4562 */ 4563 if (meta.map_ptr == NULL) { 4564 verbose(env, 4565 "kernel subsystem misconfigured verifier\n"); 4566 return -EINVAL; 4567 } 4568 regs[BPF_REG_0].map_ptr = meta.map_ptr; 4569 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { 4570 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; 4571 if (map_value_has_spin_lock(meta.map_ptr)) 4572 regs[BPF_REG_0].id = ++env->id_gen; 4573 } else { 4574 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 4575 regs[BPF_REG_0].id = ++env->id_gen; 4576 } 4577 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { 4578 mark_reg_known_zero(env, regs, BPF_REG_0); 4579 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; 4580 regs[BPF_REG_0].id = ++env->id_gen; 4581 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { 4582 mark_reg_known_zero(env, regs, BPF_REG_0); 4583 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; 4584 regs[BPF_REG_0].id = ++env->id_gen; 4585 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { 4586 mark_reg_known_zero(env, regs, BPF_REG_0); 4587 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; 4588 regs[BPF_REG_0].id = ++env->id_gen; 4589 } else { 4590 verbose(env, "unknown return type %d of func %s#%d\n", 4591 fn->ret_type, func_id_name(func_id), func_id); 4592 return -EINVAL; 4593 } 4594 4595 if (is_ptr_cast_function(func_id)) { 4596 /* For release_reference() */ 4597 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 4598 } else if (is_acquire_function(func_id)) { 4599 int id = acquire_reference_state(env, insn_idx); 4600 4601 if (id < 0) 4602 return id; 4603 /* For mark_ptr_or_null_reg() */ 4604 regs[BPF_REG_0].id = id; 4605 /* For release_reference() */ 4606 regs[BPF_REG_0].ref_obj_id = id; 4607 } 4608 4609 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 4610 4611 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 4612 if (err) 4613 return err; 4614 4615 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { 4616 const char *err_str; 4617 4618 #ifdef CONFIG_PERF_EVENTS 4619 err = get_callchain_buffers(sysctl_perf_event_max_stack); 4620 err_str = "cannot get callchain buffer for func %s#%d\n"; 4621 #else 4622 err = -ENOTSUPP; 4623 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 4624 #endif 4625 if (err) { 4626 verbose(env, err_str, func_id_name(func_id), func_id); 4627 return err; 4628 } 4629 4630 env->prog->has_callchain_buf = true; 4631 } 4632 4633 if (changes_data) 4634 clear_all_pkt_pointers(env); 4635 return 0; 4636 } 4637 4638 static bool signed_add_overflows(s64 a, s64 b) 4639 { 4640 /* Do the add in u64, where overflow is well-defined */ 4641 s64 res = (s64)((u64)a + (u64)b); 4642 4643 if (b < 0) 4644 return res > a; 4645 return res < a; 4646 } 4647 4648 static bool signed_add32_overflows(s64 a, s64 b) 4649 { 4650 /* Do the add in u32, where overflow is well-defined */ 4651 
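/* A worked example of the check below (values are illustrative):
 * a = S32_MAX, b = 1: res wraps around to S32_MIN; since b >= 0 the
 * "res < a" test fires and the addition is reported as overflowing.
 */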
s32 res = (s32)((u32)a + (u32)b);
4652
4653 if (b < 0)
4654 return res > a;
4655 return res < a;
4656 }
4657
4658 static bool signed_sub_overflows(s64 a, s64 b)
4659 {
4660 /* Do the sub in u64, where overflow is well-defined */
4661 s64 res = (s64)((u64)a - (u64)b);
4662
4663 if (b < 0)
4664 return res < a;
4665 return res > a;
4666 }
4667
4668 static bool signed_sub32_overflows(s32 a, s32 b)
4669 {
4670 /* Do the sub in u32, where overflow is well-defined */
4671 s32 res = (s32)((u32)a - (u32)b);
4672
4673 if (b < 0)
4674 return res < a;
4675 return res > a;
4676 }
4677
4678 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
4679 const struct bpf_reg_state *reg,
4680 enum bpf_reg_type type)
4681 {
4682 bool known = tnum_is_const(reg->var_off);
4683 s64 val = reg->var_off.value;
4684 s64 smin = reg->smin_value;
4685
4686 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
4687 verbose(env, "math between %s pointer and %lld is not allowed\n",
4688 reg_type_str[type], val);
4689 return false;
4690 }
4691
4692 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
4693 verbose(env, "%s pointer offset %d is not allowed\n",
4694 reg_type_str[type], reg->off);
4695 return false;
4696 }
4697
4698 if (smin == S64_MIN) {
4699 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
4700 reg_type_str[type]);
4701 return false;
4702 }
4703
4704 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
4705 verbose(env, "value %lld makes %s pointer be out of bounds\n",
4706 smin, reg_type_str[type]);
4707 return false;
4708 }
4709
4710 return true;
4711 }
4712
4713 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
4714 {
4715 return &env->insn_aux_data[env->insn_idx];
4716 }
4717
4718 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
4719 u32 *ptr_limit, u8 opcode, bool off_is_neg)
4720 {
4721 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
4722 (opcode == BPF_SUB && !off_is_neg);
4723 u32 off;
4724
4725 switch (ptr_reg->type) {
4726 case PTR_TO_STACK:
4727 /* Indirect variable offset stack access is prohibited in
4728 * unprivileged mode so it's not handled here.
4729 */
4730 off = ptr_reg->off + ptr_reg->var_off.value;
4731 if (mask_to_left)
4732 *ptr_limit = MAX_BPF_STACK + off;
4733 else
4734 *ptr_limit = -off;
4735 return 0;
4736 case PTR_TO_MAP_VALUE:
4737 if (mask_to_left) {
4738 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
4739 } else {
4740 off = ptr_reg->smin_value + ptr_reg->off;
4741 *ptr_limit = ptr_reg->map_ptr->value_size - off;
4742 }
4743 return 0;
4744 default:
4745 return -EINVAL;
4746 }
4747 }
4748
4749 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
4750 const struct bpf_insn *insn)
4751 {
4752 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
4753 }
4754
4755 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
4756 u32 alu_state, u32 alu_limit)
4757 {
4758 /* If we arrived here from different branches with different
4759 * state or limits to sanitize, then this won't work.
4760 */
4761 if (aux->alu_state &&
4762 (aux->alu_state != alu_state ||
4763 aux->alu_limit != alu_limit))
4764 return -EACCES;
4765
4766 /* Corresponding fixup done in fixup_bpf_calls(). 
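 * Roughly, the rewritten sequence clamps the offset register against
 * alu_limit before the pointer ALU, e.g. (a sketch only, modulo the
 * negative-offset handling; the exact instructions live in
 * fixup_bpf_calls()):
 *   ax = alu_limit - off;  ax |= off;  ax = -ax;  ax s>>= 63;  off &= ax;
 * i.e. 'off' is forced to zero whenever it is outside [0, alu_limit], so
 * even a misspeculated pointer ALU cannot step outside the object.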
*/
4767 aux->alu_state = alu_state;
4768 aux->alu_limit = alu_limit;
4769 return 0;
4770 }
4771
4772 static int sanitize_val_alu(struct bpf_verifier_env *env,
4773 struct bpf_insn *insn)
4774 {
4775 struct bpf_insn_aux_data *aux = cur_aux(env);
4776
4777 if (can_skip_alu_sanitation(env, insn))
4778 return 0;
4779
4780 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4781 }
4782
4783 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4784 struct bpf_insn *insn,
4785 const struct bpf_reg_state *ptr_reg,
4786 struct bpf_reg_state *dst_reg,
4787 bool off_is_neg)
4788 {
4789 struct bpf_verifier_state *vstate = env->cur_state;
4790 struct bpf_insn_aux_data *aux = cur_aux(env);
4791 bool ptr_is_dst_reg = ptr_reg == dst_reg;
4792 u8 opcode = BPF_OP(insn->code);
4793 u32 alu_state, alu_limit;
4794 struct bpf_reg_state tmp;
4795 bool ret;
4796
4797 if (can_skip_alu_sanitation(env, insn))
4798 return 0;
4799
4800 /* We already marked aux for masking from non-speculative
4801 * paths, thus we got here in the first place. We only care
4802 * to explore bad access from here.
4803 */
4804 if (vstate->speculative)
4805 goto do_sim;
4806
4807 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4808 alu_state |= ptr_is_dst_reg ?
4809 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4810
4811 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4812 return 0;
4813 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
4814 return -EACCES;
4815 do_sim:
4816 /* Simulate and find potential out-of-bounds access under
4817 * speculative execution from truncation as a result of
4818 * masking when off was not within expected range. If off
4819 * sits in dst, then we temporarily need to move ptr there
4820 * to simulate dst (== 0) +/-= ptr. Needed, for example,
4821 * for cases where we use K-based arithmetic in one direction
4822 * and truncated reg-based in the other in order to explore
4823 * bad access.
4824 */
4825 if (!ptr_is_dst_reg) {
4826 tmp = *dst_reg;
4827 *dst_reg = *ptr_reg;
4828 }
4829 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
4830 if (!ptr_is_dst_reg && ret)
4831 *dst_reg = tmp;
4832 return !ret ? -EFAULT : 0;
4833 }
4834
4835 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
4836 * Caller should also handle BPF_MOV case separately.
4837 * If we return -EACCES, caller may want to try again treating pointer as a
4838 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
4839 */
4840 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4841 struct bpf_insn *insn,
4842 const struct bpf_reg_state *ptr_reg,
4843 const struct bpf_reg_state *off_reg)
4844 {
4845 struct bpf_verifier_state *vstate = env->cur_state;
4846 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4847 struct bpf_reg_state *regs = state->regs, *dst_reg;
4848 bool known = tnum_is_const(off_reg->var_off);
4849 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
4850 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
4851 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
4852 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
4853 u32 dst = insn->dst_reg, src = insn->src_reg;
4854 u8 opcode = BPF_OP(insn->code);
4855 int ret;
4856
4857 dst_reg = &regs[dst];
4858
4859 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
4860 smin_val > smax_val || umin_val > umax_val) {
4861 /* Taint dst register if offset had invalid bounds derived from
4862 * e.g. 
dead branches. 4863 */ 4864 __mark_reg_unknown(env, dst_reg); 4865 return 0; 4866 } 4867 4868 if (BPF_CLASS(insn->code) != BPF_ALU64) { 4869 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 4870 verbose(env, 4871 "R%d 32-bit pointer arithmetic prohibited\n", 4872 dst); 4873 return -EACCES; 4874 } 4875 4876 switch (ptr_reg->type) { 4877 case PTR_TO_MAP_VALUE_OR_NULL: 4878 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 4879 dst, reg_type_str[ptr_reg->type]); 4880 return -EACCES; 4881 case CONST_PTR_TO_MAP: 4882 case PTR_TO_PACKET_END: 4883 case PTR_TO_SOCKET: 4884 case PTR_TO_SOCKET_OR_NULL: 4885 case PTR_TO_SOCK_COMMON: 4886 case PTR_TO_SOCK_COMMON_OR_NULL: 4887 case PTR_TO_TCP_SOCK: 4888 case PTR_TO_TCP_SOCK_OR_NULL: 4889 case PTR_TO_XDP_SOCK: 4890 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 4891 dst, reg_type_str[ptr_reg->type]); 4892 return -EACCES; 4893 case PTR_TO_MAP_VALUE: 4894 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { 4895 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", 4896 off_reg == dst_reg ? dst : src); 4897 return -EACCES; 4898 } 4899 /* fall-through */ 4900 default: 4901 break; 4902 } 4903 4904 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 4905 * The id may be overwritten later if we create a new variable offset. 4906 */ 4907 dst_reg->type = ptr_reg->type; 4908 dst_reg->id = ptr_reg->id; 4909 4910 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 4911 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 4912 return -EINVAL; 4913 4914 /* pointer types do not carry 32-bit bounds at the moment. */ 4915 __mark_reg32_unbounded(dst_reg); 4916 4917 switch (opcode) { 4918 case BPF_ADD: 4919 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 4920 if (ret < 0) { 4921 verbose(env, "R%d tried to add from different maps or paths\n", dst); 4922 return ret; 4923 } 4924 /* We can take a fixed offset as long as it doesn't overflow 4925 * the s32 'off' field 4926 */ 4927 if (known && (ptr_reg->off + smin_val == 4928 (s64)(s32)(ptr_reg->off + smin_val))) { 4929 /* pointer += K. Accumulate it into fixed offset */ 4930 dst_reg->smin_value = smin_ptr; 4931 dst_reg->smax_value = smax_ptr; 4932 dst_reg->umin_value = umin_ptr; 4933 dst_reg->umax_value = umax_ptr; 4934 dst_reg->var_off = ptr_reg->var_off; 4935 dst_reg->off = ptr_reg->off + smin_val; 4936 dst_reg->raw = ptr_reg->raw; 4937 break; 4938 } 4939 /* A new variable offset is created. Note that off_reg->off 4940 * == 0, since it's a scalar. 4941 * dst_reg gets the pointer type and since some positive 4942 * integer value was added to the pointer, give it a new 'id' 4943 * if it's a PTR_TO_PACKET. 4944 * this creates a new 'base' pointer, off_reg (variable) gets 4945 * added into the variable offset, and we copy the fixed offset 4946 * from ptr_reg. 
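 * For example (a sketch): ptr_reg is a map value pointer with off = 16 and
 * off_reg is a scalar known to be in [0, 64]. The result keeps off = 16,
 * gets smin/umin = 0 and smax/umax = 64 as the variable part, and a later
 * 1-byte load through it must still prove that 16 + [0, 64] + 1 fits
 * inside the map value.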
4947 */ 4948 if (signed_add_overflows(smin_ptr, smin_val) || 4949 signed_add_overflows(smax_ptr, smax_val)) { 4950 dst_reg->smin_value = S64_MIN; 4951 dst_reg->smax_value = S64_MAX; 4952 } else { 4953 dst_reg->smin_value = smin_ptr + smin_val; 4954 dst_reg->smax_value = smax_ptr + smax_val; 4955 } 4956 if (umin_ptr + umin_val < umin_ptr || 4957 umax_ptr + umax_val < umax_ptr) { 4958 dst_reg->umin_value = 0; 4959 dst_reg->umax_value = U64_MAX; 4960 } else { 4961 dst_reg->umin_value = umin_ptr + umin_val; 4962 dst_reg->umax_value = umax_ptr + umax_val; 4963 } 4964 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 4965 dst_reg->off = ptr_reg->off; 4966 dst_reg->raw = ptr_reg->raw; 4967 if (reg_is_pkt_pointer(ptr_reg)) { 4968 dst_reg->id = ++env->id_gen; 4969 /* something was added to pkt_ptr, set range to zero */ 4970 dst_reg->raw = 0; 4971 } 4972 break; 4973 case BPF_SUB: 4974 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 4975 if (ret < 0) { 4976 verbose(env, "R%d tried to sub from different maps or paths\n", dst); 4977 return ret; 4978 } 4979 if (dst_reg == off_reg) { 4980 /* scalar -= pointer. Creates an unknown scalar */ 4981 verbose(env, "R%d tried to subtract pointer from scalar\n", 4982 dst); 4983 return -EACCES; 4984 } 4985 /* We don't allow subtraction from FP, because (according to 4986 * test_verifier.c test "invalid fp arithmetic", JITs might not 4987 * be able to deal with it. 4988 */ 4989 if (ptr_reg->type == PTR_TO_STACK) { 4990 verbose(env, "R%d subtraction from stack pointer prohibited\n", 4991 dst); 4992 return -EACCES; 4993 } 4994 if (known && (ptr_reg->off - smin_val == 4995 (s64)(s32)(ptr_reg->off - smin_val))) { 4996 /* pointer -= K. Subtract it from fixed offset */ 4997 dst_reg->smin_value = smin_ptr; 4998 dst_reg->smax_value = smax_ptr; 4999 dst_reg->umin_value = umin_ptr; 5000 dst_reg->umax_value = umax_ptr; 5001 dst_reg->var_off = ptr_reg->var_off; 5002 dst_reg->id = ptr_reg->id; 5003 dst_reg->off = ptr_reg->off - smin_val; 5004 dst_reg->raw = ptr_reg->raw; 5005 break; 5006 } 5007 /* A new variable offset is created. If the subtrahend is known 5008 * nonnegative, then any reg->range we had before is still good. 5009 */ 5010 if (signed_sub_overflows(smin_ptr, smax_val) || 5011 signed_sub_overflows(smax_ptr, smin_val)) { 5012 /* Overflow possible, we know nothing */ 5013 dst_reg->smin_value = S64_MIN; 5014 dst_reg->smax_value = S64_MAX; 5015 } else { 5016 dst_reg->smin_value = smin_ptr - smax_val; 5017 dst_reg->smax_value = smax_ptr - smin_val; 5018 } 5019 if (umin_ptr < umax_val) { 5020 /* Overflow possible, we know nothing */ 5021 dst_reg->umin_value = 0; 5022 dst_reg->umax_value = U64_MAX; 5023 } else { 5024 /* Cannot overflow (as long as bounds are consistent) */ 5025 dst_reg->umin_value = umin_ptr - umax_val; 5026 dst_reg->umax_value = umax_ptr - umin_val; 5027 } 5028 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 5029 dst_reg->off = ptr_reg->off; 5030 dst_reg->raw = ptr_reg->raw; 5031 if (reg_is_pkt_pointer(ptr_reg)) { 5032 dst_reg->id = ++env->id_gen; 5033 /* something was added to pkt_ptr, set range to zero */ 5034 if (smin_val < 0) 5035 dst_reg->raw = 0; 5036 } 5037 break; 5038 case BPF_AND: 5039 case BPF_OR: 5040 case BPF_XOR: 5041 /* bitwise ops on pointers are troublesome, prohibit. */ 5042 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 5043 dst, bpf_alu_string[opcode >> 4]); 5044 return -EACCES; 5045 default: 5046 /* other operators (e.g. 
MUL,LSH) produce non-pointer results */ 5047 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 5048 dst, bpf_alu_string[opcode >> 4]); 5049 return -EACCES; 5050 } 5051 5052 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 5053 return -EINVAL; 5054 5055 __update_reg_bounds(dst_reg); 5056 __reg_deduce_bounds(dst_reg); 5057 __reg_bound_offset(dst_reg); 5058 5059 /* For unprivileged we require that resulting offset must be in bounds 5060 * in order to be able to sanitize access later on. 5061 */ 5062 if (!env->allow_ptr_leaks) { 5063 if (dst_reg->type == PTR_TO_MAP_VALUE && 5064 check_map_access(env, dst, dst_reg->off, 1, false)) { 5065 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 5066 "prohibited for !root\n", dst); 5067 return -EACCES; 5068 } else if (dst_reg->type == PTR_TO_STACK && 5069 check_stack_access(env, dst_reg, dst_reg->off + 5070 dst_reg->var_off.value, 1)) { 5071 verbose(env, "R%d stack pointer arithmetic goes out of range, " 5072 "prohibited for !root\n", dst); 5073 return -EACCES; 5074 } 5075 } 5076 5077 return 0; 5078 } 5079 5080 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 5081 struct bpf_reg_state *src_reg) 5082 { 5083 s32 smin_val = src_reg->s32_min_value; 5084 s32 smax_val = src_reg->s32_max_value; 5085 u32 umin_val = src_reg->u32_min_value; 5086 u32 umax_val = src_reg->u32_max_value; 5087 5088 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 5089 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 5090 dst_reg->s32_min_value = S32_MIN; 5091 dst_reg->s32_max_value = S32_MAX; 5092 } else { 5093 dst_reg->s32_min_value += smin_val; 5094 dst_reg->s32_max_value += smax_val; 5095 } 5096 if (dst_reg->u32_min_value + umin_val < umin_val || 5097 dst_reg->u32_max_value + umax_val < umax_val) { 5098 dst_reg->u32_min_value = 0; 5099 dst_reg->u32_max_value = U32_MAX; 5100 } else { 5101 dst_reg->u32_min_value += umin_val; 5102 dst_reg->u32_max_value += umax_val; 5103 } 5104 } 5105 5106 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 5107 struct bpf_reg_state *src_reg) 5108 { 5109 s64 smin_val = src_reg->smin_value; 5110 s64 smax_val = src_reg->smax_value; 5111 u64 umin_val = src_reg->umin_value; 5112 u64 umax_val = src_reg->umax_value; 5113 5114 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 5115 signed_add_overflows(dst_reg->smax_value, smax_val)) { 5116 dst_reg->smin_value = S64_MIN; 5117 dst_reg->smax_value = S64_MAX; 5118 } else { 5119 dst_reg->smin_value += smin_val; 5120 dst_reg->smax_value += smax_val; 5121 } 5122 if (dst_reg->umin_value + umin_val < umin_val || 5123 dst_reg->umax_value + umax_val < umax_val) { 5124 dst_reg->umin_value = 0; 5125 dst_reg->umax_value = U64_MAX; 5126 } else { 5127 dst_reg->umin_value += umin_val; 5128 dst_reg->umax_value += umax_val; 5129 } 5130 } 5131 5132 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 5133 struct bpf_reg_state *src_reg) 5134 { 5135 s32 smin_val = src_reg->s32_min_value; 5136 s32 smax_val = src_reg->s32_max_value; 5137 u32 umin_val = src_reg->u32_min_value; 5138 u32 umax_val = src_reg->u32_max_value; 5139 5140 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 5141 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 5142 /* Overflow possible, we know nothing */ 5143 dst_reg->s32_min_value = S32_MIN; 5144 dst_reg->s32_max_value = S32_MAX; 5145 } else { 5146 dst_reg->s32_min_value -= smax_val; 5147 dst_reg->s32_max_value -= smin_val; 5148 } 5149 if (dst_reg->u32_min_value < 
umax_val) { 5150 /* Overflow possible, we know nothing */ 5151 dst_reg->u32_min_value = 0; 5152 dst_reg->u32_max_value = U32_MAX; 5153 } else { 5154 /* Cannot overflow (as long as bounds are consistent) */ 5155 dst_reg->u32_min_value -= umax_val; 5156 dst_reg->u32_max_value -= umin_val; 5157 } 5158 } 5159 5160 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 5161 struct bpf_reg_state *src_reg) 5162 { 5163 s64 smin_val = src_reg->smin_value; 5164 s64 smax_val = src_reg->smax_value; 5165 u64 umin_val = src_reg->umin_value; 5166 u64 umax_val = src_reg->umax_value; 5167 5168 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 5169 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 5170 /* Overflow possible, we know nothing */ 5171 dst_reg->smin_value = S64_MIN; 5172 dst_reg->smax_value = S64_MAX; 5173 } else { 5174 dst_reg->smin_value -= smax_val; 5175 dst_reg->smax_value -= smin_val; 5176 } 5177 if (dst_reg->umin_value < umax_val) { 5178 /* Overflow possible, we know nothing */ 5179 dst_reg->umin_value = 0; 5180 dst_reg->umax_value = U64_MAX; 5181 } else { 5182 /* Cannot overflow (as long as bounds are consistent) */ 5183 dst_reg->umin_value -= umax_val; 5184 dst_reg->umax_value -= umin_val; 5185 } 5186 } 5187 5188 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 5189 struct bpf_reg_state *src_reg) 5190 { 5191 s32 smin_val = src_reg->s32_min_value; 5192 u32 umin_val = src_reg->u32_min_value; 5193 u32 umax_val = src_reg->u32_max_value; 5194 5195 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 5196 /* Ain't nobody got time to multiply that sign */ 5197 __mark_reg32_unbounded(dst_reg); 5198 return; 5199 } 5200 /* Both values are positive, so we can work with unsigned and 5201 * copy the result to signed (unless it exceeds S32_MAX). 5202 */ 5203 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 5204 /* Potential overflow, we know nothing */ 5205 __mark_reg32_unbounded(dst_reg); 5206 return; 5207 } 5208 dst_reg->u32_min_value *= umin_val; 5209 dst_reg->u32_max_value *= umax_val; 5210 if (dst_reg->u32_max_value > S32_MAX) { 5211 /* Overflow possible, we know nothing */ 5212 dst_reg->s32_min_value = S32_MIN; 5213 dst_reg->s32_max_value = S32_MAX; 5214 } else { 5215 dst_reg->s32_min_value = dst_reg->u32_min_value; 5216 dst_reg->s32_max_value = dst_reg->u32_max_value; 5217 } 5218 } 5219 5220 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 5221 struct bpf_reg_state *src_reg) 5222 { 5223 s64 smin_val = src_reg->smin_value; 5224 u64 umin_val = src_reg->umin_value; 5225 u64 umax_val = src_reg->umax_value; 5226 5227 if (smin_val < 0 || dst_reg->smin_value < 0) { 5228 /* Ain't nobody got time to multiply that sign */ 5229 __mark_reg64_unbounded(dst_reg); 5230 return; 5231 } 5232 /* Both values are positive, so we can work with unsigned and 5233 * copy the result to signed (unless it exceeds S64_MAX). 
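 * For example: dst in [2, 10] multiplied by src in [3, 4] gives [6, 40];
 * since both operands are known to fit in 32 bits the 64-bit products
 * cannot wrap, and 40 <= S64_MAX, so the signed bounds can be copied from
 * the unsigned ones.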
5234 */
5235 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
5236 /* Potential overflow, we know nothing */
5237 __mark_reg64_unbounded(dst_reg);
5238 return;
5239 }
5240 dst_reg->umin_value *= umin_val;
5241 dst_reg->umax_value *= umax_val;
5242 if (dst_reg->umax_value > S64_MAX) {
5243 /* Overflow possible, we know nothing */
5244 dst_reg->smin_value = S64_MIN;
5245 dst_reg->smax_value = S64_MAX;
5246 } else {
5247 dst_reg->smin_value = dst_reg->umin_value;
5248 dst_reg->smax_value = dst_reg->umax_value;
5249 }
5250 }
5251
5252 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
5253 struct bpf_reg_state *src_reg)
5254 {
5255 bool src_known = tnum_subreg_is_const(src_reg->var_off);
5256 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
5257 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
5258 s32 smin_val = src_reg->s32_min_value;
5259 u32 umax_val = src_reg->u32_max_value;
5260
5261 /* Assuming scalar_min_max_and will be called so it's safe
5262 * to skip updating register for known 32-bit case.
5263 */
5264 if (src_known && dst_known)
5265 return;
5266
5267 /* We get our minimum from the var_off, since that's inherently
5268 * bitwise. Our maximum is the minimum of the operands' maxima.
5269 */
5270 dst_reg->u32_min_value = var32_off.value;
5271 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
5272 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
5273 /* Lose signed bounds when ANDing negative numbers,
5274 * ain't nobody got time for that.
5275 */
5276 dst_reg->s32_min_value = S32_MIN;
5277 dst_reg->s32_max_value = S32_MAX;
5278 } else {
5279 /* ANDing two positives gives a positive, so safe to
5280 * cast result into s32.
5281 */
5282 dst_reg->s32_min_value = dst_reg->u32_min_value;
5283 dst_reg->s32_max_value = dst_reg->u32_max_value;
5284 }
5285
5286 }
5287
5288 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
5289 struct bpf_reg_state *src_reg)
5290 {
5291 bool src_known = tnum_is_const(src_reg->var_off);
5292 bool dst_known = tnum_is_const(dst_reg->var_off);
5293 s64 smin_val = src_reg->smin_value;
5294 u64 umax_val = src_reg->umax_value;
5295
5296 if (src_known && dst_known) {
5297 __mark_reg_known(dst_reg, dst_reg->var_off.value &
5298 src_reg->var_off.value);
5299 return;
5300 }
5301
5302 /* We get our minimum from the var_off, since that's inherently
5303 * bitwise. Our maximum is the minimum of the operands' maxima.
5304 */
5305 dst_reg->umin_value = dst_reg->var_off.value;
5306 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
5307 if (dst_reg->smin_value < 0 || smin_val < 0) {
5308 /* Lose signed bounds when ANDing negative numbers,
5309 * ain't nobody got time for that.
5310 */
5311 dst_reg->smin_value = S64_MIN;
5312 dst_reg->smax_value = S64_MAX;
5313 } else {
5314 /* ANDing two positives gives a positive, so safe to
5315 * cast result into s64. 
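 * A typical case: 'var &= 0xff' on an otherwise unknown value leaves
 * var_off with only the low byte unknown, so here umin becomes 0, umax
 * becomes min(old umax, 0xff) = 0xff, and the signed bounds can safely
 * mirror [0, 255].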
5316 */ 5317 dst_reg->smin_value = dst_reg->umin_value; 5318 dst_reg->smax_value = dst_reg->umax_value; 5319 } 5320 /* We may learn something more from the var_off */ 5321 __update_reg_bounds(dst_reg); 5322 } 5323 5324 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 5325 struct bpf_reg_state *src_reg) 5326 { 5327 bool src_known = tnum_subreg_is_const(src_reg->var_off); 5328 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 5329 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 5330 s32 smin_val = src_reg->smin_value; 5331 u32 umin_val = src_reg->umin_value; 5332 5333 /* Assuming scalar64_min_max_or will be called so it is safe 5334 * to skip updating register for known case. 5335 */ 5336 if (src_known && dst_known) 5337 return; 5338 5339 /* We get our maximum from the var_off, and our minimum is the 5340 * maximum of the operands' minima 5341 */ 5342 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 5343 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 5344 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 5345 /* Lose signed bounds when ORing negative numbers, 5346 * ain't nobody got time for that. 5347 */ 5348 dst_reg->s32_min_value = S32_MIN; 5349 dst_reg->s32_max_value = S32_MAX; 5350 } else { 5351 /* ORing two positives gives a positive, so safe to 5352 * cast result into s64. 5353 */ 5354 dst_reg->s32_min_value = dst_reg->umin_value; 5355 dst_reg->s32_max_value = dst_reg->umax_value; 5356 } 5357 } 5358 5359 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 5360 struct bpf_reg_state *src_reg) 5361 { 5362 bool src_known = tnum_is_const(src_reg->var_off); 5363 bool dst_known = tnum_is_const(dst_reg->var_off); 5364 s64 smin_val = src_reg->smin_value; 5365 u64 umin_val = src_reg->umin_value; 5366 5367 if (src_known && dst_known) { 5368 __mark_reg_known(dst_reg, dst_reg->var_off.value | 5369 src_reg->var_off.value); 5370 return; 5371 } 5372 5373 /* We get our maximum from the var_off, and our minimum is the 5374 * maximum of the operands' minima 5375 */ 5376 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 5377 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 5378 if (dst_reg->smin_value < 0 || smin_val < 0) { 5379 /* Lose signed bounds when ORing negative numbers, 5380 * ain't nobody got time for that. 5381 */ 5382 dst_reg->smin_value = S64_MIN; 5383 dst_reg->smax_value = S64_MAX; 5384 } else { 5385 /* ORing two positives gives a positive, so safe to 5386 * cast result into s64. 
5387 */ 5388 dst_reg->smin_value = dst_reg->umin_value; 5389 dst_reg->smax_value = dst_reg->umax_value; 5390 } 5391 /* We may learn something more from the var_off */ 5392 __update_reg_bounds(dst_reg); 5393 } 5394 5395 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 5396 u64 umin_val, u64 umax_val) 5397 { 5398 /* We lose all sign bit information (except what we can pick 5399 * up from var_off) 5400 */ 5401 dst_reg->s32_min_value = S32_MIN; 5402 dst_reg->s32_max_value = S32_MAX; 5403 /* If we might shift our top bit out, then we know nothing */ 5404 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 5405 dst_reg->u32_min_value = 0; 5406 dst_reg->u32_max_value = U32_MAX; 5407 } else { 5408 dst_reg->u32_min_value <<= umin_val; 5409 dst_reg->u32_max_value <<= umax_val; 5410 } 5411 } 5412 5413 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 5414 struct bpf_reg_state *src_reg) 5415 { 5416 u32 umax_val = src_reg->u32_max_value; 5417 u32 umin_val = src_reg->u32_min_value; 5418 /* u32 alu operation will zext upper bits */ 5419 struct tnum subreg = tnum_subreg(dst_reg->var_off); 5420 5421 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 5422 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 5423 /* Not required but being careful mark reg64 bounds as unknown so 5424 * that we are forced to pick them up from tnum and zext later and 5425 * if some path skips this step we are still safe. 5426 */ 5427 __mark_reg64_unbounded(dst_reg); 5428 __update_reg32_bounds(dst_reg); 5429 } 5430 5431 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 5432 u64 umin_val, u64 umax_val) 5433 { 5434 /* Special case <<32 because it is a common compiler pattern to sign 5435 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 5436 * positive we know this shift will also be positive so we can track 5437 * bounds correctly. Otherwise we lose all sign bit information except 5438 * what we can pick up from var_off. Perhaps we can generalize this 5439 * later to shifts of any length. 
5440 	 */
5441 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
5442 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
5443 	else
5444 		dst_reg->smax_value = S64_MAX;
5445
5446 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
5447 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
5448 	else
5449 		dst_reg->smin_value = S64_MIN;
5450
5451 	/* If we might shift our top bit out, then we know nothing */
5452 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
5453 		dst_reg->umin_value = 0;
5454 		dst_reg->umax_value = U64_MAX;
5455 	} else {
5456 		dst_reg->umin_value <<= umin_val;
5457 		dst_reg->umax_value <<= umax_val;
5458 	}
5459 }
5460
5461 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
5462 			       struct bpf_reg_state *src_reg)
5463 {
5464 	u64 umax_val = src_reg->umax_value;
5465 	u64 umin_val = src_reg->umin_value;
5466
5467 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
5468 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
5469 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
5470
5471 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
5472 	/* We may learn something more from the var_off */
5473 	__update_reg_bounds(dst_reg);
5474 }
5475
5476 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
5477 				 struct bpf_reg_state *src_reg)
5478 {
5479 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
5480 	u32 umax_val = src_reg->u32_max_value;
5481 	u32 umin_val = src_reg->u32_min_value;
5482
5483 	/* BPF_RSH is an unsigned shift. If the value in dst_reg might
5484 	 * be negative, then either:
5485 	 * 1) src_reg might be zero, so the sign bit of the result is
5486 	 *    unknown, so we lose our signed bounds
5487 	 * 2) it's known negative, thus the unsigned bounds capture the
5488 	 *    signed bounds
5489 	 * 3) the signed bounds cross zero, so they tell us nothing
5490 	 *    about the result
5491 	 * If the value in dst_reg is known nonnegative, then again the
5492 	 * unsigned bounds capture the signed bounds.
5493 	 * Thus, in all cases it suffices to blow away our signed bounds
5494 	 * and rely on inferring new ones from the unsigned bounds and
5495 	 * var_off of the result.
5496 	 */
5497 	dst_reg->s32_min_value = S32_MIN;
5498 	dst_reg->s32_max_value = S32_MAX;
5499
5500 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
5501 	dst_reg->u32_min_value >>= umax_val;
5502 	dst_reg->u32_max_value >>= umin_val;
5503
5504 	__mark_reg64_unbounded(dst_reg);
5505 	__update_reg32_bounds(dst_reg);
5506 }
5507
5508 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
5509 			       struct bpf_reg_state *src_reg)
5510 {
5511 	u64 umax_val = src_reg->umax_value;
5512 	u64 umin_val = src_reg->umin_value;
5513
5514 	/* BPF_RSH is an unsigned shift. If the value in dst_reg might
5515 	 * be negative, then either:
5516 	 * 1) src_reg might be zero, so the sign bit of the result is
5517 	 *    unknown, so we lose our signed bounds
5518 	 * 2) it's known negative, thus the unsigned bounds capture the
5519 	 *    signed bounds
5520 	 * 3) the signed bounds cross zero, so they tell us nothing
5521 	 *    about the result
5522 	 * If the value in dst_reg is known nonnegative, then again the
5523 	 * unsigned bounds capture the signed bounds.
5524 	 * Thus, in all cases it suffices to blow away our signed bounds
5525 	 * and rely on inferring new ones from the unsigned bounds and
5526 	 * var_off of the result.
5527 	 */
5528 	dst_reg->smin_value = S64_MIN;
5529 	dst_reg->smax_value = S64_MAX;
5530 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
5531 	dst_reg->umin_value >>= umax_val;
5532 	dst_reg->umax_value >>= umin_val;
5533
5534 	/* It's not easy to operate on alu32 bounds here because it depends
5535 	 * on bits being shifted in. Take the easy way out and mark unbounded
5536 	 * so we can recalculate later from tnum.
5537 	 */
5538 	__mark_reg32_unbounded(dst_reg);
5539 	__update_reg_bounds(dst_reg);
5540 }
5541
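/* For illustration only (made-up numbers): if dst is known to be in
 * [16, 64] and the shift amount is only known to be in [2, 4], the RSH
 * helpers above compute
 *
 *	umin_value = 16 >> 4 = 1	(smallest value, largest shift)
 *	umax_value = 64 >> 2 = 16	(largest value, smallest shift)
 *
 * while the signed bounds are discarded and later re-derived from the
 * unsigned bounds and the var_off, as the comments above explain.
 */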
5542 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
5543 				  struct bpf_reg_state *src_reg)
5544 {
5545 	u64 umin_val = src_reg->u32_min_value;
5546
5547 	/* Upon reaching here, src_known is true and
5548 	 * umax_val is equal to umin_val.
5549 	 */
5550 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
5551 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
5552
5553 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
5554
5555 	/* blow away the dst_reg umin_value/umax_value and rely on
5556 	 * dst_reg var_off to refine the result.
5557 	 */
5558 	dst_reg->u32_min_value = 0;
5559 	dst_reg->u32_max_value = U32_MAX;
5560
5561 	__mark_reg64_unbounded(dst_reg);
5562 	__update_reg32_bounds(dst_reg);
5563 }
5564
5565 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
5566 				struct bpf_reg_state *src_reg)
5567 {
5568 	u64 umin_val = src_reg->umin_value;
5569
5570 	/* Upon reaching here, src_known is true and umax_val is equal
5571 	 * to umin_val.
5572 	 */
5573 	dst_reg->smin_value >>= umin_val;
5574 	dst_reg->smax_value >>= umin_val;
5575
5576 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
5577
5578 	/* blow away the dst_reg umin_value/umax_value and rely on
5579 	 * dst_reg var_off to refine the result.
5580 	 */
5581 	dst_reg->umin_value = 0;
5582 	dst_reg->umax_value = U64_MAX;
5583
5584 	/* It's not easy to operate on alu32 bounds here because it depends
5585 	 * on bits being shifted in from the upper 32 bits. Take the easy way
5586 	 * out and mark unbounded so we can recalculate later from tnum.
5587 	 */
5588 	__mark_reg32_unbounded(dst_reg);
5589 	__update_reg_bounds(dst_reg);
5590 }
5591
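/* For illustration only (made-up numbers): BPF_ARSH is only tracked for a
 * known shift amount, e.g. a constant shift of 1 applied to a dst whose
 * signed range is [-8, -2] gives
 *
 *	smin_value = -8 >> 1 = -4
 *	smax_value = -2 >> 1 = -1
 *
 * while the unsigned bounds are thrown away and recomputed from the
 * resulting var_off.
 */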
5592 /* WARNING: This function does calculations on 64-bit values, but the actual
5593  * execution may occur on 32-bit values. Therefore, things like bitshifts
5594  * need extra checks in the 32-bit case.
5595  */
5596 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
5597 				      struct bpf_insn *insn,
5598 				      struct bpf_reg_state *dst_reg,
5599 				      struct bpf_reg_state src_reg)
5600 {
5601 	struct bpf_reg_state *regs = cur_regs(env);
5602 	u8 opcode = BPF_OP(insn->code);
5603 	bool src_known, dst_known;
5604 	s64 smin_val, smax_val;
5605 	u64 umin_val, umax_val;
5606 	s32 s32_min_val, s32_max_val;
5607 	u32 u32_min_val, u32_max_val;
5608 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
5609 	u32 dst = insn->dst_reg;
5610 	int ret;
5611 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
5612
5613 	smin_val = src_reg.smin_value;
5614 	smax_val = src_reg.smax_value;
5615 	umin_val = src_reg.umin_value;
5616 	umax_val = src_reg.umax_value;
5617
5618 	s32_min_val = src_reg.s32_min_value;
5619 	s32_max_val = src_reg.s32_max_value;
5620 	u32_min_val = src_reg.u32_min_value;
5621 	u32_max_val = src_reg.u32_max_value;
5622
5623 	if (alu32) {
5624 		src_known = tnum_subreg_is_const(src_reg.var_off);
5625 		dst_known = tnum_subreg_is_const(dst_reg->var_off);
5626 		if ((src_known &&
5627 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
5628 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
5629 			/* Taint dst register if offset had invalid bounds
5630 			 * derived from e.g. dead branches.
5631 			 */
5632 			__mark_reg_unknown(env, dst_reg);
5633 			return 0;
5634 		}
5635 	} else {
5636 		src_known = tnum_is_const(src_reg.var_off);
5637 		dst_known = tnum_is_const(dst_reg->var_off);
5638 		if ((src_known &&
5639 		     (smin_val != smax_val || umin_val != umax_val)) ||
5640 		    smin_val > smax_val || umin_val > umax_val) {
5641 			/* Taint dst register if offset had invalid bounds
5642 			 * derived from e.g. dead branches.
5643 			 */
5644 			__mark_reg_unknown(env, dst_reg);
5645 			return 0;
5646 		}
5647 	}
5648
5649 	if (!src_known &&
5650 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
5651 		__mark_reg_unknown(env, dst_reg);
5652 		return 0;
5653 	}
5654
5655 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
5656 	 * There are two classes of instructions: for the first class we track
5657 	 * both alu32 and alu64 sign/unsigned bounds independently; this provides
5658 	 * the greatest amount of precision when alu operations are mixed with
5659 	 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL,
5660 	 * BPF_AND, and BPF_OR. This is possible because these ops have fairly
5661 	 * easy to understand and calculate behavior in both 32-bit and 64-bit
5662 	 * alu ops. See alu32 verifier tests for examples. The second class of
5663 	 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
5664 	 * with regards to tracking sign/unsigned bounds because the bits may
5665 	 * cross subreg boundaries in the alu64 case. When this happens we mark
5666 	 * the reg unbounded in the subreg bound space and use the resulting
5667 	 * tnum to calculate an approximation of the sign/unsigned bounds.
5668 */ 5669 switch (opcode) { 5670 case BPF_ADD: 5671 ret = sanitize_val_alu(env, insn); 5672 if (ret < 0) { 5673 verbose(env, "R%d tried to add from different pointers or scalars\n", dst); 5674 return ret; 5675 } 5676 scalar32_min_max_add(dst_reg, &src_reg); 5677 scalar_min_max_add(dst_reg, &src_reg); 5678 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 5679 break; 5680 case BPF_SUB: 5681 ret = sanitize_val_alu(env, insn); 5682 if (ret < 0) { 5683 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); 5684 return ret; 5685 } 5686 scalar32_min_max_sub(dst_reg, &src_reg); 5687 scalar_min_max_sub(dst_reg, &src_reg); 5688 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 5689 break; 5690 case BPF_MUL: 5691 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 5692 scalar32_min_max_mul(dst_reg, &src_reg); 5693 scalar_min_max_mul(dst_reg, &src_reg); 5694 break; 5695 case BPF_AND: 5696 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 5697 scalar32_min_max_and(dst_reg, &src_reg); 5698 scalar_min_max_and(dst_reg, &src_reg); 5699 break; 5700 case BPF_OR: 5701 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 5702 scalar32_min_max_or(dst_reg, &src_reg); 5703 scalar_min_max_or(dst_reg, &src_reg); 5704 break; 5705 case BPF_LSH: 5706 if (umax_val >= insn_bitness) { 5707 /* Shifts greater than 31 or 63 are undefined. 5708 * This includes shifts by a negative number. 5709 */ 5710 mark_reg_unknown(env, regs, insn->dst_reg); 5711 break; 5712 } 5713 if (alu32) 5714 scalar32_min_max_lsh(dst_reg, &src_reg); 5715 else 5716 scalar_min_max_lsh(dst_reg, &src_reg); 5717 break; 5718 case BPF_RSH: 5719 if (umax_val >= insn_bitness) { 5720 /* Shifts greater than 31 or 63 are undefined. 5721 * This includes shifts by a negative number. 5722 */ 5723 mark_reg_unknown(env, regs, insn->dst_reg); 5724 break; 5725 } 5726 if (alu32) 5727 scalar32_min_max_rsh(dst_reg, &src_reg); 5728 else 5729 scalar_min_max_rsh(dst_reg, &src_reg); 5730 break; 5731 case BPF_ARSH: 5732 if (umax_val >= insn_bitness) { 5733 /* Shifts greater than 31 or 63 are undefined. 5734 * This includes shifts by a negative number. 5735 */ 5736 mark_reg_unknown(env, regs, insn->dst_reg); 5737 break; 5738 } 5739 if (alu32) 5740 scalar32_min_max_arsh(dst_reg, &src_reg); 5741 else 5742 scalar_min_max_arsh(dst_reg, &src_reg); 5743 break; 5744 default: 5745 mark_reg_unknown(env, regs, insn->dst_reg); 5746 break; 5747 } 5748 5749 /* ALU32 ops are zero extended into 64bit register */ 5750 if (alu32) 5751 zext_32_to_64(dst_reg); 5752 5753 __update_reg_bounds(dst_reg); 5754 __reg_deduce_bounds(dst_reg); 5755 __reg_bound_offset(dst_reg); 5756 return 0; 5757 } 5758 5759 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 5760 * and var_off. 
5761  */
5762 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
5763 				   struct bpf_insn *insn)
5764 {
5765 	struct bpf_verifier_state *vstate = env->cur_state;
5766 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
5767 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
5768 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
5769 	u8 opcode = BPF_OP(insn->code);
5770 	int err;
5771
5772 	dst_reg = &regs[insn->dst_reg];
5773 	src_reg = NULL;
5774 	if (dst_reg->type != SCALAR_VALUE)
5775 		ptr_reg = dst_reg;
5776 	if (BPF_SRC(insn->code) == BPF_X) {
5777 		src_reg = &regs[insn->src_reg];
5778 		if (src_reg->type != SCALAR_VALUE) {
5779 			if (dst_reg->type != SCALAR_VALUE) {
5780 				/* Combining two pointers by any ALU op yields
5781 				 * an arbitrary scalar. Disallow all math except
5782 				 * pointer subtraction
5783 				 */
5784 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
5785 					mark_reg_unknown(env, regs, insn->dst_reg);
5786 					return 0;
5787 				}
5788 				verbose(env, "R%d pointer %s pointer prohibited\n",
5789 					insn->dst_reg,
5790 					bpf_alu_string[opcode >> 4]);
5791 				return -EACCES;
5792 			} else {
5793 				/* scalar += pointer
5794 				 * This is legal, but we have to reverse our
5795 				 * src/dest handling in computing the range
5796 				 */
5797 				err = mark_chain_precision(env, insn->dst_reg);
5798 				if (err)
5799 					return err;
5800 				return adjust_ptr_min_max_vals(env, insn,
5801 							       src_reg, dst_reg);
5802 			}
5803 		} else if (ptr_reg) {
5804 			/* pointer += scalar */
5805 			err = mark_chain_precision(env, insn->src_reg);
5806 			if (err)
5807 				return err;
5808 			return adjust_ptr_min_max_vals(env, insn,
5809 						       dst_reg, src_reg);
5810 		}
5811 	} else {
5812 		/* Pretend the src is a reg with a known value, since we only
5813 		 * need to be able to read from this state.
5814 		 */
5815 		off_reg.type = SCALAR_VALUE;
5816 		__mark_reg_known(&off_reg, insn->imm);
5817 		src_reg = &off_reg;
5818 		if (ptr_reg) /* pointer += K */
5819 			return adjust_ptr_min_max_vals(env, insn,
5820 						       ptr_reg, src_reg);
5821 	}
5822
5823 	/* Got here implies adding two SCALAR_VALUEs */
5824 	if (WARN_ON_ONCE(ptr_reg)) {
5825 		print_verifier_state(env, state);
5826 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
5827 		return -EINVAL;
5828 	}
5829 	if (WARN_ON(!src_reg)) {
5830 		print_verifier_state(env, state);
5831 		verbose(env, "verifier internal error: no src_reg\n");
5832 		return -EINVAL;
5833 	}
5834 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
5835 }
5836
5837 /* check validity of 32-bit and 64-bit arithmetic operations */
5838 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
5839 {
5840 	struct bpf_reg_state *regs = cur_regs(env);
5841 	u8 opcode = BPF_OP(insn->code);
5842 	int err;
5843
5844 	if (opcode == BPF_END || opcode == BPF_NEG) {
5845 		if (opcode == BPF_NEG) {
5846 			if (BPF_SRC(insn->code) != 0 ||
5847 			    insn->src_reg != BPF_REG_0 ||
5848 			    insn->off != 0 || insn->imm != 0) {
5849 				verbose(env, "BPF_NEG uses reserved fields\n");
5850 				return -EINVAL;
5851 			}
5852 		} else {
5853 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
5854 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
5855 			    BPF_CLASS(insn->code) == BPF_ALU64) {
5856 				verbose(env, "BPF_END uses reserved fields\n");
5857 				return -EINVAL;
5858 			}
5859 		}
5860
5861 		/* check src operand */
5862 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5863 		if (err)
5864 			return err;
5865
5866 		if (is_pointer_value(env, insn->dst_reg)) {
5867 			verbose(env, "R%d pointer arithmetic prohibited\n",
5868 				insn->dst_reg);
5869 			return -EACCES;
5870 		}
5871
5872 		/* check dest
operand */ 5873 err = check_reg_arg(env, insn->dst_reg, DST_OP); 5874 if (err) 5875 return err; 5876 5877 } else if (opcode == BPF_MOV) { 5878 5879 if (BPF_SRC(insn->code) == BPF_X) { 5880 if (insn->imm != 0 || insn->off != 0) { 5881 verbose(env, "BPF_MOV uses reserved fields\n"); 5882 return -EINVAL; 5883 } 5884 5885 /* check src operand */ 5886 err = check_reg_arg(env, insn->src_reg, SRC_OP); 5887 if (err) 5888 return err; 5889 } else { 5890 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 5891 verbose(env, "BPF_MOV uses reserved fields\n"); 5892 return -EINVAL; 5893 } 5894 } 5895 5896 /* check dest operand, mark as required later */ 5897 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 5898 if (err) 5899 return err; 5900 5901 if (BPF_SRC(insn->code) == BPF_X) { 5902 struct bpf_reg_state *src_reg = regs + insn->src_reg; 5903 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 5904 5905 if (BPF_CLASS(insn->code) == BPF_ALU64) { 5906 /* case: R1 = R2 5907 * copy register state to dest reg 5908 */ 5909 *dst_reg = *src_reg; 5910 dst_reg->live |= REG_LIVE_WRITTEN; 5911 dst_reg->subreg_def = DEF_NOT_SUBREG; 5912 } else { 5913 /* R1 = (u32) R2 */ 5914 if (is_pointer_value(env, insn->src_reg)) { 5915 verbose(env, 5916 "R%d partial copy of pointer\n", 5917 insn->src_reg); 5918 return -EACCES; 5919 } else if (src_reg->type == SCALAR_VALUE) { 5920 *dst_reg = *src_reg; 5921 dst_reg->live |= REG_LIVE_WRITTEN; 5922 dst_reg->subreg_def = env->insn_idx + 1; 5923 } else { 5924 mark_reg_unknown(env, regs, 5925 insn->dst_reg); 5926 } 5927 zext_32_to_64(dst_reg); 5928 } 5929 } else { 5930 /* case: R = imm 5931 * remember the value we stored into this reg 5932 */ 5933 /* clear any state __mark_reg_known doesn't set */ 5934 mark_reg_unknown(env, regs, insn->dst_reg); 5935 regs[insn->dst_reg].type = SCALAR_VALUE; 5936 if (BPF_CLASS(insn->code) == BPF_ALU64) { 5937 __mark_reg_known(regs + insn->dst_reg, 5938 insn->imm); 5939 } else { 5940 __mark_reg_known(regs + insn->dst_reg, 5941 (u32)insn->imm); 5942 } 5943 } 5944 5945 } else if (opcode > BPF_END) { 5946 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 5947 return -EINVAL; 5948 5949 } else { /* all other ALU ops: and, sub, xor, add, ... */ 5950 5951 if (BPF_SRC(insn->code) == BPF_X) { 5952 if (insn->imm != 0 || insn->off != 0) { 5953 verbose(env, "BPF_ALU uses reserved fields\n"); 5954 return -EINVAL; 5955 } 5956 /* check src1 operand */ 5957 err = check_reg_arg(env, insn->src_reg, SRC_OP); 5958 if (err) 5959 return err; 5960 } else { 5961 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 5962 verbose(env, "BPF_ALU uses reserved fields\n"); 5963 return -EINVAL; 5964 } 5965 } 5966 5967 /* check src2 operand */ 5968 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 5969 if (err) 5970 return err; 5971 5972 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 5973 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 5974 verbose(env, "div by zero\n"); 5975 return -EINVAL; 5976 } 5977 5978 if ((opcode == BPF_LSH || opcode == BPF_RSH || 5979 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 5980 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 
64 : 32; 5981 5982 if (insn->imm < 0 || insn->imm >= size) { 5983 verbose(env, "invalid shift %d\n", insn->imm); 5984 return -EINVAL; 5985 } 5986 } 5987 5988 /* check dest operand */ 5989 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 5990 if (err) 5991 return err; 5992 5993 return adjust_reg_min_max_vals(env, insn); 5994 } 5995 5996 return 0; 5997 } 5998 5999 static void __find_good_pkt_pointers(struct bpf_func_state *state, 6000 struct bpf_reg_state *dst_reg, 6001 enum bpf_reg_type type, u16 new_range) 6002 { 6003 struct bpf_reg_state *reg; 6004 int i; 6005 6006 for (i = 0; i < MAX_BPF_REG; i++) { 6007 reg = &state->regs[i]; 6008 if (reg->type == type && reg->id == dst_reg->id) 6009 /* keep the maximum range already checked */ 6010 reg->range = max(reg->range, new_range); 6011 } 6012 6013 bpf_for_each_spilled_reg(i, state, reg) { 6014 if (!reg) 6015 continue; 6016 if (reg->type == type && reg->id == dst_reg->id) 6017 reg->range = max(reg->range, new_range); 6018 } 6019 } 6020 6021 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 6022 struct bpf_reg_state *dst_reg, 6023 enum bpf_reg_type type, 6024 bool range_right_open) 6025 { 6026 u16 new_range; 6027 int i; 6028 6029 if (dst_reg->off < 0 || 6030 (dst_reg->off == 0 && range_right_open)) 6031 /* This doesn't give us any range */ 6032 return; 6033 6034 if (dst_reg->umax_value > MAX_PACKET_OFF || 6035 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 6036 /* Risk of overflow. For instance, ptr + (1<<63) may be less 6037 * than pkt_end, but that's because it's also less than pkt. 6038 */ 6039 return; 6040 6041 new_range = dst_reg->off; 6042 if (range_right_open) 6043 new_range--; 6044 6045 /* Examples for register markings: 6046 * 6047 * pkt_data in dst register: 6048 * 6049 * r2 = r3; 6050 * r2 += 8; 6051 * if (r2 > pkt_end) goto <handle exception> 6052 * <access okay> 6053 * 6054 * r2 = r3; 6055 * r2 += 8; 6056 * if (r2 < pkt_end) goto <access okay> 6057 * <handle exception> 6058 * 6059 * Where: 6060 * r2 == dst_reg, pkt_end == src_reg 6061 * r2=pkt(id=n,off=8,r=0) 6062 * r3=pkt(id=n,off=0,r=0) 6063 * 6064 * pkt_data in src register: 6065 * 6066 * r2 = r3; 6067 * r2 += 8; 6068 * if (pkt_end >= r2) goto <access okay> 6069 * <handle exception> 6070 * 6071 * r2 = r3; 6072 * r2 += 8; 6073 * if (pkt_end <= r2) goto <handle exception> 6074 * <access okay> 6075 * 6076 * Where: 6077 * pkt_end == dst_reg, r2 == src_reg 6078 * r2=pkt(id=n,off=8,r=0) 6079 * r3=pkt(id=n,off=0,r=0) 6080 * 6081 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 6082 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 6083 * and [r3, r3 + 8-1) respectively is safe to access depending on 6084 * the check. 6085 */ 6086 6087 /* If our ids match, then we must have the same max_value. And we 6088 * don't care about the other reg's fixed offset, since if it's too big 6089 * the range won't allow anything. 6090 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 
6091 */ 6092 for (i = 0; i <= vstate->curframe; i++) 6093 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, 6094 new_range); 6095 } 6096 6097 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) 6098 { 6099 struct tnum subreg = tnum_subreg(reg->var_off); 6100 s32 sval = (s32)val; 6101 6102 switch (opcode) { 6103 case BPF_JEQ: 6104 if (tnum_is_const(subreg)) 6105 return !!tnum_equals_const(subreg, val); 6106 break; 6107 case BPF_JNE: 6108 if (tnum_is_const(subreg)) 6109 return !tnum_equals_const(subreg, val); 6110 break; 6111 case BPF_JSET: 6112 if ((~subreg.mask & subreg.value) & val) 6113 return 1; 6114 if (!((subreg.mask | subreg.value) & val)) 6115 return 0; 6116 break; 6117 case BPF_JGT: 6118 if (reg->u32_min_value > val) 6119 return 1; 6120 else if (reg->u32_max_value <= val) 6121 return 0; 6122 break; 6123 case BPF_JSGT: 6124 if (reg->s32_min_value > sval) 6125 return 1; 6126 else if (reg->s32_max_value < sval) 6127 return 0; 6128 break; 6129 case BPF_JLT: 6130 if (reg->u32_max_value < val) 6131 return 1; 6132 else if (reg->u32_min_value >= val) 6133 return 0; 6134 break; 6135 case BPF_JSLT: 6136 if (reg->s32_max_value < sval) 6137 return 1; 6138 else if (reg->s32_min_value >= sval) 6139 return 0; 6140 break; 6141 case BPF_JGE: 6142 if (reg->u32_min_value >= val) 6143 return 1; 6144 else if (reg->u32_max_value < val) 6145 return 0; 6146 break; 6147 case BPF_JSGE: 6148 if (reg->s32_min_value >= sval) 6149 return 1; 6150 else if (reg->s32_max_value < sval) 6151 return 0; 6152 break; 6153 case BPF_JLE: 6154 if (reg->u32_max_value <= val) 6155 return 1; 6156 else if (reg->u32_min_value > val) 6157 return 0; 6158 break; 6159 case BPF_JSLE: 6160 if (reg->s32_max_value <= sval) 6161 return 1; 6162 else if (reg->s32_min_value > sval) 6163 return 0; 6164 break; 6165 } 6166 6167 return -1; 6168 } 6169 6170 6171 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) 6172 { 6173 s64 sval = (s64)val; 6174 6175 switch (opcode) { 6176 case BPF_JEQ: 6177 if (tnum_is_const(reg->var_off)) 6178 return !!tnum_equals_const(reg->var_off, val); 6179 break; 6180 case BPF_JNE: 6181 if (tnum_is_const(reg->var_off)) 6182 return !tnum_equals_const(reg->var_off, val); 6183 break; 6184 case BPF_JSET: 6185 if ((~reg->var_off.mask & reg->var_off.value) & val) 6186 return 1; 6187 if (!((reg->var_off.mask | reg->var_off.value) & val)) 6188 return 0; 6189 break; 6190 case BPF_JGT: 6191 if (reg->umin_value > val) 6192 return 1; 6193 else if (reg->umax_value <= val) 6194 return 0; 6195 break; 6196 case BPF_JSGT: 6197 if (reg->smin_value > sval) 6198 return 1; 6199 else if (reg->smax_value < sval) 6200 return 0; 6201 break; 6202 case BPF_JLT: 6203 if (reg->umax_value < val) 6204 return 1; 6205 else if (reg->umin_value >= val) 6206 return 0; 6207 break; 6208 case BPF_JSLT: 6209 if (reg->smax_value < sval) 6210 return 1; 6211 else if (reg->smin_value >= sval) 6212 return 0; 6213 break; 6214 case BPF_JGE: 6215 if (reg->umin_value >= val) 6216 return 1; 6217 else if (reg->umax_value < val) 6218 return 0; 6219 break; 6220 case BPF_JSGE: 6221 if (reg->smin_value >= sval) 6222 return 1; 6223 else if (reg->smax_value < sval) 6224 return 0; 6225 break; 6226 case BPF_JLE: 6227 if (reg->umax_value <= val) 6228 return 1; 6229 else if (reg->umin_value > val) 6230 return 0; 6231 break; 6232 case BPF_JSLE: 6233 if (reg->smax_value <= sval) 6234 return 1; 6235 else if (reg->smin_value > sval) 6236 return 0; 6237 break; 6238 } 6239 6240 return -1; 6241 } 6242 6243 /* compute branch 
direction of the expression "if (reg opcode val) goto target;" 6244 * and return: 6245 * 1 - branch will be taken and "goto target" will be executed 6246 * 0 - branch will not be taken and fall-through to next insn 6247 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value 6248 * range [0,10] 6249 */ 6250 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 6251 bool is_jmp32) 6252 { 6253 if (__is_pointer_value(false, reg)) 6254 return -1; 6255 6256 if (is_jmp32) 6257 return is_branch32_taken(reg, val, opcode); 6258 return is_branch64_taken(reg, val, opcode); 6259 } 6260 6261 /* Adjusts the register min/max values in the case that the dst_reg is the 6262 * variable register that we are working on, and src_reg is a constant or we're 6263 * simply doing a BPF_K check. 6264 * In JEQ/JNE cases we also adjust the var_off values. 6265 */ 6266 static void reg_set_min_max(struct bpf_reg_state *true_reg, 6267 struct bpf_reg_state *false_reg, 6268 u64 val, u32 val32, 6269 u8 opcode, bool is_jmp32) 6270 { 6271 struct tnum false_32off = tnum_subreg(false_reg->var_off); 6272 struct tnum false_64off = false_reg->var_off; 6273 struct tnum true_32off = tnum_subreg(true_reg->var_off); 6274 struct tnum true_64off = true_reg->var_off; 6275 s64 sval = (s64)val; 6276 s32 sval32 = (s32)val32; 6277 6278 /* If the dst_reg is a pointer, we can't learn anything about its 6279 * variable offset from the compare (unless src_reg were a pointer into 6280 * the same object, but we don't bother with that. 6281 * Since false_reg and true_reg have the same type by construction, we 6282 * only need to check one of them for pointerness. 6283 */ 6284 if (__is_pointer_value(false, false_reg)) 6285 return; 6286 6287 switch (opcode) { 6288 case BPF_JEQ: 6289 case BPF_JNE: 6290 { 6291 struct bpf_reg_state *reg = 6292 opcode == BPF_JEQ ? true_reg : false_reg; 6293 6294 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but 6295 * if it is true we know the value for sure. Likewise for 6296 * BPF_JNE. 6297 */ 6298 if (is_jmp32) 6299 __mark_reg32_known(reg, val32); 6300 else 6301 __mark_reg_known(reg, val); 6302 break; 6303 } 6304 case BPF_JSET: 6305 if (is_jmp32) { 6306 false_32off = tnum_and(false_32off, tnum_const(~val32)); 6307 if (is_power_of_2(val32)) 6308 true_32off = tnum_or(true_32off, 6309 tnum_const(val32)); 6310 } else { 6311 false_64off = tnum_and(false_64off, tnum_const(~val)); 6312 if (is_power_of_2(val)) 6313 true_64off = tnum_or(true_64off, 6314 tnum_const(val)); 6315 } 6316 break; 6317 case BPF_JGE: 6318 case BPF_JGT: 6319 { 6320 if (is_jmp32) { 6321 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; 6322 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32; 6323 6324 false_reg->u32_max_value = min(false_reg->u32_max_value, 6325 false_umax); 6326 true_reg->u32_min_value = max(true_reg->u32_min_value, 6327 true_umin); 6328 } else { 6329 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 6330 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 6331 6332 false_reg->umax_value = min(false_reg->umax_value, false_umax); 6333 true_reg->umin_value = max(true_reg->umin_value, true_umin); 6334 } 6335 break; 6336 } 6337 case BPF_JSGE: 6338 case BPF_JSGT: 6339 { 6340 if (is_jmp32) { 6341 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; 6342 s32 true_smin = opcode == BPF_JSGT ? 
sval32 + 1 : sval32; 6343 6344 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); 6345 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); 6346 } else { 6347 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 6348 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 6349 6350 false_reg->smax_value = min(false_reg->smax_value, false_smax); 6351 true_reg->smin_value = max(true_reg->smin_value, true_smin); 6352 } 6353 break; 6354 } 6355 case BPF_JLE: 6356 case BPF_JLT: 6357 { 6358 if (is_jmp32) { 6359 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1; 6360 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; 6361 6362 false_reg->u32_min_value = max(false_reg->u32_min_value, 6363 false_umin); 6364 true_reg->u32_max_value = min(true_reg->u32_max_value, 6365 true_umax); 6366 } else { 6367 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 6368 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 6369 6370 false_reg->umin_value = max(false_reg->umin_value, false_umin); 6371 true_reg->umax_value = min(true_reg->umax_value, true_umax); 6372 } 6373 break; 6374 } 6375 case BPF_JSLE: 6376 case BPF_JSLT: 6377 { 6378 if (is_jmp32) { 6379 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; 6380 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; 6381 6382 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); 6383 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); 6384 } else { 6385 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 6386 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; 6387 6388 false_reg->smin_value = max(false_reg->smin_value, false_smin); 6389 true_reg->smax_value = min(true_reg->smax_value, true_smax); 6390 } 6391 break; 6392 } 6393 default: 6394 return; 6395 } 6396 6397 if (is_jmp32) { 6398 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), 6399 tnum_subreg(false_32off)); 6400 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), 6401 tnum_subreg(true_32off)); 6402 __reg_combine_32_into_64(false_reg); 6403 __reg_combine_32_into_64(true_reg); 6404 } else { 6405 false_reg->var_off = false_64off; 6406 true_reg->var_off = true_64off; 6407 __reg_combine_64_into_32(false_reg); 6408 __reg_combine_64_into_32(true_reg); 6409 } 6410 } 6411 6412 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 6413 * the variable reg. 6414 */ 6415 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 6416 struct bpf_reg_state *false_reg, 6417 u64 val, u32 val32, 6418 u8 opcode, bool is_jmp32) 6419 { 6420 /* How can we transform "a <op> b" into "b <op> a"? */ 6421 static const u8 opcode_flip[16] = { 6422 /* these stay the same */ 6423 [BPF_JEQ >> 4] = BPF_JEQ, 6424 [BPF_JNE >> 4] = BPF_JNE, 6425 [BPF_JSET >> 4] = BPF_JSET, 6426 /* these swap "lesser" and "greater" (L and G in the opcodes) */ 6427 [BPF_JGE >> 4] = BPF_JLE, 6428 [BPF_JGT >> 4] = BPF_JLT, 6429 [BPF_JLE >> 4] = BPF_JGE, 6430 [BPF_JLT >> 4] = BPF_JGT, 6431 [BPF_JSGE >> 4] = BPF_JSLE, 6432 [BPF_JSGT >> 4] = BPF_JSLT, 6433 [BPF_JSLE >> 4] = BPF_JSGE, 6434 [BPF_JSLT >> 4] = BPF_JSGT 6435 }; 6436 opcode = opcode_flip[opcode >> 4]; 6437 /* This uses zero as "not present in table"; luckily the zero opcode, 6438 * BPF_JA, can't get here. 
6439 */ 6440 if (opcode) 6441 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); 6442 } 6443 6444 /* Regs are known to be equal, so intersect their min/max/var_off */ 6445 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 6446 struct bpf_reg_state *dst_reg) 6447 { 6448 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 6449 dst_reg->umin_value); 6450 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 6451 dst_reg->umax_value); 6452 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 6453 dst_reg->smin_value); 6454 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 6455 dst_reg->smax_value); 6456 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 6457 dst_reg->var_off); 6458 /* We might have learned new bounds from the var_off. */ 6459 __update_reg_bounds(src_reg); 6460 __update_reg_bounds(dst_reg); 6461 /* We might have learned something about the sign bit. */ 6462 __reg_deduce_bounds(src_reg); 6463 __reg_deduce_bounds(dst_reg); 6464 /* We might have learned some bits from the bounds. */ 6465 __reg_bound_offset(src_reg); 6466 __reg_bound_offset(dst_reg); 6467 /* Intersecting with the old var_off might have improved our bounds 6468 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 6469 * then new var_off is (0; 0x7f...fc) which improves our umax. 6470 */ 6471 __update_reg_bounds(src_reg); 6472 __update_reg_bounds(dst_reg); 6473 } 6474 6475 static void reg_combine_min_max(struct bpf_reg_state *true_src, 6476 struct bpf_reg_state *true_dst, 6477 struct bpf_reg_state *false_src, 6478 struct bpf_reg_state *false_dst, 6479 u8 opcode) 6480 { 6481 switch (opcode) { 6482 case BPF_JEQ: 6483 __reg_combine_min_max(true_src, true_dst); 6484 break; 6485 case BPF_JNE: 6486 __reg_combine_min_max(false_src, false_dst); 6487 break; 6488 } 6489 } 6490 6491 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 6492 struct bpf_reg_state *reg, u32 id, 6493 bool is_null) 6494 { 6495 if (reg_type_may_be_null(reg->type) && reg->id == id) { 6496 /* Old offset (both fixed and variable parts) should 6497 * have been known-zero, because we don't allow pointer 6498 * arithmetic on pointers that might be NULL. 6499 */ 6500 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 6501 !tnum_equals_const(reg->var_off, 0) || 6502 reg->off)) { 6503 __mark_reg_known_zero(reg); 6504 reg->off = 0; 6505 } 6506 if (is_null) { 6507 reg->type = SCALAR_VALUE; 6508 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 6509 if (reg->map_ptr->inner_map_meta) { 6510 reg->type = CONST_PTR_TO_MAP; 6511 reg->map_ptr = reg->map_ptr->inner_map_meta; 6512 } else if (reg->map_ptr->map_type == 6513 BPF_MAP_TYPE_XSKMAP) { 6514 reg->type = PTR_TO_XDP_SOCK; 6515 } else { 6516 reg->type = PTR_TO_MAP_VALUE; 6517 } 6518 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { 6519 reg->type = PTR_TO_SOCKET; 6520 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { 6521 reg->type = PTR_TO_SOCK_COMMON; 6522 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { 6523 reg->type = PTR_TO_TCP_SOCK; 6524 } 6525 if (is_null) { 6526 /* We don't need id and ref_obj_id from this point 6527 * onwards anymore, thus we should better reset it, 6528 * so that state pruning has chances to take effect. 6529 */ 6530 reg->id = 0; 6531 reg->ref_obj_id = 0; 6532 } else if (!reg_may_point_to_spin_lock(reg)) { 6533 /* For not-NULL ptr, reg->ref_obj_id will be reset 6534 * in release_reg_references(). 
6535 * 6536 * reg->id is still used by spin_lock ptr. Other 6537 * than spin_lock ptr type, reg->id can be reset. 6538 */ 6539 reg->id = 0; 6540 } 6541 } 6542 } 6543 6544 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, 6545 bool is_null) 6546 { 6547 struct bpf_reg_state *reg; 6548 int i; 6549 6550 for (i = 0; i < MAX_BPF_REG; i++) 6551 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); 6552 6553 bpf_for_each_spilled_reg(i, state, reg) { 6554 if (!reg) 6555 continue; 6556 mark_ptr_or_null_reg(state, reg, id, is_null); 6557 } 6558 } 6559 6560 /* The logic is similar to find_good_pkt_pointers(), both could eventually 6561 * be folded together at some point. 6562 */ 6563 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 6564 bool is_null) 6565 { 6566 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 6567 struct bpf_reg_state *regs = state->regs; 6568 u32 ref_obj_id = regs[regno].ref_obj_id; 6569 u32 id = regs[regno].id; 6570 int i; 6571 6572 if (ref_obj_id && ref_obj_id == id && is_null) 6573 /* regs[regno] is in the " == NULL" branch. 6574 * No one could have freed the reference state before 6575 * doing the NULL check. 6576 */ 6577 WARN_ON_ONCE(release_reference_state(state, id)); 6578 6579 for (i = 0; i <= vstate->curframe; i++) 6580 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); 6581 } 6582 6583 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 6584 struct bpf_reg_state *dst_reg, 6585 struct bpf_reg_state *src_reg, 6586 struct bpf_verifier_state *this_branch, 6587 struct bpf_verifier_state *other_branch) 6588 { 6589 if (BPF_SRC(insn->code) != BPF_X) 6590 return false; 6591 6592 /* Pointers are always 64-bit. */ 6593 if (BPF_CLASS(insn->code) == BPF_JMP32) 6594 return false; 6595 6596 switch (BPF_OP(insn->code)) { 6597 case BPF_JGT: 6598 if ((dst_reg->type == PTR_TO_PACKET && 6599 src_reg->type == PTR_TO_PACKET_END) || 6600 (dst_reg->type == PTR_TO_PACKET_META && 6601 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6602 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 6603 find_good_pkt_pointers(this_branch, dst_reg, 6604 dst_reg->type, false); 6605 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6606 src_reg->type == PTR_TO_PACKET) || 6607 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6608 src_reg->type == PTR_TO_PACKET_META)) { 6609 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 6610 find_good_pkt_pointers(other_branch, src_reg, 6611 src_reg->type, true); 6612 } else { 6613 return false; 6614 } 6615 break; 6616 case BPF_JLT: 6617 if ((dst_reg->type == PTR_TO_PACKET && 6618 src_reg->type == PTR_TO_PACKET_END) || 6619 (dst_reg->type == PTR_TO_PACKET_META && 6620 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6621 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 6622 find_good_pkt_pointers(other_branch, dst_reg, 6623 dst_reg->type, true); 6624 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6625 src_reg->type == PTR_TO_PACKET) || 6626 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6627 src_reg->type == PTR_TO_PACKET_META)) { 6628 /* pkt_end < pkt_data', pkt_data > pkt_meta' */ 6629 find_good_pkt_pointers(this_branch, src_reg, 6630 src_reg->type, false); 6631 } else { 6632 return false; 6633 } 6634 break; 6635 case BPF_JGE: 6636 if ((dst_reg->type == PTR_TO_PACKET && 6637 src_reg->type == PTR_TO_PACKET_END) || 6638 (dst_reg->type == PTR_TO_PACKET_META && 6639 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6640 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 6641 
find_good_pkt_pointers(this_branch, dst_reg, 6642 dst_reg->type, true); 6643 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6644 src_reg->type == PTR_TO_PACKET) || 6645 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6646 src_reg->type == PTR_TO_PACKET_META)) { 6647 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 6648 find_good_pkt_pointers(other_branch, src_reg, 6649 src_reg->type, false); 6650 } else { 6651 return false; 6652 } 6653 break; 6654 case BPF_JLE: 6655 if ((dst_reg->type == PTR_TO_PACKET && 6656 src_reg->type == PTR_TO_PACKET_END) || 6657 (dst_reg->type == PTR_TO_PACKET_META && 6658 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6659 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 6660 find_good_pkt_pointers(other_branch, dst_reg, 6661 dst_reg->type, false); 6662 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6663 src_reg->type == PTR_TO_PACKET) || 6664 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6665 src_reg->type == PTR_TO_PACKET_META)) { 6666 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 6667 find_good_pkt_pointers(this_branch, src_reg, 6668 src_reg->type, true); 6669 } else { 6670 return false; 6671 } 6672 break; 6673 default: 6674 return false; 6675 } 6676 6677 return true; 6678 } 6679 6680 static int check_cond_jmp_op(struct bpf_verifier_env *env, 6681 struct bpf_insn *insn, int *insn_idx) 6682 { 6683 struct bpf_verifier_state *this_branch = env->cur_state; 6684 struct bpf_verifier_state *other_branch; 6685 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 6686 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 6687 u8 opcode = BPF_OP(insn->code); 6688 bool is_jmp32; 6689 int pred = -1; 6690 int err; 6691 6692 /* Only conditional jumps are expected to reach here. 
 */
6693 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
6694 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
6695 		return -EINVAL;
6696 	}
6697
6698 	if (BPF_SRC(insn->code) == BPF_X) {
6699 		if (insn->imm != 0) {
6700 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
6701 			return -EINVAL;
6702 		}
6703
6704 		/* check src1 operand */
6705 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
6706 		if (err)
6707 			return err;
6708
6709 		if (is_pointer_value(env, insn->src_reg)) {
6710 			verbose(env, "R%d pointer comparison prohibited\n",
6711 				insn->src_reg);
6712 			return -EACCES;
6713 		}
6714 		src_reg = &regs[insn->src_reg];
6715 	} else {
6716 		if (insn->src_reg != BPF_REG_0) {
6717 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
6718 			return -EINVAL;
6719 		}
6720 	}
6721
6722 	/* check src2 operand */
6723 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6724 	if (err)
6725 		return err;
6726
6727 	dst_reg = &regs[insn->dst_reg];
6728 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
6729
6730 	if (BPF_SRC(insn->code) == BPF_K) {
6731 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
6732 	} else if (src_reg->type == SCALAR_VALUE &&
6733 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
6734 		pred = is_branch_taken(dst_reg,
6735 				       tnum_subreg(src_reg->var_off).value,
6736 				       opcode,
6737 				       is_jmp32);
6738 	} else if (src_reg->type == SCALAR_VALUE &&
6739 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
6740 		pred = is_branch_taken(dst_reg,
6741 				       src_reg->var_off.value,
6742 				       opcode,
6743 				       is_jmp32);
6744 	}
6745
6746 	if (pred >= 0) {
6747 		err = mark_chain_precision(env, insn->dst_reg);
6748 		if (BPF_SRC(insn->code) == BPF_X && !err)
6749 			err = mark_chain_precision(env, insn->src_reg);
6750 		if (err)
6751 			return err;
6752 	}
6753 	if (pred == 1) {
6754 		/* only follow the goto, ignore fall-through */
6755 		*insn_idx += insn->off;
6756 		return 0;
6757 	} else if (pred == 0) {
6758 		/* only follow fall-through branch, since
6759 		 * that's where the program will go
6760 		 */
6761 		return 0;
6762 	}
6763
6764 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
6765 				  false);
6766 	if (!other_branch)
6767 		return -EFAULT;
6768 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
6769
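	/* For illustration only (made-up numbers): if R1 is a scalar known to
	 * be in [0, 100] and the insn is "if R1 > 7 goto +off", then
	 * reg_set_min_max() below refines both copies of R1:
	 *
	 *	fall-through state:	R1.umax_value = 7
	 *	pushed branch state:	R1.umin_value = 8
	 *
	 * so each path continues with the tighter range implied by the test.
	 */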
6770 	/* detect if we are comparing against a constant value so we can adjust
6771 	 * our min/max values for our dst register.
6772 	 * this is only legit if both are scalars (or pointers to the same
6773 	 * object, I suppose, but we don't support that right now), because
6774 	 * otherwise the different base pointers mean the offsets aren't
6775 	 * comparable.
6776 	 */
6777 	if (BPF_SRC(insn->code) == BPF_X) {
6778 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
6779
6780 		if (dst_reg->type == SCALAR_VALUE &&
6781 		    src_reg->type == SCALAR_VALUE) {
6782 			if (tnum_is_const(src_reg->var_off) ||
6783 			    (is_jmp32 &&
6784 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
6785 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
6786 						dst_reg,
6787 						src_reg->var_off.value,
6788 						tnum_subreg(src_reg->var_off).value,
6789 						opcode, is_jmp32);
6790 			else if (tnum_is_const(dst_reg->var_off) ||
6791 				 (is_jmp32 &&
6792 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
6793 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
6794 						    src_reg,
6795 						    dst_reg->var_off.value,
6796 						    tnum_subreg(dst_reg->var_off).value,
6797 						    opcode, is_jmp32);
6798 			else if (!is_jmp32 &&
6799 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
6800 				/* Comparing for equality, we can combine knowledge */
6801 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
6802 						    &other_branch_regs[insn->dst_reg],
6803 						    src_reg, dst_reg, opcode);
6804 		}
6805 	} else if (dst_reg->type == SCALAR_VALUE) {
6806 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
6807 				dst_reg, insn->imm, (u32)insn->imm,
6808 				opcode, is_jmp32);
6809 	}
6810
6811 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
6812 	 * NOTE: these optimizations below are related with pointer comparison
6813 	 *       which will never be JMP32.
6814 	 */
6815 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
6816 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
6817 	    reg_type_may_be_null(dst_reg->type)) {
6818 		/* Mark all identical registers in each branch as either
6819 		 * safe or unknown depending R == 0 or R != 0 conditional.
6820 		 */
6821 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
6822 				      opcode == BPF_JNE);
6823 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
6824 				      opcode == BPF_JEQ);
6825 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
6826 					   this_branch, other_branch) &&
6827 		   is_pointer_value(env, insn->dst_reg)) {
6828 		verbose(env, "R%d pointer comparison prohibited\n",
6829 			insn->dst_reg);
6830 		return -EACCES;
6831 	}
6832 	if (env->log.level & BPF_LOG_LEVEL)
6833 		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
6834 	return 0;
6835 }
6836
6837 /* verify BPF_LD_IMM64 instruction */
6838 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
6839 {
6840 	struct bpf_insn_aux_data *aux = cur_aux(env);
6841 	struct bpf_reg_state *regs = cur_regs(env);
6842 	struct bpf_map *map;
6843 	int err;
6844
6845 	if (BPF_SIZE(insn->code) != BPF_DW) {
6846 		verbose(env, "invalid BPF_LD_IMM insn\n");
6847 		return -EINVAL;
6848 	}
6849 	if (insn->off != 0) {
6850 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
6851 		return -EINVAL;
6852 	}
6853
6854 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
6855 	if (err)
6856 		return err;
6857
6858 	if (insn->src_reg == 0) {
6859 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
6860
6861 		regs[insn->dst_reg].type = SCALAR_VALUE;
6862 		__mark_reg_known(&regs[insn->dst_reg], imm);
6863 		return 0;
6864 	}
6865
6866 	map = env->used_maps[aux->map_index];
6867 	mark_reg_known_zero(env, regs, insn->dst_reg);
6868 	regs[insn->dst_reg].map_ptr = map;
6869
6870 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
6871 		regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
6872 		regs[insn->dst_reg].off = aux->map_off;
6873 		if (map_value_has_spin_lock(map))
6874 			regs[insn->dst_reg].id = ++env->id_gen;
6875 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
6876 		regs[insn->dst_reg].type =
CONST_PTR_TO_MAP; 6877 } else { 6878 verbose(env, "bpf verifier is misconfigured\n"); 6879 return -EINVAL; 6880 } 6881 6882 return 0; 6883 } 6884 6885 static bool may_access_skb(enum bpf_prog_type type) 6886 { 6887 switch (type) { 6888 case BPF_PROG_TYPE_SOCKET_FILTER: 6889 case BPF_PROG_TYPE_SCHED_CLS: 6890 case BPF_PROG_TYPE_SCHED_ACT: 6891 return true; 6892 default: 6893 return false; 6894 } 6895 } 6896 6897 /* verify safety of LD_ABS|LD_IND instructions: 6898 * - they can only appear in the programs where ctx == skb 6899 * - since they are wrappers of function calls, they scratch R1-R5 registers, 6900 * preserve R6-R9, and store return value into R0 6901 * 6902 * Implicit input: 6903 * ctx == skb == R6 == CTX 6904 * 6905 * Explicit input: 6906 * SRC == any register 6907 * IMM == 32-bit immediate 6908 * 6909 * Output: 6910 * R0 - 8/16/32-bit skb data converted to cpu endianness 6911 */ 6912 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 6913 { 6914 struct bpf_reg_state *regs = cur_regs(env); 6915 static const int ctx_reg = BPF_REG_6; 6916 u8 mode = BPF_MODE(insn->code); 6917 int i, err; 6918 6919 if (!may_access_skb(env->prog->type)) { 6920 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 6921 return -EINVAL; 6922 } 6923 6924 if (!env->ops->gen_ld_abs) { 6925 verbose(env, "bpf verifier is misconfigured\n"); 6926 return -EINVAL; 6927 } 6928 6929 if (env->subprog_cnt > 1) { 6930 /* when program has LD_ABS insn JITs and interpreter assume 6931 * that r1 == ctx == skb which is not the case for callees 6932 * that can have arbitrary arguments. It's problematic 6933 * for main prog as well since JITs would need to analyze 6934 * all functions in order to make proper register save/restore 6935 * decisions in the main prog. Hence disallow LD_ABS with calls 6936 */ 6937 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); 6938 return -EINVAL; 6939 } 6940 6941 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 6942 BPF_SIZE(insn->code) == BPF_DW || 6943 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 6944 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 6945 return -EINVAL; 6946 } 6947 6948 /* check whether implicit source operand (register R6) is readable */ 6949 err = check_reg_arg(env, ctx_reg, SRC_OP); 6950 if (err) 6951 return err; 6952 6953 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as 6954 * gen_ld_abs() may terminate the program at runtime, leading to 6955 * reference leak. 
6956 	 */
6957 	err = check_reference_leak(env);
6958 	if (err) {
6959 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
6960 		return err;
6961 	}
6962
6963 	if (env->cur_state->active_spin_lock) {
6964 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
6965 		return -EINVAL;
6966 	}
6967
6968 	if (regs[ctx_reg].type != PTR_TO_CTX) {
6969 		verbose(env,
6970 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
6971 		return -EINVAL;
6972 	}
6973
6974 	if (mode == BPF_IND) {
6975 		/* check explicit source operand */
6976 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
6977 		if (err)
6978 			return err;
6979 	}
6980
6981 	err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
6982 	if (err < 0)
6983 		return err;
6984
6985 	/* reset caller saved regs to unreadable */
6986 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6987 		mark_reg_not_init(env, regs, caller_saved[i]);
6988 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6989 	}
6990
6991 	/* mark destination R0 register as readable, since it contains
6992 	 * the value fetched from the packet.
6993 	 * Already marked as written above.
6994 	 */
6995 	mark_reg_unknown(env, regs, BPF_REG_0);
6996 	/* ld_abs load up to 32-bit skb data. */
6997 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
6998 	return 0;
6999 }
7000
7001 static int check_return_code(struct bpf_verifier_env *env)
7002 {
7003 	struct tnum enforce_attach_type_range = tnum_unknown;
7004 	const struct bpf_prog *prog = env->prog;
7005 	struct bpf_reg_state *reg;
7006 	struct tnum range = tnum_range(0, 1);
7007 	int err;
7008
7009 	/* LSM and struct_ops func-ptr's return type could be "void" */
7010 	if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
7011 	     env->prog->type == BPF_PROG_TYPE_LSM) &&
7012 	    !prog->aux->attach_func_proto->type)
7013 		return 0;
7014
7015 	/* eBPF calling convention is such that R0 is used
7016 	 * to return the value from eBPF program.
7017 * Make sure that it's readable at this time 7018 * of bpf_exit, which means that program wrote 7019 * something into it earlier 7020 */ 7021 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 7022 if (err) 7023 return err; 7024 7025 if (is_pointer_value(env, BPF_REG_0)) { 7026 verbose(env, "R0 leaks addr as return value\n"); 7027 return -EACCES; 7028 } 7029 7030 switch (env->prog->type) { 7031 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 7032 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 7033 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG) 7034 range = tnum_range(1, 1); 7035 break; 7036 case BPF_PROG_TYPE_CGROUP_SKB: 7037 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 7038 range = tnum_range(0, 3); 7039 enforce_attach_type_range = tnum_range(2, 3); 7040 } 7041 break; 7042 case BPF_PROG_TYPE_CGROUP_SOCK: 7043 case BPF_PROG_TYPE_SOCK_OPS: 7044 case BPF_PROG_TYPE_CGROUP_DEVICE: 7045 case BPF_PROG_TYPE_CGROUP_SYSCTL: 7046 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 7047 break; 7048 case BPF_PROG_TYPE_RAW_TRACEPOINT: 7049 if (!env->prog->aux->attach_btf_id) 7050 return 0; 7051 range = tnum_const(0); 7052 break; 7053 default: 7054 return 0; 7055 } 7056 7057 reg = cur_regs(env) + BPF_REG_0; 7058 if (reg->type != SCALAR_VALUE) { 7059 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 7060 reg_type_str[reg->type]); 7061 return -EINVAL; 7062 } 7063 7064 if (!tnum_in(range, reg->var_off)) { 7065 char tn_buf[48]; 7066 7067 verbose(env, "At program exit the register R0 "); 7068 if (!tnum_is_unknown(reg->var_off)) { 7069 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 7070 verbose(env, "has value %s", tn_buf); 7071 } else { 7072 verbose(env, "has unknown scalar value"); 7073 } 7074 tnum_strn(tn_buf, sizeof(tn_buf), range); 7075 verbose(env, " should have been in %s\n", tn_buf); 7076 return -EINVAL; 7077 } 7078 7079 if (!tnum_is_unknown(enforce_attach_type_range) && 7080 tnum_in(enforce_attach_type_range, reg->var_off)) 7081 env->prog->enforce_expected_attach_type = 1; 7082 return 0; 7083 } 7084 7085 /* non-recursive DFS pseudo code 7086 * 1 procedure DFS-iterative(G,v): 7087 * 2 label v as discovered 7088 * 3 let S be a stack 7089 * 4 S.push(v) 7090 * 5 while S is not empty 7091 * 6 t <- S.pop() 7092 * 7 if t is what we're looking for: 7093 * 8 return t 7094 * 9 for all edges e in G.adjacentEdges(t) do 7095 * 10 if edge e is already labelled 7096 * 11 continue with the next edge 7097 * 12 w <- G.adjacentVertex(t,e) 7098 * 13 if vertex w is not discovered and not explored 7099 * 14 label e as tree-edge 7100 * 15 label w as discovered 7101 * 16 S.push(w) 7102 * 17 continue at 5 7103 * 18 else if vertex w is discovered 7104 * 19 label e as back-edge 7105 * 20 else 7106 * 21 // vertex w is explored 7107 * 22 label e as forward- or cross-edge 7108 * 23 label t as explored 7109 * 24 S.pop() 7110 * 7111 * convention: 7112 * 0x10 - discovered 7113 * 0x11 - discovered and fall-through edge labelled 7114 * 0x12 - discovered and fall-through and branch edges labelled 7115 * 0x20 - explored 7116 */ 7117 7118 enum { 7119 DISCOVERED = 0x10, 7120 EXPLORED = 0x20, 7121 FALLTHROUGH = 1, 7122 BRANCH = 2, 7123 }; 7124 7125 static u32 state_htab_size(struct bpf_verifier_env *env) 7126 { 7127 return env->prog->len; 7128 } 7129 7130 static struct bpf_verifier_state_list **explored_state( 7131 struct bpf_verifier_env *env, 7132 int idx) 7133 { 7134 struct bpf_verifier_state *cur = env->cur_state; 7135 struct bpf_func_state *state = cur->frame[cur->curframe]; 7136 7137 
return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 7138 } 7139 7140 static void init_explored_state(struct bpf_verifier_env *env, int idx) 7141 { 7142 env->insn_aux_data[idx].prune_point = true; 7143 } 7144 7145 /* t, w, e - match pseudo-code above: 7146 * t - index of current instruction 7147 * w - next instruction 7148 * e - edge 7149 */ 7150 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 7151 bool loop_ok) 7152 { 7153 int *insn_stack = env->cfg.insn_stack; 7154 int *insn_state = env->cfg.insn_state; 7155 7156 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 7157 return 0; 7158 7159 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 7160 return 0; 7161 7162 if (w < 0 || w >= env->prog->len) { 7163 verbose_linfo(env, t, "%d: ", t); 7164 verbose(env, "jump out of range from insn %d to %d\n", t, w); 7165 return -EINVAL; 7166 } 7167 7168 if (e == BRANCH) 7169 /* mark branch target for state pruning */ 7170 init_explored_state(env, w); 7171 7172 if (insn_state[w] == 0) { 7173 /* tree-edge */ 7174 insn_state[t] = DISCOVERED | e; 7175 insn_state[w] = DISCOVERED; 7176 if (env->cfg.cur_stack >= env->prog->len) 7177 return -E2BIG; 7178 insn_stack[env->cfg.cur_stack++] = w; 7179 return 1; 7180 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 7181 if (loop_ok && env->allow_ptr_leaks) 7182 return 0; 7183 verbose_linfo(env, t, "%d: ", t); 7184 verbose_linfo(env, w, "%d: ", w); 7185 verbose(env, "back-edge from insn %d to %d\n", t, w); 7186 return -EINVAL; 7187 } else if (insn_state[w] == EXPLORED) { 7188 /* forward- or cross-edge */ 7189 insn_state[t] = DISCOVERED | e; 7190 } else { 7191 verbose(env, "insn state internal bug\n"); 7192 return -EFAULT; 7193 } 7194 return 0; 7195 } 7196 7197 /* non-recursive depth-first-search to detect loops in BPF program 7198 * loop == back-edge in directed graph 7199 */ 7200 static int check_cfg(struct bpf_verifier_env *env) 7201 { 7202 struct bpf_insn *insns = env->prog->insnsi; 7203 int insn_cnt = env->prog->len; 7204 int *insn_stack, *insn_state; 7205 int ret = 0; 7206 int i, t; 7207 7208 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 7209 if (!insn_state) 7210 return -ENOMEM; 7211 7212 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 7213 if (!insn_stack) { 7214 kvfree(insn_state); 7215 return -ENOMEM; 7216 } 7217 7218 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 7219 insn_stack[0] = 0; /* 0 is the first instruction */ 7220 env->cfg.cur_stack = 1; 7221 7222 peek_stack: 7223 if (env->cfg.cur_stack == 0) 7224 goto check_state; 7225 t = insn_stack[env->cfg.cur_stack - 1]; 7226 7227 if (BPF_CLASS(insns[t].code) == BPF_JMP || 7228 BPF_CLASS(insns[t].code) == BPF_JMP32) { 7229 u8 opcode = BPF_OP(insns[t].code); 7230 7231 if (opcode == BPF_EXIT) { 7232 goto mark_explored; 7233 } else if (opcode == BPF_CALL) { 7234 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 7235 if (ret == 1) 7236 goto peek_stack; 7237 else if (ret < 0) 7238 goto err_free; 7239 if (t + 1 < insn_cnt) 7240 init_explored_state(env, t + 1); 7241 if (insns[t].src_reg == BPF_PSEUDO_CALL) { 7242 init_explored_state(env, t); 7243 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, 7244 env, false); 7245 if (ret == 1) 7246 goto peek_stack; 7247 else if (ret < 0) 7248 goto err_free; 7249 } 7250 } else if (opcode == BPF_JA) { 7251 if (BPF_SRC(insns[t].code) != BPF_K) { 7252 ret = -EINVAL; 7253 goto err_free; 7254 } 7255 /* unconditional jump with 
single edge */ 7256 ret = push_insn(t, t + insns[t].off + 1, 7257 FALLTHROUGH, env, true); 7258 if (ret == 1) 7259 goto peek_stack; 7260 else if (ret < 0) 7261 goto err_free; 7262 /* unconditional jmp is not a good pruning point, 7263 * but it's marked, since backtracking needs 7264 * to record jmp history in is_state_visited(). 7265 */ 7266 init_explored_state(env, t + insns[t].off + 1); 7267 /* tell verifier to check for equivalent states 7268 * after every call and jump 7269 */ 7270 if (t + 1 < insn_cnt) 7271 init_explored_state(env, t + 1); 7272 } else { 7273 /* conditional jump with two edges */ 7274 init_explored_state(env, t); 7275 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 7276 if (ret == 1) 7277 goto peek_stack; 7278 else if (ret < 0) 7279 goto err_free; 7280 7281 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true); 7282 if (ret == 1) 7283 goto peek_stack; 7284 else if (ret < 0) 7285 goto err_free; 7286 } 7287 } else { 7288 /* all other non-branch instructions with single 7289 * fall-through edge 7290 */ 7291 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 7292 if (ret == 1) 7293 goto peek_stack; 7294 else if (ret < 0) 7295 goto err_free; 7296 } 7297 7298 mark_explored: 7299 insn_state[t] = EXPLORED; 7300 if (env->cfg.cur_stack-- <= 0) { 7301 verbose(env, "pop stack internal bug\n"); 7302 ret = -EFAULT; 7303 goto err_free; 7304 } 7305 goto peek_stack; 7306 7307 check_state: 7308 for (i = 0; i < insn_cnt; i++) { 7309 if (insn_state[i] != EXPLORED) { 7310 verbose(env, "unreachable insn %d\n", i); 7311 ret = -EINVAL; 7312 goto err_free; 7313 } 7314 } 7315 ret = 0; /* cfg looks good */ 7316 7317 err_free: 7318 kvfree(insn_state); 7319 kvfree(insn_stack); 7320 env->cfg.insn_state = env->cfg.insn_stack = NULL; 7321 return ret; 7322 } 7323 7324 /* The minimum supported BTF func info size */ 7325 #define MIN_BPF_FUNCINFO_SIZE 8 7326 #define MAX_FUNCINFO_REC_SIZE 252 7327 7328 static int check_btf_func(struct bpf_verifier_env *env, 7329 const union bpf_attr *attr, 7330 union bpf_attr __user *uattr) 7331 { 7332 u32 i, nfuncs, urec_size, min_size; 7333 u32 krec_size = sizeof(struct bpf_func_info); 7334 struct bpf_func_info *krecord; 7335 struct bpf_func_info_aux *info_aux = NULL; 7336 const struct btf_type *type; 7337 struct bpf_prog *prog; 7338 const struct btf *btf; 7339 void __user *urecord; 7340 u32 prev_offset = 0; 7341 int ret = 0; 7342 7343 nfuncs = attr->func_info_cnt; 7344 if (!nfuncs) 7345 return 0; 7346 7347 if (nfuncs != env->subprog_cnt) { 7348 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); 7349 return -EINVAL; 7350 } 7351 7352 urec_size = attr->func_info_rec_size; 7353 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 7354 urec_size > MAX_FUNCINFO_REC_SIZE || 7355 urec_size % sizeof(u32)) { 7356 verbose(env, "invalid func info rec size %u\n", urec_size); 7357 return -EINVAL; 7358 } 7359 7360 prog = env->prog; 7361 btf = prog->aux->btf; 7362 7363 urecord = u64_to_user_ptr(attr->func_info); 7364 min_size = min_t(u32, krec_size, urec_size); 7365 7366 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 7367 if (!krecord) 7368 return -ENOMEM; 7369 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 7370 if (!info_aux) 7371 goto err_free; 7372 7373 for (i = 0; i < nfuncs; i++) { 7374 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 7375 if (ret) { 7376 if (ret == -E2BIG) { 7377 verbose(env, "nonzero tailing record in func info"); 7378 /* set the size kernel expects so loader can zero 7379 * 
out the rest of the record. 7380 */ 7381 if (put_user(min_size, &uattr->func_info_rec_size)) 7382 ret = -EFAULT; 7383 } 7384 goto err_free; 7385 } 7386 7387 if (copy_from_user(&krecord[i], urecord, min_size)) { 7388 ret = -EFAULT; 7389 goto err_free; 7390 } 7391 7392 /* check insn_off */ 7393 if (i == 0) { 7394 if (krecord[i].insn_off) { 7395 verbose(env, 7396 "nonzero insn_off %u for the first func info record", 7397 krecord[i].insn_off); 7398 ret = -EINVAL; 7399 goto err_free; 7400 } 7401 } else if (krecord[i].insn_off <= prev_offset) { 7402 verbose(env, 7403 "same or smaller insn offset (%u) than previous func info record (%u)", 7404 krecord[i].insn_off, prev_offset); 7405 ret = -EINVAL; 7406 goto err_free; 7407 } 7408 7409 if (env->subprog_info[i].start != krecord[i].insn_off) { 7410 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 7411 ret = -EINVAL; 7412 goto err_free; 7413 } 7414 7415 /* check type_id */ 7416 type = btf_type_by_id(btf, krecord[i].type_id); 7417 if (!type || !btf_type_is_func(type)) { 7418 verbose(env, "invalid type id %d in func info", 7419 krecord[i].type_id); 7420 ret = -EINVAL; 7421 goto err_free; 7422 } 7423 info_aux[i].linkage = BTF_INFO_VLEN(type->info); 7424 prev_offset = krecord[i].insn_off; 7425 urecord += urec_size; 7426 } 7427 7428 prog->aux->func_info = krecord; 7429 prog->aux->func_info_cnt = nfuncs; 7430 prog->aux->func_info_aux = info_aux; 7431 return 0; 7432 7433 err_free: 7434 kvfree(krecord); 7435 kfree(info_aux); 7436 return ret; 7437 } 7438 7439 static void adjust_btf_func(struct bpf_verifier_env *env) 7440 { 7441 struct bpf_prog_aux *aux = env->prog->aux; 7442 int i; 7443 7444 if (!aux->func_info) 7445 return; 7446 7447 for (i = 0; i < env->subprog_cnt; i++) 7448 aux->func_info[i].insn_off = env->subprog_info[i].start; 7449 } 7450 7451 #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ 7452 sizeof(((struct bpf_line_info *)(0))->line_col)) 7453 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 7454 7455 static int check_btf_line(struct bpf_verifier_env *env, 7456 const union bpf_attr *attr, 7457 union bpf_attr __user *uattr) 7458 { 7459 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 7460 struct bpf_subprog_info *sub; 7461 struct bpf_line_info *linfo; 7462 struct bpf_prog *prog; 7463 const struct btf *btf; 7464 void __user *ulinfo; 7465 int err; 7466 7467 nr_linfo = attr->line_info_cnt; 7468 if (!nr_linfo) 7469 return 0; 7470 7471 rec_size = attr->line_info_rec_size; 7472 if (rec_size < MIN_BPF_LINEINFO_SIZE || 7473 rec_size > MAX_LINEINFO_REC_SIZE || 7474 rec_size & (sizeof(u32) - 1)) 7475 return -EINVAL; 7476 7477 /* Need to zero it in case the userspace may 7478 * pass in a smaller bpf_line_info object. 
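 * With rec_size < sizeof(struct bpf_line_info) the copy_from_user() below
 * only fills the first ncopy bytes of each record, so the zeroed tail of
 * the kvcalloc'ed buffer supplies the missing fields.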
7479 */ 7480 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 7481 GFP_KERNEL | __GFP_NOWARN); 7482 if (!linfo) 7483 return -ENOMEM; 7484 7485 prog = env->prog; 7486 btf = prog->aux->btf; 7487 7488 s = 0; 7489 sub = env->subprog_info; 7490 ulinfo = u64_to_user_ptr(attr->line_info); 7491 expected_size = sizeof(struct bpf_line_info); 7492 ncopy = min_t(u32, expected_size, rec_size); 7493 for (i = 0; i < nr_linfo; i++) { 7494 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 7495 if (err) { 7496 if (err == -E2BIG) { 7497 verbose(env, "nonzero tailing record in line_info"); 7498 if (put_user(expected_size, 7499 &uattr->line_info_rec_size)) 7500 err = -EFAULT; 7501 } 7502 goto err_free; 7503 } 7504 7505 if (copy_from_user(&linfo[i], ulinfo, ncopy)) { 7506 err = -EFAULT; 7507 goto err_free; 7508 } 7509 7510 /* 7511 * Check insn_off to ensure 7512 * 1) strictly increasing AND 7513 * 2) bounded by prog->len 7514 * 7515 * The linfo[0].insn_off == 0 check logically falls into 7516 * the later "missing bpf_line_info for func..." case 7517 * because the first linfo[0].insn_off must be the 7518 * first sub also and the first sub must have 7519 * subprog_info[0].start == 0. 7520 */ 7521 if ((i && linfo[i].insn_off <= prev_offset) || 7522 linfo[i].insn_off >= prog->len) { 7523 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 7524 i, linfo[i].insn_off, prev_offset, 7525 prog->len); 7526 err = -EINVAL; 7527 goto err_free; 7528 } 7529 7530 if (!prog->insnsi[linfo[i].insn_off].code) { 7531 verbose(env, 7532 "Invalid insn code at line_info[%u].insn_off\n", 7533 i); 7534 err = -EINVAL; 7535 goto err_free; 7536 } 7537 7538 if (!btf_name_by_offset(btf, linfo[i].line_off) || 7539 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 7540 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 7541 err = -EINVAL; 7542 goto err_free; 7543 } 7544 7545 if (s != env->subprog_cnt) { 7546 if (linfo[i].insn_off == sub[s].start) { 7547 sub[s].linfo_idx = i; 7548 s++; 7549 } else if (sub[s].start < linfo[i].insn_off) { 7550 verbose(env, "missing bpf_line_info for func#%u\n", s); 7551 err = -EINVAL; 7552 goto err_free; 7553 } 7554 } 7555 7556 prev_offset = linfo[i].insn_off; 7557 ulinfo += rec_size; 7558 } 7559 7560 if (s != env->subprog_cnt) { 7561 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 7562 env->subprog_cnt - s, s); 7563 err = -EINVAL; 7564 goto err_free; 7565 } 7566 7567 prog->aux->linfo = linfo; 7568 prog->aux->nr_linfo = nr_linfo; 7569 7570 return 0; 7571 7572 err_free: 7573 kvfree(linfo); 7574 return err; 7575 } 7576 7577 static int check_btf_info(struct bpf_verifier_env *env, 7578 const union bpf_attr *attr, 7579 union bpf_attr __user *uattr) 7580 { 7581 struct btf *btf; 7582 int err; 7583 7584 if (!attr->func_info_cnt && !attr->line_info_cnt) 7585 return 0; 7586 7587 btf = btf_get_by_fd(attr->prog_btf_fd); 7588 if (IS_ERR(btf)) 7589 return PTR_ERR(btf); 7590 env->prog->aux->btf = btf; 7591 7592 err = check_btf_func(env, attr, uattr); 7593 if (err) 7594 return err; 7595 7596 err = check_btf_line(env, attr, uattr); 7597 if (err) 7598 return err; 7599 7600 return 0; 7601 } 7602 7603 /* check %cur's range satisfies %old's */ 7604 static bool range_within(struct bpf_reg_state *old, 7605 struct bpf_reg_state *cur) 7606 { 7607 return old->umin_value <= cur->umin_value && 7608 old->umax_value >= cur->umax_value && 7609 old->smin_value <= cur->smin_value && 7610 old->smax_value >= cur->smax_value; 7611 } 7612 7613 /* 
Maximum number of register states that can exist at once */ 7614 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 7615 struct idpair { 7616 u32 old; 7617 u32 cur; 7618 }; 7619 7620 /* If in the old state two registers had the same id, then they need to have 7621 * the same id in the new state as well. But that id could be different from 7622 * the old state, so we need to track the mapping from old to new ids. 7623 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 7624 * regs with old id 5 must also have new id 9 for the new state to be safe. But 7625 * regs with a different old id could still have new id 9, we don't care about 7626 * that. 7627 * So we look through our idmap to see if this old id has been seen before. If 7628 * so, we require the new id to match; otherwise, we add the id pair to the map. 7629 */ 7630 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) 7631 { 7632 unsigned int i; 7633 7634 for (i = 0; i < ID_MAP_SIZE; i++) { 7635 if (!idmap[i].old) { 7636 /* Reached an empty slot; haven't seen this id before */ 7637 idmap[i].old = old_id; 7638 idmap[i].cur = cur_id; 7639 return true; 7640 } 7641 if (idmap[i].old == old_id) 7642 return idmap[i].cur == cur_id; 7643 } 7644 /* We ran out of idmap slots, which should be impossible */ 7645 WARN_ON_ONCE(1); 7646 return false; 7647 } 7648 7649 static void clean_func_state(struct bpf_verifier_env *env, 7650 struct bpf_func_state *st) 7651 { 7652 enum bpf_reg_liveness live; 7653 int i, j; 7654 7655 for (i = 0; i < BPF_REG_FP; i++) { 7656 live = st->regs[i].live; 7657 /* liveness must not touch this register anymore */ 7658 st->regs[i].live |= REG_LIVE_DONE; 7659 if (!(live & REG_LIVE_READ)) 7660 /* since the register is unused, clear its state 7661 * to make further comparison simpler 7662 */ 7663 __mark_reg_not_init(env, &st->regs[i]); 7664 } 7665 7666 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 7667 live = st->stack[i].spilled_ptr.live; 7668 /* liveness must not touch this stack slot anymore */ 7669 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 7670 if (!(live & REG_LIVE_READ)) { 7671 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 7672 for (j = 0; j < BPF_REG_SIZE; j++) 7673 st->stack[i].slot_type[j] = STACK_INVALID; 7674 } 7675 } 7676 } 7677 7678 static void clean_verifier_state(struct bpf_verifier_env *env, 7679 struct bpf_verifier_state *st) 7680 { 7681 int i; 7682 7683 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 7684 /* all regs in this state in all frames were already marked */ 7685 return; 7686 7687 for (i = 0; i <= st->curframe; i++) 7688 clean_func_state(env, st->frame[i]); 7689 } 7690 7691 /* the parentage chains form a tree. 7692 * the verifier states are added to state lists at given insn and 7693 * pushed into state stack for future exploration. 7694 * when the verifier reaches bpf_exit insn some of the verifer states 7695 * stored in the state lists have their final liveness state already, 7696 * but a lot of states will get revised from liveness point of view when 7697 * the verifier explores other branches. 7698 * Example: 7699 * 1: r0 = 1 7700 * 2: if r1 == 100 goto pc+1 7701 * 3: r0 = 2 7702 * 4: exit 7703 * when the verifier reaches exit insn the register r0 in the state list of 7704 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 7705 * of insn 2 and goes exploring further. At the insn 4 it will walk the 7706 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 
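 * From then on a new path reaching insn 2 can be pruned against that
 * stored state only if its r0 also satisfies regsafe() against the
 * recorded r0; while r0 was still !REG_LIVE_READ the comparison would
 * simply have skipped it.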
7707 * 7708 * Since the verifier pushes the branch states as it sees them while exploring 7709 * the program the condition of walking the branch instruction for the second 7710 * time means that all states below this branch were already explored and 7711 * their final liveness markes are already propagated. 7712 * Hence when the verifier completes the search of state list in is_state_visited() 7713 * we can call this clean_live_states() function to mark all liveness states 7714 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 7715 * will not be used. 7716 * This function also clears the registers and stack for states that !READ 7717 * to simplify state merging. 7718 * 7719 * Important note here that walking the same branch instruction in the callee 7720 * doesn't meant that the states are DONE. The verifier has to compare 7721 * the callsites 7722 */ 7723 static void clean_live_states(struct bpf_verifier_env *env, int insn, 7724 struct bpf_verifier_state *cur) 7725 { 7726 struct bpf_verifier_state_list *sl; 7727 int i; 7728 7729 sl = *explored_state(env, insn); 7730 while (sl) { 7731 if (sl->state.branches) 7732 goto next; 7733 if (sl->state.insn_idx != insn || 7734 sl->state.curframe != cur->curframe) 7735 goto next; 7736 for (i = 0; i <= cur->curframe; i++) 7737 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) 7738 goto next; 7739 clean_verifier_state(env, &sl->state); 7740 next: 7741 sl = sl->next; 7742 } 7743 } 7744 7745 /* Returns true if (rold safe implies rcur safe) */ 7746 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 7747 struct idpair *idmap) 7748 { 7749 bool equal; 7750 7751 if (!(rold->live & REG_LIVE_READ)) 7752 /* explored state didn't use this */ 7753 return true; 7754 7755 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; 7756 7757 if (rold->type == PTR_TO_STACK) 7758 /* two stack pointers are equal only if they're pointing to 7759 * the same stack frame, since fp-8 in foo != fp-8 in bar 7760 */ 7761 return equal && rold->frameno == rcur->frameno; 7762 7763 if (equal) 7764 return true; 7765 7766 if (rold->type == NOT_INIT) 7767 /* explored state can't have used this */ 7768 return true; 7769 if (rcur->type == NOT_INIT) 7770 return false; 7771 switch (rold->type) { 7772 case SCALAR_VALUE: 7773 if (rcur->type == SCALAR_VALUE) { 7774 if (!rold->precise && !rcur->precise) 7775 return true; 7776 /* new val must satisfy old val knowledge */ 7777 return range_within(rold, rcur) && 7778 tnum_in(rold->var_off, rcur->var_off); 7779 } else { 7780 /* We're trying to use a pointer in place of a scalar. 7781 * Even if the scalar was unbounded, this could lead to 7782 * pointer leaks because scalars are allowed to leak 7783 * while pointers are not. We could make this safe in 7784 * special cases if root is calling us, but it's 7785 * probably not worth the hassle. 7786 */ 7787 return false; 7788 } 7789 case PTR_TO_MAP_VALUE: 7790 /* If the new min/max/var_off satisfy the old ones and 7791 * everything else matches, we are OK. 
7792 * 'id' is not compared, since it's only used for maps with 7793 * bpf_spin_lock inside map element and in such cases if 7794 * the rest of the prog is valid for one map element then 7795 * it's valid for all map elements regardless of the key 7796 * used in bpf_map_lookup() 7797 */ 7798 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 7799 range_within(rold, rcur) && 7800 tnum_in(rold->var_off, rcur->var_off); 7801 case PTR_TO_MAP_VALUE_OR_NULL: 7802 /* a PTR_TO_MAP_VALUE could be safe to use as a 7803 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 7804 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 7805 * checked, doing so could have affected others with the same 7806 * id, and we can't check for that because we lost the id when 7807 * we converted to a PTR_TO_MAP_VALUE. 7808 */ 7809 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 7810 return false; 7811 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 7812 return false; 7813 /* Check our ids match any regs they're supposed to */ 7814 return check_ids(rold->id, rcur->id, idmap); 7815 case PTR_TO_PACKET_META: 7816 case PTR_TO_PACKET: 7817 if (rcur->type != rold->type) 7818 return false; 7819 /* We must have at least as much range as the old ptr 7820 * did, so that any accesses which were safe before are 7821 * still safe. This is true even if old range < old off, 7822 * since someone could have accessed through (ptr - k), or 7823 * even done ptr -= k in a register, to get a safe access. 7824 */ 7825 if (rold->range > rcur->range) 7826 return false; 7827 /* If the offsets don't match, we can't trust our alignment; 7828 * nor can we be sure that we won't fall out of range. 7829 */ 7830 if (rold->off != rcur->off) 7831 return false; 7832 /* id relations must be preserved */ 7833 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 7834 return false; 7835 /* new val must satisfy old val knowledge */ 7836 return range_within(rold, rcur) && 7837 tnum_in(rold->var_off, rcur->var_off); 7838 case PTR_TO_CTX: 7839 case CONST_PTR_TO_MAP: 7840 case PTR_TO_PACKET_END: 7841 case PTR_TO_FLOW_KEYS: 7842 case PTR_TO_SOCKET: 7843 case PTR_TO_SOCKET_OR_NULL: 7844 case PTR_TO_SOCK_COMMON: 7845 case PTR_TO_SOCK_COMMON_OR_NULL: 7846 case PTR_TO_TCP_SOCK: 7847 case PTR_TO_TCP_SOCK_OR_NULL: 7848 case PTR_TO_XDP_SOCK: 7849 /* Only valid matches are exact, which memcmp() above 7850 * would have accepted 7851 */ 7852 default: 7853 /* Don't know what's going on, just say it's not safe */ 7854 return false; 7855 } 7856 7857 /* Shouldn't get here; if we do, say it's not safe */ 7858 WARN_ON_ONCE(1); 7859 return false; 7860 } 7861 7862 static bool stacksafe(struct bpf_func_state *old, 7863 struct bpf_func_state *cur, 7864 struct idpair *idmap) 7865 { 7866 int i, spi; 7867 7868 /* walk slots of the explored stack and ignore any additional 7869 * slots in the current stack, since explored(safe) state 7870 * didn't use them 7871 */ 7872 for (i = 0; i < old->allocated_stack; i++) { 7873 spi = i / BPF_REG_SIZE; 7874 7875 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { 7876 i += BPF_REG_SIZE - 1; 7877 /* explored state didn't use this */ 7878 continue; 7879 } 7880 7881 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 7882 continue; 7883 7884 /* explored stack has more populated slots than current stack 7885 * and these slots were used 7886 */ 7887 if (i >= cur->allocated_stack) 7888 return false; 7889 7890 /* if old state was safe with misc data in the stack 7891 * it will be safe with 
zero-initialized stack. 7892 * The opposite is not true 7893 */ 7894 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 7895 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 7896 continue; 7897 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 7898 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 7899 /* Ex: old explored (safe) state has STACK_SPILL in 7900 * this stack slot, but current has STACK_MISC -> 7901 * these verifier states are not equivalent, 7902 * return false to continue verification of this path 7903 */ 7904 return false; 7905 if (i % BPF_REG_SIZE) 7906 continue; 7907 if (old->stack[spi].slot_type[0] != STACK_SPILL) 7908 continue; 7909 if (!regsafe(&old->stack[spi].spilled_ptr, 7910 &cur->stack[spi].spilled_ptr, 7911 idmap)) 7912 /* when explored and current stack slot are both storing 7913 * spilled registers, check that stored pointer types 7914 * are the same as well. 7915 * Ex: explored safe path could have stored 7916 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 7917 * but current path has stored: 7918 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 7919 * such verifier states are not equivalent. 7920 * return false to continue verification of this path 7921 */ 7922 return false; 7923 } 7924 return true; 7925 } 7926 7927 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) 7928 { 7929 if (old->acquired_refs != cur->acquired_refs) 7930 return false; 7931 return !memcmp(old->refs, cur->refs, 7932 sizeof(*old->refs) * old->acquired_refs); 7933 } 7934 7935 /* compare two verifier states 7936 * 7937 * all states stored in state_list are known to be valid, since 7938 * the verifier reached the 'bpf_exit' instruction through them 7939 * 7940 * this function is called when the verifier is exploring different branches of 7941 * execution popped from the state stack. If it sees an old state that has 7942 * more strict register state and more strict stack state then this execution 7943 * branch doesn't need to be explored further, since the verifier already 7944 * concluded that the more strict state leads to a valid finish. 7945 * 7946 * Therefore two states are equivalent if register state is more conservative 7947 * and explored stack state is more conservative than the current one. 7948 * Example: 7949 * explored current 7950 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 7951 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 7952 * 7953 * In other words if the current stack state (the one being explored) has more 7954 * valid slots than the old one that already passed validation, it means 7955 * the verifier can stop exploring and conclude that the current state is valid too 7956 * 7957 * Similarly with registers.
If explored state has register type as invalid 7958 * whereas register type in current state is meaningful, it means that 7959 * the current state will reach 'bpf_exit' instruction safely 7960 */ 7961 static bool func_states_equal(struct bpf_func_state *old, 7962 struct bpf_func_state *cur) 7963 { 7964 struct idpair *idmap; 7965 bool ret = false; 7966 int i; 7967 7968 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); 7969 /* If we failed to allocate the idmap, just say it's not safe */ 7970 if (!idmap) 7971 return false; 7972 7973 for (i = 0; i < MAX_BPF_REG; i++) { 7974 if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) 7975 goto out_free; 7976 } 7977 7978 if (!stacksafe(old, cur, idmap)) 7979 goto out_free; 7980 7981 if (!refsafe(old, cur)) 7982 goto out_free; 7983 ret = true; 7984 out_free: 7985 kfree(idmap); 7986 return ret; 7987 } 7988 7989 static bool states_equal(struct bpf_verifier_env *env, 7990 struct bpf_verifier_state *old, 7991 struct bpf_verifier_state *cur) 7992 { 7993 int i; 7994 7995 if (old->curframe != cur->curframe) 7996 return false; 7997 7998 /* Verification state from speculative execution simulation 7999 * must never prune a non-speculative execution one. 8000 */ 8001 if (old->speculative && !cur->speculative) 8002 return false; 8003 8004 if (old->active_spin_lock != cur->active_spin_lock) 8005 return false; 8006 8007 /* for states to be equal callsites have to be the same 8008 * and all frame states need to be equivalent 8009 */ 8010 for (i = 0; i <= old->curframe; i++) { 8011 if (old->frame[i]->callsite != cur->frame[i]->callsite) 8012 return false; 8013 if (!func_states_equal(old->frame[i], cur->frame[i])) 8014 return false; 8015 } 8016 return true; 8017 } 8018 8019 /* Return 0 if no propagation happened. Return negative error code if error 8020 * happened. Otherwise, return the propagated bit. 8021 */ 8022 static int propagate_liveness_reg(struct bpf_verifier_env *env, 8023 struct bpf_reg_state *reg, 8024 struct bpf_reg_state *parent_reg) 8025 { 8026 u8 parent_flag = parent_reg->live & REG_LIVE_READ; 8027 u8 flag = reg->live & REG_LIVE_READ; 8028 int err; 8029 8030 /* When comes here, read flags of PARENT_REG or REG could be any of 8031 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need 8032 * of propagation if PARENT_REG has strongest REG_LIVE_READ64. 8033 */ 8034 if (parent_flag == REG_LIVE_READ64 || 8035 /* Or if there is no read flag from REG. */ 8036 !flag || 8037 /* Or if the read flag from REG is the same as PARENT_REG. */ 8038 parent_flag == flag) 8039 return 0; 8040 8041 err = mark_reg_read(env, reg, parent_reg, flag); 8042 if (err) 8043 return err; 8044 8045 return flag; 8046 } 8047 8048 /* A write screens off any subsequent reads; but write marks come from the 8049 * straight-line code between a state and its parent. When we arrive at an 8050 * equivalent state (jump target or such) we didn't arrive by the straight-line 8051 * code, so read marks in the state must propagate to the parent regardless 8052 * of the state's write marks. That's what 'parent == state->parent' comparison 8053 * in mark_reg_read() is for. 
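 * Concretely, propagate_liveness() below calls mark_reg_read() with the
 * equivalent (old) state's register as 'state' and the current state's
 * register as 'parent'; since that 'parent' is not state->parent, the old
 * state's own WRITTEN marks do not screen the read, and the mark then
 * travels up the current state's parentage chain in the usual way.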
8054 */ 8055 static int propagate_liveness(struct bpf_verifier_env *env, 8056 const struct bpf_verifier_state *vstate, 8057 struct bpf_verifier_state *vparent) 8058 { 8059 struct bpf_reg_state *state_reg, *parent_reg; 8060 struct bpf_func_state *state, *parent; 8061 int i, frame, err = 0; 8062 8063 if (vparent->curframe != vstate->curframe) { 8064 WARN(1, "propagate_live: parent frame %d current frame %d\n", 8065 vparent->curframe, vstate->curframe); 8066 return -EFAULT; 8067 } 8068 /* Propagate read liveness of registers... */ 8069 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 8070 for (frame = 0; frame <= vstate->curframe; frame++) { 8071 parent = vparent->frame[frame]; 8072 state = vstate->frame[frame]; 8073 parent_reg = parent->regs; 8074 state_reg = state->regs; 8075 /* We don't need to worry about FP liveness, it's read-only */ 8076 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 8077 err = propagate_liveness_reg(env, &state_reg[i], 8078 &parent_reg[i]); 8079 if (err < 0) 8080 return err; 8081 if (err == REG_LIVE_READ64) 8082 mark_insn_zext(env, &parent_reg[i]); 8083 } 8084 8085 /* Propagate stack slots. */ 8086 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 8087 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 8088 parent_reg = &parent->stack[i].spilled_ptr; 8089 state_reg = &state->stack[i].spilled_ptr; 8090 err = propagate_liveness_reg(env, state_reg, 8091 parent_reg); 8092 if (err < 0) 8093 return err; 8094 } 8095 } 8096 return 0; 8097 } 8098 8099 /* find precise scalars in the previous equivalent state and 8100 * propagate them into the current state 8101 */ 8102 static int propagate_precision(struct bpf_verifier_env *env, 8103 const struct bpf_verifier_state *old) 8104 { 8105 struct bpf_reg_state *state_reg; 8106 struct bpf_func_state *state; 8107 int i, err = 0; 8108 8109 state = old->frame[old->curframe]; 8110 state_reg = state->regs; 8111 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 8112 if (state_reg->type != SCALAR_VALUE || 8113 !state_reg->precise) 8114 continue; 8115 if (env->log.level & BPF_LOG_LEVEL2) 8116 verbose(env, "propagating r%d\n", i); 8117 err = mark_chain_precision(env, i); 8118 if (err < 0) 8119 return err; 8120 } 8121 8122 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 8123 if (state->stack[i].slot_type[0] != STACK_SPILL) 8124 continue; 8125 state_reg = &state->stack[i].spilled_ptr; 8126 if (state_reg->type != SCALAR_VALUE || 8127 !state_reg->precise) 8128 continue; 8129 if (env->log.level & BPF_LOG_LEVEL2) 8130 verbose(env, "propagating fp%d\n", 8131 (-i - 1) * BPF_REG_SIZE); 8132 err = mark_chain_precision_stack(env, i); 8133 if (err < 0) 8134 return err; 8135 } 8136 return 0; 8137 } 8138 8139 static bool states_maybe_looping(struct bpf_verifier_state *old, 8140 struct bpf_verifier_state *cur) 8141 { 8142 struct bpf_func_state *fold, *fcur; 8143 int i, fr = cur->curframe; 8144 8145 if (old->curframe != fr) 8146 return false; 8147 8148 fold = old->frame[fr]; 8149 fcur = cur->frame[fr]; 8150 for (i = 0; i < MAX_BPF_REG; i++) 8151 if (memcmp(&fold->regs[i], &fcur->regs[i], 8152 offsetof(struct bpf_reg_state, parent))) 8153 return false; 8154 return true; 8155 } 8156 8157 8158 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 8159 { 8160 struct bpf_verifier_state_list *new_sl; 8161 struct bpf_verifier_state_list *sl, **pprev; 8162 struct bpf_verifier_state *cur = env->cur_state, *new; 8163 int i, j, err, states_cnt = 0; 8164 bool add_new_state = env->test_state_freq ? 
true : false; 8165 8166 cur->last_insn_idx = env->prev_insn_idx; 8167 if (!env->insn_aux_data[insn_idx].prune_point) 8168 /* this 'insn_idx' instruction wasn't marked, so we will not 8169 * be doing state search here 8170 */ 8171 return 0; 8172 8173 /* bpf progs typically have pruning point every 4 instructions 8174 * http://vger.kernel.org/bpfconf2019.html#session-1 8175 * Do not add new state for future pruning if the verifier hasn't seen 8176 * at least 2 jumps and at least 8 instructions. 8177 * This heuristics helps decrease 'total_states' and 'peak_states' metric. 8178 * In tests that amounts to up to 50% reduction into total verifier 8179 * memory consumption and 20% verifier time speedup. 8180 */ 8181 if (env->jmps_processed - env->prev_jmps_processed >= 2 && 8182 env->insn_processed - env->prev_insn_processed >= 8) 8183 add_new_state = true; 8184 8185 pprev = explored_state(env, insn_idx); 8186 sl = *pprev; 8187 8188 clean_live_states(env, insn_idx, cur); 8189 8190 while (sl) { 8191 states_cnt++; 8192 if (sl->state.insn_idx != insn_idx) 8193 goto next; 8194 if (sl->state.branches) { 8195 if (states_maybe_looping(&sl->state, cur) && 8196 states_equal(env, &sl->state, cur)) { 8197 verbose_linfo(env, insn_idx, "; "); 8198 verbose(env, "infinite loop detected at insn %d\n", insn_idx); 8199 return -EINVAL; 8200 } 8201 /* if the verifier is processing a loop, avoid adding new state 8202 * too often, since different loop iterations have distinct 8203 * states and may not help future pruning. 8204 * This threshold shouldn't be too low to make sure that 8205 * a loop with large bound will be rejected quickly. 8206 * The most abusive loop will be: 8207 * r1 += 1 8208 * if r1 < 1000000 goto pc-2 8209 * 1M insn_procssed limit / 100 == 10k peak states. 8210 * This threshold shouldn't be too high either, since states 8211 * at the end of the loop are likely to be useful in pruning. 8212 */ 8213 if (env->jmps_processed - env->prev_jmps_processed < 20 && 8214 env->insn_processed - env->prev_insn_processed < 100) 8215 add_new_state = false; 8216 goto miss; 8217 } 8218 if (states_equal(env, &sl->state, cur)) { 8219 sl->hit_cnt++; 8220 /* reached equivalent register/stack state, 8221 * prune the search. 8222 * Registers read by the continuation are read by us. 8223 * If we have any write marks in env->cur_state, they 8224 * will prevent corresponding reads in the continuation 8225 * from reaching our parent (an explored_state). Our 8226 * own state will get the read marks recorded, but 8227 * they'll be immediately forgotten as we're pruning 8228 * this state and will pop a new one. 8229 */ 8230 err = propagate_liveness(env, &sl->state, cur); 8231 8232 /* if previous state reached the exit with precision and 8233 * current state is equivalent to it (except precsion marks) 8234 * the precision needs to be propagated back in 8235 * the current state. 8236 */ 8237 err = err ? : push_jmp_history(env, cur); 8238 err = err ? : propagate_precision(env, &sl->state); 8239 if (err) 8240 return err; 8241 return 1; 8242 } 8243 miss: 8244 /* when new state is not going to be added do not increase miss count. 8245 * Otherwise several loop iterations will remove the state 8246 * recorded earlier. The goal of these heuristics is to have 8247 * states from some iterations of the loop (some in the beginning 8248 * and some at the end) to help pruning. 
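 * E.g. (illustrative) with the threshold used below a state that is
 * missed four times without a single hit is dropped, while a state that
 * keeps getting hits has to accumulate proportionally more misses before
 * it is dropped.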
8249 */ 8250 if (add_new_state) 8251 sl->miss_cnt++; 8252 /* heuristic to determine whether this state is beneficial 8253 * to keep checking from state equivalence point of view. 8254 * Higher numbers increase max_states_per_insn and verification time, 8255 * but do not meaningfully decrease insn_processed. 8256 */ 8257 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) { 8258 /* the state is unlikely to be useful. Remove it to 8259 * speed up verification 8260 */ 8261 *pprev = sl->next; 8262 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { 8263 u32 br = sl->state.branches; 8264 8265 WARN_ONCE(br, 8266 "BUG live_done but branches_to_explore %d\n", 8267 br); 8268 free_verifier_state(&sl->state, false); 8269 kfree(sl); 8270 env->peak_states--; 8271 } else { 8272 /* cannot free this state, since parentage chain may 8273 * walk it later. Add it for free_list instead to 8274 * be freed at the end of verification 8275 */ 8276 sl->next = env->free_list; 8277 env->free_list = sl; 8278 } 8279 sl = *pprev; 8280 continue; 8281 } 8282 next: 8283 pprev = &sl->next; 8284 sl = *pprev; 8285 } 8286 8287 if (env->max_states_per_insn < states_cnt) 8288 env->max_states_per_insn = states_cnt; 8289 8290 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) 8291 return push_jmp_history(env, cur); 8292 8293 if (!add_new_state) 8294 return push_jmp_history(env, cur); 8295 8296 /* There were no equivalent states, remember the current one. 8297 * Technically the current state is not proven to be safe yet, 8298 * but it will either reach outer most bpf_exit (which means it's safe) 8299 * or it will be rejected. When there are no loops the verifier won't be 8300 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 8301 * again on the way to bpf_exit. 8302 * When looping the sl->state.branches will be > 0 and this state 8303 * will not be considered for equivalence until branches == 0. 8304 */ 8305 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 8306 if (!new_sl) 8307 return -ENOMEM; 8308 env->total_states++; 8309 env->peak_states++; 8310 env->prev_jmps_processed = env->jmps_processed; 8311 env->prev_insn_processed = env->insn_processed; 8312 8313 /* add new state to the head of linked list */ 8314 new = &new_sl->state; 8315 err = copy_verifier_state(new, cur); 8316 if (err) { 8317 free_verifier_state(new, false); 8318 kfree(new_sl); 8319 return err; 8320 } 8321 new->insn_idx = insn_idx; 8322 WARN_ONCE(new->branches != 1, 8323 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); 8324 8325 cur->parent = new; 8326 cur->first_insn_idx = insn_idx; 8327 clear_jmp_history(cur); 8328 new_sl->next = *explored_state(env, insn_idx); 8329 *explored_state(env, insn_idx) = new_sl; 8330 /* connect new state to parentage chain. Current frame needs all 8331 * registers connected. Only r6 - r9 of the callers are alive (pushed 8332 * to the stack implicitly by JITs) so in callers' frames connect just 8333 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to 8334 * the state of the call instruction (with WRITTEN set), and r0 comes 8335 * from callee with its full parentage chain, anyway. 8336 */ 8337 /* clear write marks in current state: the writes we did are not writes 8338 * our child did, so they don't screen off its reads from us. 8339 * (There are no read marks in current state, because reads always mark 8340 * their parent and current state never has children yet. Only 8341 * explored_states can get read marks.) 
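 * (After the loops below the current state's registers and stack slots
 * have ->parent pointing into the stored copy, so reads performed while
 * exploring the continuation accumulate their read marks on the stored
 * state.)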
8342 */ 8343 for (j = 0; j <= cur->curframe; j++) { 8344 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) 8345 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 8346 for (i = 0; i < BPF_REG_FP; i++) 8347 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 8348 } 8349 8350 /* all stack frames are accessible from callee, clear them all */ 8351 for (j = 0; j <= cur->curframe; j++) { 8352 struct bpf_func_state *frame = cur->frame[j]; 8353 struct bpf_func_state *newframe = new->frame[j]; 8354 8355 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 8356 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 8357 frame->stack[i].spilled_ptr.parent = 8358 &newframe->stack[i].spilled_ptr; 8359 } 8360 } 8361 return 0; 8362 } 8363 8364 /* Return true if it's OK to have the same insn return a different type. */ 8365 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 8366 { 8367 switch (type) { 8368 case PTR_TO_CTX: 8369 case PTR_TO_SOCKET: 8370 case PTR_TO_SOCKET_OR_NULL: 8371 case PTR_TO_SOCK_COMMON: 8372 case PTR_TO_SOCK_COMMON_OR_NULL: 8373 case PTR_TO_TCP_SOCK: 8374 case PTR_TO_TCP_SOCK_OR_NULL: 8375 case PTR_TO_XDP_SOCK: 8376 case PTR_TO_BTF_ID: 8377 return false; 8378 default: 8379 return true; 8380 } 8381 } 8382 8383 /* If an instruction was previously used with particular pointer types, then we 8384 * need to be careful to avoid cases such as the below, where it may be ok 8385 * for one branch accessing the pointer, but not ok for the other branch: 8386 * 8387 * R1 = sock_ptr 8388 * goto X; 8389 * ... 8390 * R1 = some_other_valid_ptr; 8391 * goto X; 8392 * ... 8393 * R2 = *(u32 *)(R1 + 0); 8394 */ 8395 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 8396 { 8397 return src != prev && (!reg_type_mismatch_ok(src) || 8398 !reg_type_mismatch_ok(prev)); 8399 } 8400 8401 static int do_check(struct bpf_verifier_env *env) 8402 { 8403 struct bpf_verifier_state *state = env->cur_state; 8404 struct bpf_insn *insns = env->prog->insnsi; 8405 struct bpf_reg_state *regs; 8406 int insn_cnt = env->prog->len; 8407 bool do_print_state = false; 8408 int prev_insn_idx = -1; 8409 8410 for (;;) { 8411 struct bpf_insn *insn; 8412 u8 class; 8413 int err; 8414 8415 env->prev_insn_idx = prev_insn_idx; 8416 if (env->insn_idx >= insn_cnt) { 8417 verbose(env, "invalid insn idx %d insn_cnt %d\n", 8418 env->insn_idx, insn_cnt); 8419 return -EFAULT; 8420 } 8421 8422 insn = &insns[env->insn_idx]; 8423 class = BPF_CLASS(insn->code); 8424 8425 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 8426 verbose(env, 8427 "BPF program is too large. Processed %d insn\n", 8428 env->insn_processed); 8429 return -E2BIG; 8430 } 8431 8432 err = is_state_visited(env, env->insn_idx); 8433 if (err < 0) 8434 return err; 8435 if (err == 1) { 8436 /* found equivalent state, can prune the search */ 8437 if (env->log.level & BPF_LOG_LEVEL) { 8438 if (do_print_state) 8439 verbose(env, "\nfrom %d to %d%s: safe\n", 8440 env->prev_insn_idx, env->insn_idx, 8441 env->cur_state->speculative ? 
8442 " (speculative execution)" : ""); 8443 else 8444 verbose(env, "%d: safe\n", env->insn_idx); 8445 } 8446 goto process_bpf_exit; 8447 } 8448 8449 if (signal_pending(current)) 8450 return -EAGAIN; 8451 8452 if (need_resched()) 8453 cond_resched(); 8454 8455 if (env->log.level & BPF_LOG_LEVEL2 || 8456 (env->log.level & BPF_LOG_LEVEL && do_print_state)) { 8457 if (env->log.level & BPF_LOG_LEVEL2) 8458 verbose(env, "%d:", env->insn_idx); 8459 else 8460 verbose(env, "\nfrom %d to %d%s:", 8461 env->prev_insn_idx, env->insn_idx, 8462 env->cur_state->speculative ? 8463 " (speculative execution)" : ""); 8464 print_verifier_state(env, state->frame[state->curframe]); 8465 do_print_state = false; 8466 } 8467 8468 if (env->log.level & BPF_LOG_LEVEL) { 8469 const struct bpf_insn_cbs cbs = { 8470 .cb_print = verbose, 8471 .private_data = env, 8472 }; 8473 8474 verbose_linfo(env, env->insn_idx, "; "); 8475 verbose(env, "%d: ", env->insn_idx); 8476 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 8477 } 8478 8479 if (bpf_prog_is_dev_bound(env->prog->aux)) { 8480 err = bpf_prog_offload_verify_insn(env, env->insn_idx, 8481 env->prev_insn_idx); 8482 if (err) 8483 return err; 8484 } 8485 8486 regs = cur_regs(env); 8487 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 8488 prev_insn_idx = env->insn_idx; 8489 8490 if (class == BPF_ALU || class == BPF_ALU64) { 8491 err = check_alu_op(env, insn); 8492 if (err) 8493 return err; 8494 8495 } else if (class == BPF_LDX) { 8496 enum bpf_reg_type *prev_src_type, src_reg_type; 8497 8498 /* check for reserved fields is already done */ 8499 8500 /* check src operand */ 8501 err = check_reg_arg(env, insn->src_reg, SRC_OP); 8502 if (err) 8503 return err; 8504 8505 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 8506 if (err) 8507 return err; 8508 8509 src_reg_type = regs[insn->src_reg].type; 8510 8511 /* check that memory (src_reg + off) is readable, 8512 * the state of dst_reg will be updated by this func 8513 */ 8514 err = check_mem_access(env, env->insn_idx, insn->src_reg, 8515 insn->off, BPF_SIZE(insn->code), 8516 BPF_READ, insn->dst_reg, false); 8517 if (err) 8518 return err; 8519 8520 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; 8521 8522 if (*prev_src_type == NOT_INIT) { 8523 /* saw a valid insn 8524 * dst_reg = *(u32 *)(src_reg + off) 8525 * save type to validate intersecting paths 8526 */ 8527 *prev_src_type = src_reg_type; 8528 8529 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { 8530 /* ABuser program is trying to use the same insn 8531 * dst_reg = *(u32*) (src_reg + off) 8532 * with different pointer types: 8533 * src_reg == ctx in one branch and 8534 * src_reg == stack|map in some other branch. 8535 * Reject it. 
8536 */ 8537 verbose(env, "same insn cannot be used with different pointers\n"); 8538 return -EINVAL; 8539 } 8540 8541 } else if (class == BPF_STX) { 8542 enum bpf_reg_type *prev_dst_type, dst_reg_type; 8543 8544 if (BPF_MODE(insn->code) == BPF_XADD) { 8545 err = check_xadd(env, env->insn_idx, insn); 8546 if (err) 8547 return err; 8548 env->insn_idx++; 8549 continue; 8550 } 8551 8552 /* check src1 operand */ 8553 err = check_reg_arg(env, insn->src_reg, SRC_OP); 8554 if (err) 8555 return err; 8556 /* check src2 operand */ 8557 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 8558 if (err) 8559 return err; 8560 8561 dst_reg_type = regs[insn->dst_reg].type; 8562 8563 /* check that memory (dst_reg + off) is writeable */ 8564 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 8565 insn->off, BPF_SIZE(insn->code), 8566 BPF_WRITE, insn->src_reg, false); 8567 if (err) 8568 return err; 8569 8570 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; 8571 8572 if (*prev_dst_type == NOT_INIT) { 8573 *prev_dst_type = dst_reg_type; 8574 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { 8575 verbose(env, "same insn cannot be used with different pointers\n"); 8576 return -EINVAL; 8577 } 8578 8579 } else if (class == BPF_ST) { 8580 if (BPF_MODE(insn->code) != BPF_MEM || 8581 insn->src_reg != BPF_REG_0) { 8582 verbose(env, "BPF_ST uses reserved fields\n"); 8583 return -EINVAL; 8584 } 8585 /* check src operand */ 8586 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 8587 if (err) 8588 return err; 8589 8590 if (is_ctx_reg(env, insn->dst_reg)) { 8591 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", 8592 insn->dst_reg, 8593 reg_type_str[reg_state(env, insn->dst_reg)->type]); 8594 return -EACCES; 8595 } 8596 8597 /* check that memory (dst_reg + off) is writeable */ 8598 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 8599 insn->off, BPF_SIZE(insn->code), 8600 BPF_WRITE, -1, false); 8601 if (err) 8602 return err; 8603 8604 } else if (class == BPF_JMP || class == BPF_JMP32) { 8605 u8 opcode = BPF_OP(insn->code); 8606 8607 env->jmps_processed++; 8608 if (opcode == BPF_CALL) { 8609 if (BPF_SRC(insn->code) != BPF_K || 8610 insn->off != 0 || 8611 (insn->src_reg != BPF_REG_0 && 8612 insn->src_reg != BPF_PSEUDO_CALL) || 8613 insn->dst_reg != BPF_REG_0 || 8614 class == BPF_JMP32) { 8615 verbose(env, "BPF_CALL uses reserved fields\n"); 8616 return -EINVAL; 8617 } 8618 8619 if (env->cur_state->active_spin_lock && 8620 (insn->src_reg == BPF_PSEUDO_CALL || 8621 insn->imm != BPF_FUNC_spin_unlock)) { 8622 verbose(env, "function calls are not allowed while holding a lock\n"); 8623 return -EINVAL; 8624 } 8625 if (insn->src_reg == BPF_PSEUDO_CALL) 8626 err = check_func_call(env, insn, &env->insn_idx); 8627 else 8628 err = check_helper_call(env, insn->imm, env->insn_idx); 8629 if (err) 8630 return err; 8631 8632 } else if (opcode == BPF_JA) { 8633 if (BPF_SRC(insn->code) != BPF_K || 8634 insn->imm != 0 || 8635 insn->src_reg != BPF_REG_0 || 8636 insn->dst_reg != BPF_REG_0 || 8637 class == BPF_JMP32) { 8638 verbose(env, "BPF_JA uses reserved fields\n"); 8639 return -EINVAL; 8640 } 8641 8642 env->insn_idx += insn->off + 1; 8643 continue; 8644 8645 } else if (opcode == BPF_EXIT) { 8646 if (BPF_SRC(insn->code) != BPF_K || 8647 insn->imm != 0 || 8648 insn->src_reg != BPF_REG_0 || 8649 insn->dst_reg != BPF_REG_0 || 8650 class == BPF_JMP32) { 8651 verbose(env, "BPF_EXIT uses reserved fields\n"); 8652 return -EINVAL; 8653 } 8654 8655 if (env->cur_state->active_spin_lock) { 8656 verbose(env, 
"bpf_spin_unlock is missing\n"); 8657 return -EINVAL; 8658 } 8659 8660 if (state->curframe) { 8661 /* exit from nested function */ 8662 err = prepare_func_exit(env, &env->insn_idx); 8663 if (err) 8664 return err; 8665 do_print_state = true; 8666 continue; 8667 } 8668 8669 err = check_reference_leak(env); 8670 if (err) 8671 return err; 8672 8673 err = check_return_code(env); 8674 if (err) 8675 return err; 8676 process_bpf_exit: 8677 update_branch_counts(env, env->cur_state); 8678 err = pop_stack(env, &prev_insn_idx, 8679 &env->insn_idx); 8680 if (err < 0) { 8681 if (err != -ENOENT) 8682 return err; 8683 break; 8684 } else { 8685 do_print_state = true; 8686 continue; 8687 } 8688 } else { 8689 err = check_cond_jmp_op(env, insn, &env->insn_idx); 8690 if (err) 8691 return err; 8692 } 8693 } else if (class == BPF_LD) { 8694 u8 mode = BPF_MODE(insn->code); 8695 8696 if (mode == BPF_ABS || mode == BPF_IND) { 8697 err = check_ld_abs(env, insn); 8698 if (err) 8699 return err; 8700 8701 } else if (mode == BPF_IMM) { 8702 err = check_ld_imm(env, insn); 8703 if (err) 8704 return err; 8705 8706 env->insn_idx++; 8707 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 8708 } else { 8709 verbose(env, "invalid BPF_LD mode\n"); 8710 return -EINVAL; 8711 } 8712 } else { 8713 verbose(env, "unknown insn class %d\n", class); 8714 return -EINVAL; 8715 } 8716 8717 env->insn_idx++; 8718 } 8719 8720 return 0; 8721 } 8722 8723 static int check_map_prealloc(struct bpf_map *map) 8724 { 8725 return (map->map_type != BPF_MAP_TYPE_HASH && 8726 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 8727 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 8728 !(map->map_flags & BPF_F_NO_PREALLOC); 8729 } 8730 8731 static bool is_tracing_prog_type(enum bpf_prog_type type) 8732 { 8733 switch (type) { 8734 case BPF_PROG_TYPE_KPROBE: 8735 case BPF_PROG_TYPE_TRACEPOINT: 8736 case BPF_PROG_TYPE_PERF_EVENT: 8737 case BPF_PROG_TYPE_RAW_TRACEPOINT: 8738 return true; 8739 default: 8740 return false; 8741 } 8742 } 8743 8744 static bool is_preallocated_map(struct bpf_map *map) 8745 { 8746 if (!check_map_prealloc(map)) 8747 return false; 8748 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) 8749 return false; 8750 return true; 8751 } 8752 8753 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 8754 struct bpf_map *map, 8755 struct bpf_prog *prog) 8756 8757 { 8758 /* 8759 * Validate that trace type programs use preallocated hash maps. 8760 * 8761 * For programs attached to PERF events this is mandatory as the 8762 * perf NMI can hit any arbitrary code sequence. 8763 * 8764 * All other trace types using preallocated hash maps are unsafe as 8765 * well because tracepoint or kprobes can be inside locked regions 8766 * of the memory allocator or at a place where a recursion into the 8767 * memory allocator would see inconsistent state. 8768 * 8769 * On RT enabled kernels run-time allocation of all trace type 8770 * programs is strictly prohibited due to lock type constraints. On 8771 * !RT kernels it is allowed for backwards compatibility reasons for 8772 * now, but warnings are emitted so developers are made aware of 8773 * the unsafety and can fix their programs before this is enforced. 
8774 */ 8775 if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) { 8776 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 8777 verbose(env, "perf_event programs can only use preallocated hash map\n"); 8778 return -EINVAL; 8779 } 8780 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 8781 verbose(env, "trace type programs can only use preallocated hash map\n"); 8782 return -EINVAL; 8783 } 8784 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n"); 8785 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n"); 8786 } 8787 8788 if ((is_tracing_prog_type(prog->type) || 8789 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) && 8790 map_value_has_spin_lock(map)) { 8791 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 8792 return -EINVAL; 8793 } 8794 8795 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 8796 !bpf_offload_prog_map_match(prog, map)) { 8797 verbose(env, "offload device mismatch between prog and map\n"); 8798 return -EINVAL; 8799 } 8800 8801 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 8802 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 8803 return -EINVAL; 8804 } 8805 8806 return 0; 8807 } 8808 8809 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 8810 { 8811 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 8812 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 8813 } 8814 8815 /* look for pseudo eBPF instructions that access map FDs and 8816 * replace them with actual map pointers 8817 */ 8818 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 8819 { 8820 struct bpf_insn *insn = env->prog->insnsi; 8821 int insn_cnt = env->prog->len; 8822 int i, j, err; 8823 8824 err = bpf_prog_calc_tag(env->prog); 8825 if (err) 8826 return err; 8827 8828 for (i = 0; i < insn_cnt; i++, insn++) { 8829 if (BPF_CLASS(insn->code) == BPF_LDX && 8830 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 8831 verbose(env, "BPF_LDX uses reserved fields\n"); 8832 return -EINVAL; 8833 } 8834 8835 if (BPF_CLASS(insn->code) == BPF_STX && 8836 ((BPF_MODE(insn->code) != BPF_MEM && 8837 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 8838 verbose(env, "BPF_STX uses reserved fields\n"); 8839 return -EINVAL; 8840 } 8841 8842 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 8843 struct bpf_insn_aux_data *aux; 8844 struct bpf_map *map; 8845 struct fd f; 8846 u64 addr; 8847 8848 if (i == insn_cnt - 1 || insn[1].code != 0 || 8849 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 8850 insn[1].off != 0) { 8851 verbose(env, "invalid bpf_ld_imm64 insn\n"); 8852 return -EINVAL; 8853 } 8854 8855 if (insn[0].src_reg == 0) 8856 /* valid generic load 64-bit imm */ 8857 goto next_insn; 8858 8859 /* In final convert_pseudo_ld_imm64() step, this is 8860 * converted into regular 64-bit imm load insn. 
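 * A loader typically emits this via something like BPF_LD_MAP_FD(dst, fd),
 * i.e. a ld_imm64 with src_reg == BPF_PSEUDO_MAP_FD and the map fd in
 * insn[0].imm; BPF_PSEUDO_MAP_VALUE additionally carries an offset into
 * the map value in insn[1].imm.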
8861 */ 8862 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD && 8863 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) || 8864 (insn[0].src_reg == BPF_PSEUDO_MAP_FD && 8865 insn[1].imm != 0)) { 8866 verbose(env, 8867 "unrecognized bpf_ld_imm64 insn\n"); 8868 return -EINVAL; 8869 } 8870 8871 f = fdget(insn[0].imm); 8872 map = __bpf_map_get(f); 8873 if (IS_ERR(map)) { 8874 verbose(env, "fd %d is not pointing to valid bpf_map\n", 8875 insn[0].imm); 8876 return PTR_ERR(map); 8877 } 8878 8879 err = check_map_prog_compatibility(env, map, env->prog); 8880 if (err) { 8881 fdput(f); 8882 return err; 8883 } 8884 8885 aux = &env->insn_aux_data[i]; 8886 if (insn->src_reg == BPF_PSEUDO_MAP_FD) { 8887 addr = (unsigned long)map; 8888 } else { 8889 u32 off = insn[1].imm; 8890 8891 if (off >= BPF_MAX_VAR_OFF) { 8892 verbose(env, "direct value offset of %u is not allowed\n", off); 8893 fdput(f); 8894 return -EINVAL; 8895 } 8896 8897 if (!map->ops->map_direct_value_addr) { 8898 verbose(env, "no direct value access support for this map type\n"); 8899 fdput(f); 8900 return -EINVAL; 8901 } 8902 8903 err = map->ops->map_direct_value_addr(map, &addr, off); 8904 if (err) { 8905 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 8906 map->value_size, off); 8907 fdput(f); 8908 return err; 8909 } 8910 8911 aux->map_off = off; 8912 addr += off; 8913 } 8914 8915 insn[0].imm = (u32)addr; 8916 insn[1].imm = addr >> 32; 8917 8918 /* check whether we recorded this map already */ 8919 for (j = 0; j < env->used_map_cnt; j++) { 8920 if (env->used_maps[j] == map) { 8921 aux->map_index = j; 8922 fdput(f); 8923 goto next_insn; 8924 } 8925 } 8926 8927 if (env->used_map_cnt >= MAX_USED_MAPS) { 8928 fdput(f); 8929 return -E2BIG; 8930 } 8931 8932 /* hold the map. If the program is rejected by verifier, 8933 * the map will be released by release_maps() or it 8934 * will be used by the valid program until it's unloaded 8935 * and all maps are released in free_used_maps() 8936 */ 8937 bpf_map_inc(map); 8938 8939 aux->map_index = env->used_map_cnt; 8940 env->used_maps[env->used_map_cnt++] = map; 8941 8942 if (bpf_map_is_cgroup_storage(map) && 8943 bpf_cgroup_storage_assign(env->prog->aux, map)) { 8944 verbose(env, "only one cgroup storage of each type is allowed\n"); 8945 fdput(f); 8946 return -EBUSY; 8947 } 8948 8949 fdput(f); 8950 next_insn: 8951 insn++; 8952 i++; 8953 continue; 8954 } 8955 8956 /* Basic sanity check before we invest more work here. */ 8957 if (!bpf_opcode_in_insntable(insn->code)) { 8958 verbose(env, "unknown opcode %02x\n", insn->code); 8959 return -EINVAL; 8960 } 8961 } 8962 8963 /* now all pseudo BPF_LD_IMM64 instructions load valid 8964 * 'struct bpf_map *' into a register instead of user map_fd. 8965 * These pointers will be used later by verifier to validate map access. 8966 */ 8967 return 0; 8968 } 8969 8970 /* drop refcnt of maps used by the rejected program */ 8971 static void release_maps(struct bpf_verifier_env *env) 8972 { 8973 __bpf_free_used_maps(env->prog->aux, env->used_maps, 8974 env->used_map_cnt); 8975 } 8976 8977 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 8978 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 8979 { 8980 struct bpf_insn *insn = env->prog->insnsi; 8981 int insn_cnt = env->prog->len; 8982 int i; 8983 8984 for (i = 0; i < insn_cnt; i++, insn++) 8985 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 8986 insn->src_reg = 0; 8987 } 8988 8989 /* single env->prog->insni[off] instruction was replaced with the range 8990 * insni[off, off + cnt). 
Adjust corresponding insn_aux_data by copying 8991 * [0, off) and [off, end) to new locations, so the patched range stays zero 8992 */ 8993 static int adjust_insn_aux_data(struct bpf_verifier_env *env, 8994 struct bpf_prog *new_prog, u32 off, u32 cnt) 8995 { 8996 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 8997 struct bpf_insn *insn = new_prog->insnsi; 8998 u32 prog_len; 8999 int i; 9000 9001 /* aux info at OFF always needs adjustment, no matter fast path 9002 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the 9003 * original insn at old prog. 9004 */ 9005 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 9006 9007 if (cnt == 1) 9008 return 0; 9009 prog_len = new_prog->len; 9010 new_data = vzalloc(array_size(prog_len, 9011 sizeof(struct bpf_insn_aux_data))); 9012 if (!new_data) 9013 return -ENOMEM; 9014 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 9015 memcpy(new_data + off + cnt - 1, old_data + off, 9016 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 9017 for (i = off; i < off + cnt - 1; i++) { 9018 new_data[i].seen = env->pass_cnt; 9019 new_data[i].zext_dst = insn_has_def32(env, insn + i); 9020 } 9021 env->insn_aux_data = new_data; 9022 vfree(old_data); 9023 return 0; 9024 } 9025 9026 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 9027 { 9028 int i; 9029 9030 if (len == 1) 9031 return; 9032 /* NOTE: fake 'exit' subprog should be updated as well. */ 9033 for (i = 0; i <= env->subprog_cnt; i++) { 9034 if (env->subprog_info[i].start <= off) 9035 continue; 9036 env->subprog_info[i].start += len - 1; 9037 } 9038 } 9039 9040 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 9041 const struct bpf_insn *patch, u32 len) 9042 { 9043 struct bpf_prog *new_prog; 9044 9045 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 9046 if (IS_ERR(new_prog)) { 9047 if (PTR_ERR(new_prog) == -ERANGE) 9048 verbose(env, 9049 "insn %d cannot be patched due to 16-bit range\n", 9050 env->insn_aux_data[off].orig_idx); 9051 return NULL; 9052 } 9053 if (adjust_insn_aux_data(env, new_prog, off, len)) 9054 return NULL; 9055 adjust_subprog_starts(env, off, len); 9056 return new_prog; 9057 } 9058 9059 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 9060 u32 off, u32 cnt) 9061 { 9062 int i, j; 9063 9064 /* find first prog starting at or after off (first to remove) */ 9065 for (i = 0; i < env->subprog_cnt; i++) 9066 if (env->subprog_info[i].start >= off) 9067 break; 9068 /* find first prog starting at or after off + cnt (first to stay) */ 9069 for (j = i; j < env->subprog_cnt; j++) 9070 if (env->subprog_info[j].start >= off + cnt) 9071 break; 9072 /* if j doesn't start exactly at off + cnt, we are just removing 9073 * the front of previous prog 9074 */ 9075 if (env->subprog_info[j].start != off + cnt) 9076 j--; 9077 9078 if (j > i) { 9079 struct bpf_prog_aux *aux = env->prog->aux; 9080 int move; 9081 9082 /* move fake 'exit' subprog as well */ 9083 move = env->subprog_cnt + 1 - j; 9084 9085 memmove(env->subprog_info + i, 9086 env->subprog_info + j, 9087 sizeof(*env->subprog_info) * move); 9088 env->subprog_cnt -= j - i; 9089 9090 /* remove func_info */ 9091 if (aux->func_info) { 9092 move = aux->func_info_cnt - j; 9093 9094 memmove(aux->func_info + i, 9095 aux->func_info + j, 9096 sizeof(*aux->func_info) * move); 9097 aux->func_info_cnt -= j - i; 9098 /* func_info->insn_off is set after all code rewrites, 9099 * in 
adjust_btf_func() - no need to adjust 9100 */ 9101 } 9102 } else { 9103 /* convert i from "first prog to remove" to "first to adjust" */ 9104 if (env->subprog_info[i].start == off) 9105 i++; 9106 } 9107 9108 /* update fake 'exit' subprog as well */ 9109 for (; i <= env->subprog_cnt; i++) 9110 env->subprog_info[i].start -= cnt; 9111 9112 return 0; 9113 } 9114 9115 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 9116 u32 cnt) 9117 { 9118 struct bpf_prog *prog = env->prog; 9119 u32 i, l_off, l_cnt, nr_linfo; 9120 struct bpf_line_info *linfo; 9121 9122 nr_linfo = prog->aux->nr_linfo; 9123 if (!nr_linfo) 9124 return 0; 9125 9126 linfo = prog->aux->linfo; 9127 9128 /* find first line info to remove, count lines to be removed */ 9129 for (i = 0; i < nr_linfo; i++) 9130 if (linfo[i].insn_off >= off) 9131 break; 9132 9133 l_off = i; 9134 l_cnt = 0; 9135 for (; i < nr_linfo; i++) 9136 if (linfo[i].insn_off < off + cnt) 9137 l_cnt++; 9138 else 9139 break; 9140 9141 /* First live insn doesn't match first live linfo, it needs to "inherit" 9142 * last removed linfo. prog is already modified, so prog->len == off 9143 * means no live instructions after (tail of the program was removed). 9144 */ 9145 if (prog->len != off && l_cnt && 9146 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 9147 l_cnt--; 9148 linfo[--i].insn_off = off + cnt; 9149 } 9150 9151 /* remove the line info which refer to the removed instructions */ 9152 if (l_cnt) { 9153 memmove(linfo + l_off, linfo + i, 9154 sizeof(*linfo) * (nr_linfo - i)); 9155 9156 prog->aux->nr_linfo -= l_cnt; 9157 nr_linfo = prog->aux->nr_linfo; 9158 } 9159 9160 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 9161 for (i = l_off; i < nr_linfo; i++) 9162 linfo[i].insn_off -= cnt; 9163 9164 /* fix up all subprogs (incl. 'exit') which start >= off */ 9165 for (i = 0; i <= env->subprog_cnt; i++) 9166 if (env->subprog_info[i].linfo_idx > l_off) { 9167 /* program may have started in the removed region but 9168 * may not be fully removed 9169 */ 9170 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 9171 env->subprog_info[i].linfo_idx -= l_cnt; 9172 else 9173 env->subprog_info[i].linfo_idx = l_off; 9174 } 9175 9176 return 0; 9177 } 9178 9179 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 9180 { 9181 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 9182 unsigned int orig_prog_len = env->prog->len; 9183 int err; 9184 9185 if (bpf_prog_is_dev_bound(env->prog->aux)) 9186 bpf_prog_offload_remove_insns(env, off, cnt); 9187 9188 err = bpf_remove_insns(env->prog, off, cnt); 9189 if (err) 9190 return err; 9191 9192 err = adjust_subprog_starts_after_remove(env, off, cnt); 9193 if (err) 9194 return err; 9195 9196 err = bpf_adj_linfo_after_remove(env, off, cnt); 9197 if (err) 9198 return err; 9199 9200 memmove(aux_data + off, aux_data + off + cnt, 9201 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 9202 9203 return 0; 9204 } 9205 9206 /* The verifier does more data flow analysis than llvm and will not 9207 * explore branches that are dead at run time. Malicious programs can 9208 * have dead code too. Therefore replace all dead at-run-time code 9209 * with 'ja -1'. 9210 * 9211 * Just nops are not optimal, e.g. if they would sit at the end of the 9212 * program and through another bug we would manage to jump there, then 9213 * we'd execute beyond program memory otherwise. Returning exception 9214 * code also wouldn't work since we can have subprogs where the dead 9215 * code could be located. 
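 *
 * As a purely illustrative sketch (instruction indices are made up): if
 * insns 5 and 6 were never marked as seen by the verifier, they are
 * overwritten with a self-branch,
 *	5: ja -1	// off = -1, so the insn only jumps back to itself
 *	6: ja -1
 * so even a buggy jump into the dead region can never fall through past
 * the end of the program or into another subprog.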
9216 */ 9217 static void sanitize_dead_code(struct bpf_verifier_env *env) 9218 { 9219 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 9220 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 9221 struct bpf_insn *insn = env->prog->insnsi; 9222 const int insn_cnt = env->prog->len; 9223 int i; 9224 9225 for (i = 0; i < insn_cnt; i++) { 9226 if (aux_data[i].seen) 9227 continue; 9228 memcpy(insn + i, &trap, sizeof(trap)); 9229 } 9230 } 9231 9232 static bool insn_is_cond_jump(u8 code) 9233 { 9234 u8 op; 9235 9236 if (BPF_CLASS(code) == BPF_JMP32) 9237 return true; 9238 9239 if (BPF_CLASS(code) != BPF_JMP) 9240 return false; 9241 9242 op = BPF_OP(code); 9243 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 9244 } 9245 9246 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 9247 { 9248 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 9249 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 9250 struct bpf_insn *insn = env->prog->insnsi; 9251 const int insn_cnt = env->prog->len; 9252 int i; 9253 9254 for (i = 0; i < insn_cnt; i++, insn++) { 9255 if (!insn_is_cond_jump(insn->code)) 9256 continue; 9257 9258 if (!aux_data[i + 1].seen) 9259 ja.off = insn->off; 9260 else if (!aux_data[i + 1 + insn->off].seen) 9261 ja.off = 0; 9262 else 9263 continue; 9264 9265 if (bpf_prog_is_dev_bound(env->prog->aux)) 9266 bpf_prog_offload_replace_insn(env, i, &ja); 9267 9268 memcpy(insn, &ja, sizeof(ja)); 9269 } 9270 } 9271 9272 static int opt_remove_dead_code(struct bpf_verifier_env *env) 9273 { 9274 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 9275 int insn_cnt = env->prog->len; 9276 int i, err; 9277 9278 for (i = 0; i < insn_cnt; i++) { 9279 int j; 9280 9281 j = 0; 9282 while (i + j < insn_cnt && !aux_data[i + j].seen) 9283 j++; 9284 if (!j) 9285 continue; 9286 9287 err = verifier_remove_insns(env, i, j); 9288 if (err) 9289 return err; 9290 insn_cnt = env->prog->len; 9291 } 9292 9293 return 0; 9294 } 9295 9296 static int opt_remove_nops(struct bpf_verifier_env *env) 9297 { 9298 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 9299 struct bpf_insn *insn = env->prog->insnsi; 9300 int insn_cnt = env->prog->len; 9301 int i, err; 9302 9303 for (i = 0; i < insn_cnt; i++) { 9304 if (memcmp(&insn[i], &ja, sizeof(ja))) 9305 continue; 9306 9307 err = verifier_remove_insns(env, i, 1); 9308 if (err) 9309 return err; 9310 insn_cnt--; 9311 i--; 9312 } 9313 9314 return 0; 9315 } 9316 9317 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 9318 const union bpf_attr *attr) 9319 { 9320 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 9321 struct bpf_insn_aux_data *aux = env->insn_aux_data; 9322 int i, patch_len, delta = 0, len = env->prog->len; 9323 struct bpf_insn *insns = env->prog->insnsi; 9324 struct bpf_prog *new_prog; 9325 bool rnd_hi32; 9326 9327 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 9328 zext_patch[1] = BPF_ZEXT_REG(0); 9329 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 9330 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 9331 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 9332 for (i = 0; i < len; i++) { 9333 int adj_idx = i + delta; 9334 struct bpf_insn insn; 9335 9336 insn = insns[adj_idx]; 9337 if (!aux[adj_idx].zext_dst) { 9338 u8 code, class; 9339 u32 imm_rnd; 9340 9341 if (!rnd_hi32) 9342 continue; 9343 9344 code = insn.code; 9345 class = BPF_CLASS(code); 9346 if (insn_no_def(&insn)) 9347 continue; 9348 9349 /* NOTE: arg "reg" (the fourth one) is only used for 9350 * 
BPF_STX which has been ruled out in above 9351 * check, it is safe to pass NULL here. 9352 */ 9353 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { 9354 if (class == BPF_LD && 9355 BPF_MODE(code) == BPF_IMM) 9356 i++; 9357 continue; 9358 } 9359 9360 /* ctx load could be transformed into wider load. */ 9361 if (class == BPF_LDX && 9362 aux[adj_idx].ptr_type == PTR_TO_CTX) 9363 continue; 9364 9365 imm_rnd = get_random_int(); 9366 rnd_hi32_patch[0] = insn; 9367 rnd_hi32_patch[1].imm = imm_rnd; 9368 rnd_hi32_patch[3].dst_reg = insn.dst_reg; 9369 patch = rnd_hi32_patch; 9370 patch_len = 4; 9371 goto apply_patch_buffer; 9372 } 9373 9374 if (!bpf_jit_needs_zext()) 9375 continue; 9376 9377 zext_patch[0] = insn; 9378 zext_patch[1].dst_reg = insn.dst_reg; 9379 zext_patch[1].src_reg = insn.dst_reg; 9380 patch = zext_patch; 9381 patch_len = 2; 9382 apply_patch_buffer: 9383 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 9384 if (!new_prog) 9385 return -ENOMEM; 9386 env->prog = new_prog; 9387 insns = new_prog->insnsi; 9388 aux = env->insn_aux_data; 9389 delta += patch_len - 1; 9390 } 9391 9392 return 0; 9393 } 9394 9395 /* convert load instructions that access fields of a context type into a 9396 * sequence of instructions that access fields of the underlying structure: 9397 * struct __sk_buff -> struct sk_buff 9398 * struct bpf_sock_ops -> struct sock 9399 */ 9400 static int convert_ctx_accesses(struct bpf_verifier_env *env) 9401 { 9402 const struct bpf_verifier_ops *ops = env->ops; 9403 int i, cnt, size, ctx_field_size, delta = 0; 9404 const int insn_cnt = env->prog->len; 9405 struct bpf_insn insn_buf[16], *insn; 9406 u32 target_size, size_default, off; 9407 struct bpf_prog *new_prog; 9408 enum bpf_access_type type; 9409 bool is_narrower_load; 9410 9411 if (ops->gen_prologue || env->seen_direct_write) { 9412 if (!ops->gen_prologue) { 9413 verbose(env, "bpf verifier is misconfigured\n"); 9414 return -EINVAL; 9415 } 9416 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 9417 env->prog); 9418 if (cnt >= ARRAY_SIZE(insn_buf)) { 9419 verbose(env, "bpf verifier is misconfigured\n"); 9420 return -EINVAL; 9421 } else if (cnt) { 9422 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 9423 if (!new_prog) 9424 return -ENOMEM; 9425 9426 env->prog = new_prog; 9427 delta += cnt - 1; 9428 } 9429 } 9430 9431 if (bpf_prog_is_dev_bound(env->prog->aux)) 9432 return 0; 9433 9434 insn = env->prog->insnsi + delta; 9435 9436 for (i = 0; i < insn_cnt; i++, insn++) { 9437 bpf_convert_ctx_access_t convert_ctx_access; 9438 9439 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 9440 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 9441 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 9442 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 9443 type = BPF_READ; 9444 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 9445 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 9446 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 9447 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 9448 type = BPF_WRITE; 9449 else 9450 continue; 9451 9452 if (type == BPF_WRITE && 9453 env->insn_aux_data[i + delta].sanitize_stack_off) { 9454 struct bpf_insn patch[] = { 9455 /* Sanitize suspicious stack slot with zero. 
9456				 * There are no memory dependencies for this store,
9457				 * since it's only using frame pointer and immediate
9458				 * constant of zero
9459				 */
9460				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
9461					   env->insn_aux_data[i + delta].sanitize_stack_off,
9462					   0),
9463				/* the original STX instruction will immediately
9464				 * overwrite the same stack slot with appropriate value
9465				 */
9466				*insn,
9467			};
9468
9469			cnt = ARRAY_SIZE(patch);
9470			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
9471			if (!new_prog)
9472				return -ENOMEM;
9473
9474			delta += cnt - 1;
9475			env->prog = new_prog;
9476			insn = new_prog->insnsi + i + delta;
9477			continue;
9478		}
9479
9480		switch (env->insn_aux_data[i + delta].ptr_type) {
9481		case PTR_TO_CTX:
9482			if (!ops->convert_ctx_access)
9483				continue;
9484			convert_ctx_access = ops->convert_ctx_access;
9485			break;
9486		case PTR_TO_SOCKET:
9487		case PTR_TO_SOCK_COMMON:
9488			convert_ctx_access = bpf_sock_convert_ctx_access;
9489			break;
9490		case PTR_TO_TCP_SOCK:
9491			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
9492			break;
9493		case PTR_TO_XDP_SOCK:
9494			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
9495			break;
9496		case PTR_TO_BTF_ID:
9497			if (type == BPF_READ) {
9498				insn->code = BPF_LDX | BPF_PROBE_MEM |
9499					BPF_SIZE((insn)->code);
9500				env->prog->aux->num_exentries++;
9501			} else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
9502				verbose(env, "Writes through BTF pointers are not allowed\n");
9503				return -EINVAL;
9504			}
9505			continue;
9506		default:
9507			continue;
9508		}
9509
9510		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
9511		size = BPF_LDST_BYTES(insn);
9512
9513		/* If the read access is a narrower load of the field,
9514		 * convert to a 4/8-byte load, to minimize program-type-specific
9515		 * convert_ctx_access changes. If conversion is successful,
9516		 * we will apply the proper mask to the result.
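		 *
		 * A hypothetical example of the rewrite (names and offsets are
		 * invented for illustration): a 1-byte read of a 4-byte ctx field,
		 *	r2 = *(u8 *)(r1 + off)
		 * is widened to the aligned full-size load and then masked down,
		 *	r2 = *(u32 *)(r1 + aligned_off)
		 *	w2 >>= shift	// only when the byte is not the lowest one
		 *	w2 &= 0xff
		 * where 'aligned_off' and 'shift' stand in for the values computed
		 * below from the original offset and the field size.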
9517 */ 9518 is_narrower_load = size < ctx_field_size; 9519 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 9520 off = insn->off; 9521 if (is_narrower_load) { 9522 u8 size_code; 9523 9524 if (type == BPF_WRITE) { 9525 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 9526 return -EINVAL; 9527 } 9528 9529 size_code = BPF_H; 9530 if (ctx_field_size == 4) 9531 size_code = BPF_W; 9532 else if (ctx_field_size == 8) 9533 size_code = BPF_DW; 9534 9535 insn->off = off & ~(size_default - 1); 9536 insn->code = BPF_LDX | BPF_MEM | size_code; 9537 } 9538 9539 target_size = 0; 9540 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 9541 &target_size); 9542 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 9543 (ctx_field_size && !target_size)) { 9544 verbose(env, "bpf verifier is misconfigured\n"); 9545 return -EINVAL; 9546 } 9547 9548 if (is_narrower_load && size < target_size) { 9549 u8 shift = bpf_ctx_narrow_access_offset( 9550 off, size, size_default) * 8; 9551 if (ctx_field_size <= 4) { 9552 if (shift) 9553 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 9554 insn->dst_reg, 9555 shift); 9556 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 9557 (1 << size * 8) - 1); 9558 } else { 9559 if (shift) 9560 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 9561 insn->dst_reg, 9562 shift); 9563 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 9564 (1ULL << size * 8) - 1); 9565 } 9566 } 9567 9568 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 9569 if (!new_prog) 9570 return -ENOMEM; 9571 9572 delta += cnt - 1; 9573 9574 /* keep walking new program and skip insns we just inserted */ 9575 env->prog = new_prog; 9576 insn = new_prog->insnsi + i + delta; 9577 } 9578 9579 return 0; 9580 } 9581 9582 static int jit_subprogs(struct bpf_verifier_env *env) 9583 { 9584 struct bpf_prog *prog = env->prog, **func, *tmp; 9585 int i, j, subprog_start, subprog_end = 0, len, subprog; 9586 struct bpf_insn *insn; 9587 void *old_bpf_func; 9588 int err; 9589 9590 if (env->subprog_cnt <= 1) 9591 return 0; 9592 9593 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 9594 if (insn->code != (BPF_JMP | BPF_CALL) || 9595 insn->src_reg != BPF_PSEUDO_CALL) 9596 continue; 9597 /* Upon error here we cannot fall back to interpreter but 9598 * need a hard reject of the program. Thus -EFAULT is 9599 * propagated in any case. 9600 */ 9601 subprog = find_subprog(env, i + insn->imm + 1); 9602 if (subprog < 0) { 9603 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 9604 i + insn->imm + 1); 9605 return -EFAULT; 9606 } 9607 /* temporarily remember subprog id inside insn instead of 9608 * aux_data, since next loop will split up all insns into funcs 9609 */ 9610 insn->off = subprog; 9611 /* remember original imm in case JIT fails and fallback 9612 * to interpreter will be needed 9613 */ 9614 env->insn_aux_data[i].call_imm = insn->imm; 9615 /* point imm to __bpf_call_base+1 from JITs point of view */ 9616 insn->imm = 1; 9617 } 9618 9619 err = bpf_prog_alloc_jited_linfo(prog); 9620 if (err) 9621 goto out_undo_insn; 9622 9623 err = -ENOMEM; 9624 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 9625 if (!func) 9626 goto out_undo_insn; 9627 9628 for (i = 0; i < env->subprog_cnt; i++) { 9629 subprog_start = subprog_end; 9630 subprog_end = env->subprog_info[i + 1].start; 9631 9632 len = subprog_end - subprog_start; 9633 /* BPF_PROG_RUN doesn't call subprogs directly, 9634 * hence main prog stats include the runtime of subprogs. 
9635		 * subprogs don't have IDs and are not reachable via prog_get_next_id
9636		 * func[i]->aux->stats will never be accessed and stays NULL
9637		 */
9638		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
9639		if (!func[i])
9640			goto out_free;
9641		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
9642		       len * sizeof(struct bpf_insn));
9643		func[i]->type = prog->type;
9644		func[i]->len = len;
9645		if (bpf_prog_calc_tag(func[i]))
9646			goto out_free;
9647		func[i]->is_func = 1;
9648		func[i]->aux->func_idx = i;
9649		/* the btf and func_info will be freed only at prog->aux */
9650		func[i]->aux->btf = prog->aux->btf;
9651		func[i]->aux->func_info = prog->aux->func_info;
9652
9653		/* Use bpf_prog_F_tag to indicate functions in stack traces.
9654		 * Long term we would need debug info to populate names
9655		 */
9656		func[i]->aux->name[0] = 'F';
9657		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
9658		func[i]->jit_requested = 1;
9659		func[i]->aux->linfo = prog->aux->linfo;
9660		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
9661		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
9662		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
9663		func[i] = bpf_int_jit_compile(func[i]);
9664		if (!func[i]->jited) {
9665			err = -ENOTSUPP;
9666			goto out_free;
9667		}
9668		cond_resched();
9669	}
9670	/* at this point all bpf functions were successfully JITed
9671	 * now populate all bpf_calls with correct addresses and
9672	 * run last pass of JIT
9673	 */
9674	for (i = 0; i < env->subprog_cnt; i++) {
9675		insn = func[i]->insnsi;
9676		for (j = 0; j < func[i]->len; j++, insn++) {
9677			if (insn->code != (BPF_JMP | BPF_CALL) ||
9678			    insn->src_reg != BPF_PSEUDO_CALL)
9679				continue;
9680			subprog = insn->off;
9681			insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
9682				    __bpf_call_base;
9683		}
9684
9685		/* we use the aux data to keep a list of the start addresses
9686		 * of the JITed images for each function in the program
9687		 *
9688		 * for some architectures, such as powerpc64, the imm field
9689		 * might not be large enough to hold the offset of the start
9690		 * address of the callee's JITed image from __bpf_call_base
9691		 *
9692		 * in such cases, we can lookup the start address of a callee
9693		 * by using its subprog id, available from the off field of
9694		 * the call instruction, as an index for this list
9695		 */
9696		func[i]->aux->func = func;
9697		func[i]->aux->func_cnt = env->subprog_cnt;
9698	}
9699	for (i = 0; i < env->subprog_cnt; i++) {
9700		old_bpf_func = func[i]->bpf_func;
9701		tmp = bpf_int_jit_compile(func[i]);
9702		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
9703			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
9704			err = -ENOTSUPP;
9705			goto out_free;
9706		}
9707		cond_resched();
9708	}
9709
9710	/* finally lock prog and jit images for all functions and
9711	 * populate kallsyms
9712	 */
9713	for (i = 0; i < env->subprog_cnt; i++) {
9714		bpf_prog_lock_ro(func[i]);
9715		bpf_prog_kallsyms_add(func[i]);
9716	}
9717
9718	/* Last step: make now unused interpreter insns from main
9719	 * prog consistent for later dump requests, so they can
9720	 * later look the same as if they were interpreted only.
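	 *
	 * Sketch of the fixup below (offsets invented for illustration):
	 * a bpf-to-bpf call that was originally
	 *	call +7		// imm = relative offset of the callee
	 * is left in the main prog with
	 *	off = 7, imm = <index of the callee subprog>
	 * so a later instruction dump can still be resolved against the
	 * original, interpreter-style layout.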
9721 */ 9722 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 9723 if (insn->code != (BPF_JMP | BPF_CALL) || 9724 insn->src_reg != BPF_PSEUDO_CALL) 9725 continue; 9726 insn->off = env->insn_aux_data[i].call_imm; 9727 subprog = find_subprog(env, i + insn->off + 1); 9728 insn->imm = subprog; 9729 } 9730 9731 prog->jited = 1; 9732 prog->bpf_func = func[0]->bpf_func; 9733 prog->aux->func = func; 9734 prog->aux->func_cnt = env->subprog_cnt; 9735 bpf_prog_free_unused_jited_linfo(prog); 9736 return 0; 9737 out_free: 9738 for (i = 0; i < env->subprog_cnt; i++) 9739 if (func[i]) 9740 bpf_jit_free(func[i]); 9741 kfree(func); 9742 out_undo_insn: 9743 /* cleanup main prog to be interpreted */ 9744 prog->jit_requested = 0; 9745 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 9746 if (insn->code != (BPF_JMP | BPF_CALL) || 9747 insn->src_reg != BPF_PSEUDO_CALL) 9748 continue; 9749 insn->off = 0; 9750 insn->imm = env->insn_aux_data[i].call_imm; 9751 } 9752 bpf_prog_free_jited_linfo(prog); 9753 return err; 9754 } 9755 9756 static int fixup_call_args(struct bpf_verifier_env *env) 9757 { 9758 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 9759 struct bpf_prog *prog = env->prog; 9760 struct bpf_insn *insn = prog->insnsi; 9761 int i, depth; 9762 #endif 9763 int err = 0; 9764 9765 if (env->prog->jit_requested && 9766 !bpf_prog_is_dev_bound(env->prog->aux)) { 9767 err = jit_subprogs(env); 9768 if (err == 0) 9769 return 0; 9770 if (err == -EFAULT) 9771 return err; 9772 } 9773 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 9774 for (i = 0; i < prog->len; i++, insn++) { 9775 if (insn->code != (BPF_JMP | BPF_CALL) || 9776 insn->src_reg != BPF_PSEUDO_CALL) 9777 continue; 9778 depth = get_callee_stack_depth(env, insn, i); 9779 if (depth < 0) 9780 return depth; 9781 bpf_patch_call_args(insn, depth); 9782 } 9783 err = 0; 9784 #endif 9785 return err; 9786 } 9787 9788 /* fixup insn->imm field of bpf_call instructions 9789 * and inline eligible helpers as explicit sequence of BPF instructions 9790 * 9791 * this function is called after eBPF program passed verification 9792 */ 9793 static int fixup_bpf_calls(struct bpf_verifier_env *env) 9794 { 9795 struct bpf_prog *prog = env->prog; 9796 bool expect_blinding = bpf_jit_blinding_enabled(prog); 9797 struct bpf_insn *insn = prog->insnsi; 9798 const struct bpf_func_proto *fn; 9799 const int insn_cnt = prog->len; 9800 const struct bpf_map_ops *ops; 9801 struct bpf_insn_aux_data *aux; 9802 struct bpf_insn insn_buf[16]; 9803 struct bpf_prog *new_prog; 9804 struct bpf_map *map_ptr; 9805 int i, ret, cnt, delta = 0; 9806 9807 for (i = 0; i < insn_cnt; i++, insn++) { 9808 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 9809 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 9810 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 9811 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 9812 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 9813 struct bpf_insn mask_and_div[] = { 9814 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 9815 /* Rx div 0 -> 0 */ 9816 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), 9817 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 9818 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 9819 *insn, 9820 }; 9821 struct bpf_insn mask_and_mod[] = { 9822 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 9823 /* Rx mod 0 -> Rx */ 9824 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), 9825 *insn, 9826 }; 9827 struct bpf_insn *patchlet; 9828 9829 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 9830 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 9831 patchlet = mask_and_div + (is64 ? 
1 : 0); 9832 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); 9833 } else { 9834 patchlet = mask_and_mod + (is64 ? 1 : 0); 9835 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0); 9836 } 9837 9838 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 9839 if (!new_prog) 9840 return -ENOMEM; 9841 9842 delta += cnt - 1; 9843 env->prog = prog = new_prog; 9844 insn = new_prog->insnsi + i + delta; 9845 continue; 9846 } 9847 9848 if (BPF_CLASS(insn->code) == BPF_LD && 9849 (BPF_MODE(insn->code) == BPF_ABS || 9850 BPF_MODE(insn->code) == BPF_IND)) { 9851 cnt = env->ops->gen_ld_abs(insn, insn_buf); 9852 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 9853 verbose(env, "bpf verifier is misconfigured\n"); 9854 return -EINVAL; 9855 } 9856 9857 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 9858 if (!new_prog) 9859 return -ENOMEM; 9860 9861 delta += cnt - 1; 9862 env->prog = prog = new_prog; 9863 insn = new_prog->insnsi + i + delta; 9864 continue; 9865 } 9866 9867 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 9868 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 9869 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 9870 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 9871 struct bpf_insn insn_buf[16]; 9872 struct bpf_insn *patch = &insn_buf[0]; 9873 bool issrc, isneg; 9874 u32 off_reg; 9875 9876 aux = &env->insn_aux_data[i + delta]; 9877 if (!aux->alu_state || 9878 aux->alu_state == BPF_ALU_NON_POINTER) 9879 continue; 9880 9881 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 9882 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 9883 BPF_ALU_SANITIZE_SRC; 9884 9885 off_reg = issrc ? insn->src_reg : insn->dst_reg; 9886 if (isneg) 9887 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 9888 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); 9889 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 9890 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 9891 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 9892 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 9893 if (issrc) { 9894 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, 9895 off_reg); 9896 insn->src_reg = BPF_REG_AX; 9897 } else { 9898 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, 9899 BPF_REG_AX); 9900 } 9901 if (isneg) 9902 insn->code = insn->code == code_add ? 9903 code_sub : code_add; 9904 *patch++ = *insn; 9905 if (issrc && isneg) 9906 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 9907 cnt = patch - insn_buf; 9908 9909 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 9910 if (!new_prog) 9911 return -ENOMEM; 9912 9913 delta += cnt - 1; 9914 env->prog = prog = new_prog; 9915 insn = new_prog->insnsi + i + delta; 9916 continue; 9917 } 9918 9919 if (insn->code != (BPF_JMP | BPF_CALL)) 9920 continue; 9921 if (insn->src_reg == BPF_PSEUDO_CALL) 9922 continue; 9923 9924 if (insn->imm == BPF_FUNC_get_route_realm) 9925 prog->dst_needed = 1; 9926 if (insn->imm == BPF_FUNC_get_prandom_u32) 9927 bpf_user_rnd_init_once(); 9928 if (insn->imm == BPF_FUNC_override_return) 9929 prog->kprobe_override = 1; 9930 if (insn->imm == BPF_FUNC_tail_call) { 9931 /* If we tail call into other programs, we 9932 * cannot make any assumptions since they can 9933 * be replaced dynamically during runtime in 9934 * the program array. 
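			 *
			 * A sketch of why no assumptions can be made: with
			 * something like
			 *	bpf_tail_call(ctx, &jmp_table, idx);
			 * userspace may replace the entry at 'idx' in the
			 * program array at any time, so the verifier must
			 * assume worst-case stack depth and packet offset for
			 * whatever program ends up being called ('jmp_table'
			 * and 'idx' are hypothetical names).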
9935			 */
9936			prog->cb_access = 1;
9937			env->prog->aux->stack_depth = MAX_BPF_STACK;
9938			env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
9939
9940			/* mark bpf_tail_call as different opcode to avoid
9941			 * conditional branch in the interpreter for every normal
9942			 * call and to prevent accidental JITing by JIT compiler
9943			 * that doesn't support bpf_tail_call yet
9944			 */
9945			insn->imm = 0;
9946			insn->code = BPF_JMP | BPF_TAIL_CALL;
9947
9948			aux = &env->insn_aux_data[i + delta];
9949			if (env->allow_ptr_leaks && !expect_blinding &&
9950			    prog->jit_requested &&
9951			    !bpf_map_key_poisoned(aux) &&
9952			    !bpf_map_ptr_poisoned(aux) &&
9953			    !bpf_map_ptr_unpriv(aux)) {
9954				struct bpf_jit_poke_descriptor desc = {
9955					.reason = BPF_POKE_REASON_TAIL_CALL,
9956					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
9957					.tail_call.key = bpf_map_key_immediate(aux),
9958				};
9959
9960				ret = bpf_jit_add_poke_descriptor(prog, &desc);
9961				if (ret < 0) {
9962					verbose(env, "adding tail call poke descriptor failed\n");
9963					return ret;
9964				}
9965
9966				insn->imm = ret + 1;
9967				continue;
9968			}
9969
9970			if (!bpf_map_ptr_unpriv(aux))
9971				continue;
9972
9973			/* instead of changing every JIT dealing with tail_call
9974			 * emit two extra insns:
9975			 * if (index >= max_entries) goto out;
9976			 * index &= array->index_mask;
9977			 * to avoid out-of-bounds cpu speculation
9978			 */
9979			if (bpf_map_ptr_poisoned(aux)) {
9980				verbose(env, "tail_call abusing map_ptr\n");
9981				return -EINVAL;
9982			}
9983
9984			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
9985			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
9986						  map_ptr->max_entries, 2);
9987			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
9988						    container_of(map_ptr,
9989								 struct bpf_array,
9990								 map)->index_mask);
9991			insn_buf[2] = *insn;
9992			cnt = 3;
9993			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9994			if (!new_prog)
9995				return -ENOMEM;
9996
9997			delta += cnt - 1;
9998			env->prog = prog = new_prog;
9999			insn = new_prog->insnsi + i + delta;
10000			continue;
10001		}
10002
10003		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
10004		 * and other inlining handlers are currently limited to 64 bit
10005		 * only.
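		 *
		 * As an illustration only (the exact sequence is up to each map
		 * type's map_gen_lookup callback), an array map lookup can be
		 * inlined roughly as
		 *	if (*(u32 *)key < max_entries)
		 *		r0 = values + index * round_up(value_size, 8);
		 *	else
		 *		r0 = NULL;
		 * replacing the indirect helper call on 64-bit JITs.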
10006 */ 10007 if (prog->jit_requested && BITS_PER_LONG == 64 && 10008 (insn->imm == BPF_FUNC_map_lookup_elem || 10009 insn->imm == BPF_FUNC_map_update_elem || 10010 insn->imm == BPF_FUNC_map_delete_elem || 10011 insn->imm == BPF_FUNC_map_push_elem || 10012 insn->imm == BPF_FUNC_map_pop_elem || 10013 insn->imm == BPF_FUNC_map_peek_elem)) { 10014 aux = &env->insn_aux_data[i + delta]; 10015 if (bpf_map_ptr_poisoned(aux)) 10016 goto patch_call_imm; 10017 10018 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 10019 ops = map_ptr->ops; 10020 if (insn->imm == BPF_FUNC_map_lookup_elem && 10021 ops->map_gen_lookup) { 10022 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 10023 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 10024 verbose(env, "bpf verifier is misconfigured\n"); 10025 return -EINVAL; 10026 } 10027 10028 new_prog = bpf_patch_insn_data(env, i + delta, 10029 insn_buf, cnt); 10030 if (!new_prog) 10031 return -ENOMEM; 10032 10033 delta += cnt - 1; 10034 env->prog = prog = new_prog; 10035 insn = new_prog->insnsi + i + delta; 10036 continue; 10037 } 10038 10039 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 10040 (void *(*)(struct bpf_map *map, void *key))NULL)); 10041 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 10042 (int (*)(struct bpf_map *map, void *key))NULL)); 10043 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 10044 (int (*)(struct bpf_map *map, void *key, void *value, 10045 u64 flags))NULL)); 10046 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 10047 (int (*)(struct bpf_map *map, void *value, 10048 u64 flags))NULL)); 10049 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 10050 (int (*)(struct bpf_map *map, void *value))NULL)); 10051 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 10052 (int (*)(struct bpf_map *map, void *value))NULL)); 10053 10054 switch (insn->imm) { 10055 case BPF_FUNC_map_lookup_elem: 10056 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - 10057 __bpf_call_base; 10058 continue; 10059 case BPF_FUNC_map_update_elem: 10060 insn->imm = BPF_CAST_CALL(ops->map_update_elem) - 10061 __bpf_call_base; 10062 continue; 10063 case BPF_FUNC_map_delete_elem: 10064 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - 10065 __bpf_call_base; 10066 continue; 10067 case BPF_FUNC_map_push_elem: 10068 insn->imm = BPF_CAST_CALL(ops->map_push_elem) - 10069 __bpf_call_base; 10070 continue; 10071 case BPF_FUNC_map_pop_elem: 10072 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - 10073 __bpf_call_base; 10074 continue; 10075 case BPF_FUNC_map_peek_elem: 10076 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - 10077 __bpf_call_base; 10078 continue; 10079 } 10080 10081 goto patch_call_imm; 10082 } 10083 10084 if (prog->jit_requested && BITS_PER_LONG == 64 && 10085 insn->imm == BPF_FUNC_jiffies64) { 10086 struct bpf_insn ld_jiffies_addr[2] = { 10087 BPF_LD_IMM64(BPF_REG_0, 10088 (unsigned long)&jiffies), 10089 }; 10090 10091 insn_buf[0] = ld_jiffies_addr[0]; 10092 insn_buf[1] = ld_jiffies_addr[1]; 10093 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 10094 BPF_REG_0, 0); 10095 cnt = 3; 10096 10097 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 10098 cnt); 10099 if (!new_prog) 10100 return -ENOMEM; 10101 10102 delta += cnt - 1; 10103 env->prog = prog = new_prog; 10104 insn = new_prog->insnsi + i + delta; 10105 continue; 10106 } 10107 10108 patch_call_imm: 10109 fn = env->ops->get_func_proto(insn->imm, env->prog); 10110 /* all functions that have prototype and verifier allowed 10111 * programs to call them, must be real in-kernel functions 10112 */ 10113 if (!fn->func) { 10114 verbose(env, 10115 "kernel 
subsystem misconfigured func %s#%d\n", 10116 func_id_name(insn->imm), insn->imm); 10117 return -EFAULT; 10118 } 10119 insn->imm = fn->func - __bpf_call_base; 10120 } 10121 10122 /* Since poke tab is now finalized, publish aux to tracker. */ 10123 for (i = 0; i < prog->aux->size_poke_tab; i++) { 10124 map_ptr = prog->aux->poke_tab[i].tail_call.map; 10125 if (!map_ptr->ops->map_poke_track || 10126 !map_ptr->ops->map_poke_untrack || 10127 !map_ptr->ops->map_poke_run) { 10128 verbose(env, "bpf verifier is misconfigured\n"); 10129 return -EINVAL; 10130 } 10131 10132 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 10133 if (ret < 0) { 10134 verbose(env, "tracking tail call prog failed\n"); 10135 return ret; 10136 } 10137 } 10138 10139 return 0; 10140 } 10141 10142 static void free_states(struct bpf_verifier_env *env) 10143 { 10144 struct bpf_verifier_state_list *sl, *sln; 10145 int i; 10146 10147 sl = env->free_list; 10148 while (sl) { 10149 sln = sl->next; 10150 free_verifier_state(&sl->state, false); 10151 kfree(sl); 10152 sl = sln; 10153 } 10154 env->free_list = NULL; 10155 10156 if (!env->explored_states) 10157 return; 10158 10159 for (i = 0; i < state_htab_size(env); i++) { 10160 sl = env->explored_states[i]; 10161 10162 while (sl) { 10163 sln = sl->next; 10164 free_verifier_state(&sl->state, false); 10165 kfree(sl); 10166 sl = sln; 10167 } 10168 env->explored_states[i] = NULL; 10169 } 10170 } 10171 10172 /* The verifier is using insn_aux_data[] to store temporary data during 10173 * verification and to store information for passes that run after the 10174 * verification like dead code sanitization. do_check_common() for subprogram N 10175 * may analyze many other subprograms. sanitize_insn_aux_data() clears all 10176 * temporary data after do_check_common() finds that subprogram N cannot be 10177 * verified independently. pass_cnt counts the number of times 10178 * do_check_common() was run and insn->aux->seen tells the pass number 10179 * insn_aux_data was touched. These variables are compared to clear temporary 10180 * data from failed pass. For testing and experiments do_check_common() can be 10181 * run multiple times even when prior attempt to verify is unsuccessful. 
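 *
 * A concrete (made-up) example: while verifying subprog 2 the counter is
 * pass_cnt == 3 and that pass fails; every load/store aux entry whose seen
 * equals 3 was only populated by the failed pass and gets cleared, while
 * entries written by earlier, successful passes keep their data.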
10182 */ 10183 static void sanitize_insn_aux_data(struct bpf_verifier_env *env) 10184 { 10185 struct bpf_insn *insn = env->prog->insnsi; 10186 struct bpf_insn_aux_data *aux; 10187 int i, class; 10188 10189 for (i = 0; i < env->prog->len; i++) { 10190 class = BPF_CLASS(insn[i].code); 10191 if (class != BPF_LDX && class != BPF_STX) 10192 continue; 10193 aux = &env->insn_aux_data[i]; 10194 if (aux->seen != env->pass_cnt) 10195 continue; 10196 memset(aux, 0, offsetof(typeof(*aux), orig_idx)); 10197 } 10198 } 10199 10200 static int do_check_common(struct bpf_verifier_env *env, int subprog) 10201 { 10202 struct bpf_verifier_state *state; 10203 struct bpf_reg_state *regs; 10204 int ret, i; 10205 10206 env->prev_linfo = NULL; 10207 env->pass_cnt++; 10208 10209 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 10210 if (!state) 10211 return -ENOMEM; 10212 state->curframe = 0; 10213 state->speculative = false; 10214 state->branches = 1; 10215 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 10216 if (!state->frame[0]) { 10217 kfree(state); 10218 return -ENOMEM; 10219 } 10220 env->cur_state = state; 10221 init_func_state(env, state->frame[0], 10222 BPF_MAIN_FUNC /* callsite */, 10223 0 /* frameno */, 10224 subprog); 10225 10226 regs = state->frame[state->curframe]->regs; 10227 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 10228 ret = btf_prepare_func_args(env, subprog, regs); 10229 if (ret) 10230 goto out; 10231 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 10232 if (regs[i].type == PTR_TO_CTX) 10233 mark_reg_known_zero(env, regs, i); 10234 else if (regs[i].type == SCALAR_VALUE) 10235 mark_reg_unknown(env, regs, i); 10236 } 10237 } else { 10238 /* 1st arg to a function */ 10239 regs[BPF_REG_1].type = PTR_TO_CTX; 10240 mark_reg_known_zero(env, regs, BPF_REG_1); 10241 ret = btf_check_func_arg_match(env, subprog, regs); 10242 if (ret == -EFAULT) 10243 /* unlikely verifier bug. abort. 10244 * ret == 0 and ret < 0 are sadly acceptable for 10245 * main() function due to backward compatibility. 10246 * Like socket filter program may be written as: 10247 * int bpf_prog(struct pt_regs *ctx) 10248 * and never dereference that ctx in the program. 10249 * 'struct pt_regs' is a type mismatch for socket 10250 * filter that should be using 'struct __sk_buff'. 10251 */ 10252 goto out; 10253 } 10254 10255 ret = do_check(env); 10256 out: 10257 /* check for NULL is necessary, since cur_state can be freed inside 10258 * do_check() under memory pressure. 10259 */ 10260 if (env->cur_state) { 10261 free_verifier_state(env->cur_state, true); 10262 env->cur_state = NULL; 10263 } 10264 while (!pop_stack(env, NULL, NULL)); 10265 free_states(env); 10266 if (ret) 10267 /* clean aux data in case subprog was rejected */ 10268 sanitize_insn_aux_data(env); 10269 return ret; 10270 } 10271 10272 /* Verify all global functions in a BPF program one by one based on their BTF. 10273 * All global functions must pass verification. Otherwise the whole program is rejected. 10274 * Consider: 10275 * int bar(int); 10276 * int foo(int f) 10277 * { 10278 * return bar(f); 10279 * } 10280 * int bar(int b) 10281 * { 10282 * ... 10283 * } 10284 * foo() will be verified first for R1=any_scalar_value. During verification it 10285 * will be assumed that bar() already verified successfully and call to bar() 10286 * from foo() will be checked for type match only. Later bar() will be verified 10287 * independently to check that it's safe for R1=any_scalar_value. 
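 *
 * Sketch of the register setup for the independent check of bar():
 *	R1 = unknown SCALAR_VALUE	// the 'int b' argument, per BTF
 *	R10 = frame pointer of a fresh stack frame
 * i.e. bar() must be safe for every possible integer argument, not only
 * for the values foo() happens to pass.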
10288 */ 10289 static int do_check_subprogs(struct bpf_verifier_env *env) 10290 { 10291 struct bpf_prog_aux *aux = env->prog->aux; 10292 int i, ret; 10293 10294 if (!aux->func_info) 10295 return 0; 10296 10297 for (i = 1; i < env->subprog_cnt; i++) { 10298 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 10299 continue; 10300 env->insn_idx = env->subprog_info[i].start; 10301 WARN_ON_ONCE(env->insn_idx == 0); 10302 ret = do_check_common(env, i); 10303 if (ret) { 10304 return ret; 10305 } else if (env->log.level & BPF_LOG_LEVEL) { 10306 verbose(env, 10307 "Func#%d is safe for any args that match its prototype\n", 10308 i); 10309 } 10310 } 10311 return 0; 10312 } 10313 10314 static int do_check_main(struct bpf_verifier_env *env) 10315 { 10316 int ret; 10317 10318 env->insn_idx = 0; 10319 ret = do_check_common(env, 0); 10320 if (!ret) 10321 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 10322 return ret; 10323 } 10324 10325 10326 static void print_verification_stats(struct bpf_verifier_env *env) 10327 { 10328 int i; 10329 10330 if (env->log.level & BPF_LOG_STATS) { 10331 verbose(env, "verification time %lld usec\n", 10332 div_u64(env->verification_time, 1000)); 10333 verbose(env, "stack depth "); 10334 for (i = 0; i < env->subprog_cnt; i++) { 10335 u32 depth = env->subprog_info[i].stack_depth; 10336 10337 verbose(env, "%d", depth); 10338 if (i + 1 < env->subprog_cnt) 10339 verbose(env, "+"); 10340 } 10341 verbose(env, "\n"); 10342 } 10343 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 10344 "total_states %d peak_states %d mark_read %d\n", 10345 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 10346 env->max_states_per_insn, env->total_states, 10347 env->peak_states, env->longest_mark_read_walk); 10348 } 10349 10350 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 10351 { 10352 const struct btf_type *t, *func_proto; 10353 const struct bpf_struct_ops *st_ops; 10354 const struct btf_member *member; 10355 struct bpf_prog *prog = env->prog; 10356 u32 btf_id, member_idx; 10357 const char *mname; 10358 10359 btf_id = prog->aux->attach_btf_id; 10360 st_ops = bpf_struct_ops_find(btf_id); 10361 if (!st_ops) { 10362 verbose(env, "attach_btf_id %u is not a supported struct\n", 10363 btf_id); 10364 return -ENOTSUPP; 10365 } 10366 10367 t = st_ops->type; 10368 member_idx = prog->expected_attach_type; 10369 if (member_idx >= btf_type_vlen(t)) { 10370 verbose(env, "attach to invalid member idx %u of struct %s\n", 10371 member_idx, st_ops->name); 10372 return -EINVAL; 10373 } 10374 10375 member = &btf_type_member(t)[member_idx]; 10376 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 10377 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 10378 NULL); 10379 if (!func_proto) { 10380 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 10381 mname, member_idx, st_ops->name); 10382 return -EINVAL; 10383 } 10384 10385 if (st_ops->check_member) { 10386 int err = st_ops->check_member(t, member); 10387 10388 if (err) { 10389 verbose(env, "attach to unsupported member %s of struct %s\n", 10390 mname, st_ops->name); 10391 return err; 10392 } 10393 } 10394 10395 prog->aux->attach_func_proto = func_proto; 10396 prog->aux->attach_func_name = mname; 10397 env->ops = st_ops->verifier_ops; 10398 10399 return 0; 10400 } 10401 #define SECURITY_PREFIX "security_" 10402 10403 static int check_attach_modify_return(struct bpf_verifier_env *env) 10404 { 10405 struct bpf_prog *prog = env->prog; 10406 unsigned long addr = (unsigned 
long) prog->aux->trampoline->func.addr; 10407 10408 /* This is expected to be cleaned up in the future with the KRSI effort 10409 * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h. 10410 */ 10411 if (within_error_injection_list(addr) || 10412 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name, 10413 sizeof(SECURITY_PREFIX) - 1)) 10414 return 0; 10415 10416 verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n", 10417 prog->aux->attach_btf_id, prog->aux->attach_func_name); 10418 10419 return -EINVAL; 10420 } 10421 10422 static int check_attach_btf_id(struct bpf_verifier_env *env) 10423 { 10424 struct bpf_prog *prog = env->prog; 10425 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 10426 struct bpf_prog *tgt_prog = prog->aux->linked_prog; 10427 u32 btf_id = prog->aux->attach_btf_id; 10428 const char prefix[] = "btf_trace_"; 10429 int ret = 0, subprog = -1, i; 10430 struct bpf_trampoline *tr; 10431 const struct btf_type *t; 10432 bool conservative = true; 10433 const char *tname; 10434 struct btf *btf; 10435 long addr; 10436 u64 key; 10437 10438 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) 10439 return check_struct_ops_btf_id(env); 10440 10441 if (prog->type != BPF_PROG_TYPE_TRACING && 10442 prog->type != BPF_PROG_TYPE_LSM && 10443 !prog_extension) 10444 return 0; 10445 10446 if (!btf_id) { 10447 verbose(env, "Tracing programs must provide btf_id\n"); 10448 return -EINVAL; 10449 } 10450 btf = bpf_prog_get_target_btf(prog); 10451 if (!btf) { 10452 verbose(env, 10453 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 10454 return -EINVAL; 10455 } 10456 t = btf_type_by_id(btf, btf_id); 10457 if (!t) { 10458 verbose(env, "attach_btf_id %u is invalid\n", btf_id); 10459 return -EINVAL; 10460 } 10461 tname = btf_name_by_offset(btf, t->name_off); 10462 if (!tname) { 10463 verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id); 10464 return -EINVAL; 10465 } 10466 if (tgt_prog) { 10467 struct bpf_prog_aux *aux = tgt_prog->aux; 10468 10469 for (i = 0; i < aux->func_info_cnt; i++) 10470 if (aux->func_info[i].type_id == btf_id) { 10471 subprog = i; 10472 break; 10473 } 10474 if (subprog == -1) { 10475 verbose(env, "Subprog %s doesn't exist\n", tname); 10476 return -EINVAL; 10477 } 10478 conservative = aux->func_info_aux[subprog].unreliable; 10479 if (prog_extension) { 10480 if (conservative) { 10481 verbose(env, 10482 "Cannot replace static functions\n"); 10483 return -EINVAL; 10484 } 10485 if (!prog->jit_requested) { 10486 verbose(env, 10487 "Extension programs should be JITed\n"); 10488 return -EINVAL; 10489 } 10490 env->ops = bpf_verifier_ops[tgt_prog->type]; 10491 } 10492 if (!tgt_prog->jited) { 10493 verbose(env, "Can attach to only JITed progs\n"); 10494 return -EINVAL; 10495 } 10496 if (tgt_prog->type == prog->type) { 10497 /* Cannot fentry/fexit another fentry/fexit program. 10498 * Cannot attach program extension to another extension. 10499 * It's ok to attach fentry/fexit to extension program. 10500 */ 10501 verbose(env, "Cannot recursively attach\n"); 10502 return -EINVAL; 10503 } 10504 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && 10505 prog_extension && 10506 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || 10507 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { 10508 /* Program extensions can extend all program types 10509 * except fentry/fexit. The reason is the following. 
10510 * The fentry/fexit programs are used for performance 10511 * analysis, stats and can be attached to any program 10512 * type except themselves. When extension program is 10513 * replacing XDP function it is necessary to allow 10514 * performance analysis of all functions. Both original 10515 * XDP program and its program extension. Hence 10516 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is 10517 * allowed. If extending of fentry/fexit was allowed it 10518 * would be possible to create long call chain 10519 * fentry->extension->fentry->extension beyond 10520 * reasonable stack size. Hence extending fentry is not 10521 * allowed. 10522 */ 10523 verbose(env, "Cannot extend fentry/fexit\n"); 10524 return -EINVAL; 10525 } 10526 key = ((u64)aux->id) << 32 | btf_id; 10527 } else { 10528 if (prog_extension) { 10529 verbose(env, "Cannot replace kernel functions\n"); 10530 return -EINVAL; 10531 } 10532 key = btf_id; 10533 } 10534 10535 switch (prog->expected_attach_type) { 10536 case BPF_TRACE_RAW_TP: 10537 if (tgt_prog) { 10538 verbose(env, 10539 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); 10540 return -EINVAL; 10541 } 10542 if (!btf_type_is_typedef(t)) { 10543 verbose(env, "attach_btf_id %u is not a typedef\n", 10544 btf_id); 10545 return -EINVAL; 10546 } 10547 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 10548 verbose(env, "attach_btf_id %u points to wrong type name %s\n", 10549 btf_id, tname); 10550 return -EINVAL; 10551 } 10552 tname += sizeof(prefix) - 1; 10553 t = btf_type_by_id(btf, t->type); 10554 if (!btf_type_is_ptr(t)) 10555 /* should never happen in valid vmlinux build */ 10556 return -EINVAL; 10557 t = btf_type_by_id(btf, t->type); 10558 if (!btf_type_is_func_proto(t)) 10559 /* should never happen in valid vmlinux build */ 10560 return -EINVAL; 10561 10562 /* remember two read only pointers that are valid for 10563 * the life time of the kernel 10564 */ 10565 prog->aux->attach_func_name = tname; 10566 prog->aux->attach_func_proto = t; 10567 prog->aux->attach_btf_trace = true; 10568 return 0; 10569 default: 10570 if (!prog_extension) 10571 return -EINVAL; 10572 /* fallthrough */ 10573 case BPF_MODIFY_RETURN: 10574 case BPF_LSM_MAC: 10575 case BPF_TRACE_FENTRY: 10576 case BPF_TRACE_FEXIT: 10577 prog->aux->attach_func_name = tname; 10578 if (prog->type == BPF_PROG_TYPE_LSM) { 10579 ret = bpf_lsm_verify_prog(&env->log, prog); 10580 if (ret < 0) 10581 return ret; 10582 } 10583 10584 if (!btf_type_is_func(t)) { 10585 verbose(env, "attach_btf_id %u is not a function\n", 10586 btf_id); 10587 return -EINVAL; 10588 } 10589 if (prog_extension && 10590 btf_check_type_match(env, prog, btf, t)) 10591 return -EINVAL; 10592 t = btf_type_by_id(btf, t->type); 10593 if (!btf_type_is_func_proto(t)) 10594 return -EINVAL; 10595 tr = bpf_trampoline_lookup(key); 10596 if (!tr) 10597 return -ENOMEM; 10598 /* t is either vmlinux type or another program's type */ 10599 prog->aux->attach_func_proto = t; 10600 mutex_lock(&tr->mutex); 10601 if (tr->func.addr) { 10602 prog->aux->trampoline = tr; 10603 goto out; 10604 } 10605 if (tgt_prog && conservative) { 10606 prog->aux->attach_func_proto = NULL; 10607 t = NULL; 10608 } 10609 ret = btf_distill_func_proto(&env->log, btf, t, 10610 tname, &tr->func.model); 10611 if (ret < 0) 10612 goto out; 10613 if (tgt_prog) { 10614 if (subprog == 0) 10615 addr = (long) tgt_prog->bpf_func; 10616 else 10617 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 10618 } else { 10619 addr = kallsyms_lookup_name(tname); 10620 if (!addr) { 10621 
verbose(env, 10622 "The address of function %s cannot be found\n", 10623 tname); 10624 ret = -ENOENT; 10625 goto out; 10626 } 10627 } 10628 tr->func.addr = (void *)addr; 10629 prog->aux->trampoline = tr; 10630 10631 if (prog->expected_attach_type == BPF_MODIFY_RETURN) 10632 ret = check_attach_modify_return(env); 10633 out: 10634 mutex_unlock(&tr->mutex); 10635 if (ret) 10636 bpf_trampoline_put(tr); 10637 return ret; 10638 } 10639 } 10640 10641 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, 10642 union bpf_attr __user *uattr) 10643 { 10644 u64 start_time = ktime_get_ns(); 10645 struct bpf_verifier_env *env; 10646 struct bpf_verifier_log *log; 10647 int i, len, ret = -EINVAL; 10648 bool is_priv; 10649 10650 /* no program is valid */ 10651 if (ARRAY_SIZE(bpf_verifier_ops) == 0) 10652 return -EINVAL; 10653 10654 /* 'struct bpf_verifier_env' can be global, but since it's not small, 10655 * allocate/free it every time bpf_check() is called 10656 */ 10657 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 10658 if (!env) 10659 return -ENOMEM; 10660 log = &env->log; 10661 10662 len = (*prog)->len; 10663 env->insn_aux_data = 10664 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); 10665 ret = -ENOMEM; 10666 if (!env->insn_aux_data) 10667 goto err_free_env; 10668 for (i = 0; i < len; i++) 10669 env->insn_aux_data[i].orig_idx = i; 10670 env->prog = *prog; 10671 env->ops = bpf_verifier_ops[env->prog->type]; 10672 is_priv = capable(CAP_SYS_ADMIN); 10673 10674 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 10675 mutex_lock(&bpf_verifier_lock); 10676 if (!btf_vmlinux) 10677 btf_vmlinux = btf_parse_vmlinux(); 10678 mutex_unlock(&bpf_verifier_lock); 10679 } 10680 10681 /* grab the mutex to protect few globals used by verifier */ 10682 if (!is_priv) 10683 mutex_lock(&bpf_verifier_lock); 10684 10685 if (attr->log_level || attr->log_buf || attr->log_size) { 10686 /* user requested verbose verifier output 10687 * and supplied buffer to store the verification trace 10688 */ 10689 log->level = attr->log_level; 10690 log->ubuf = (char __user *) (unsigned long) attr->log_buf; 10691 log->len_total = attr->log_size; 10692 10693 ret = -EINVAL; 10694 /* log attributes have to be sane */ 10695 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || 10696 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) 10697 goto err_unlock; 10698 } 10699 10700 if (IS_ERR(btf_vmlinux)) { 10701 /* Either gcc or pahole or kernel are broken. 
*/ 10702 verbose(env, "in-kernel BTF is malformed\n"); 10703 ret = PTR_ERR(btf_vmlinux); 10704 goto skip_full_check; 10705 } 10706 10707 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 10708 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 10709 env->strict_alignment = true; 10710 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) 10711 env->strict_alignment = false; 10712 10713 env->allow_ptr_leaks = is_priv; 10714 10715 if (is_priv) 10716 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; 10717 10718 ret = replace_map_fd_with_map_ptr(env); 10719 if (ret < 0) 10720 goto skip_full_check; 10721 10722 if (bpf_prog_is_dev_bound(env->prog->aux)) { 10723 ret = bpf_prog_offload_verifier_prep(env->prog); 10724 if (ret) 10725 goto skip_full_check; 10726 } 10727 10728 env->explored_states = kvcalloc(state_htab_size(env), 10729 sizeof(struct bpf_verifier_state_list *), 10730 GFP_USER); 10731 ret = -ENOMEM; 10732 if (!env->explored_states) 10733 goto skip_full_check; 10734 10735 ret = check_subprogs(env); 10736 if (ret < 0) 10737 goto skip_full_check; 10738 10739 ret = check_btf_info(env, attr, uattr); 10740 if (ret < 0) 10741 goto skip_full_check; 10742 10743 ret = check_attach_btf_id(env); 10744 if (ret) 10745 goto skip_full_check; 10746 10747 ret = check_cfg(env); 10748 if (ret < 0) 10749 goto skip_full_check; 10750 10751 ret = do_check_subprogs(env); 10752 ret = ret ?: do_check_main(env); 10753 10754 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) 10755 ret = bpf_prog_offload_finalize(env); 10756 10757 skip_full_check: 10758 kvfree(env->explored_states); 10759 10760 if (ret == 0) 10761 ret = check_max_stack_depth(env); 10762 10763 /* instruction rewrites happen after this point */ 10764 if (is_priv) { 10765 if (ret == 0) 10766 opt_hard_wire_dead_code_branches(env); 10767 if (ret == 0) 10768 ret = opt_remove_dead_code(env); 10769 if (ret == 0) 10770 ret = opt_remove_nops(env); 10771 } else { 10772 if (ret == 0) 10773 sanitize_dead_code(env); 10774 } 10775 10776 if (ret == 0) 10777 /* program is valid, convert *(u32*)(ctx + off) accesses */ 10778 ret = convert_ctx_accesses(env); 10779 10780 if (ret == 0) 10781 ret = fixup_bpf_calls(env); 10782 10783 /* do 32-bit optimization after insn patching has done so those patched 10784 * insns could be handled correctly. 10785 */ 10786 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { 10787 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); 10788 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret 10789 : false; 10790 } 10791 10792 if (ret == 0) 10793 ret = fixup_call_args(env); 10794 10795 env->verification_time = ktime_get_ns() - start_time; 10796 print_verification_stats(env); 10797 10798 if (log->level && bpf_verifier_log_full(log)) 10799 ret = -ENOSPC; 10800 if (log->level && !log->ubuf) { 10801 ret = -EFAULT; 10802 goto err_release_maps; 10803 } 10804 10805 if (ret == 0 && env->used_map_cnt) { 10806 /* if program passed verifier, update used_maps in bpf_prog_info */ 10807 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 10808 sizeof(env->used_maps[0]), 10809 GFP_KERNEL); 10810 10811 if (!env->prog->aux->used_maps) { 10812 ret = -ENOMEM; 10813 goto err_release_maps; 10814 } 10815 10816 memcpy(env->prog->aux->used_maps, env->used_maps, 10817 sizeof(env->used_maps[0]) * env->used_map_cnt); 10818 env->prog->aux->used_map_cnt = env->used_map_cnt; 10819 10820 /* program is valid. 
Convert pseudo bpf_ld_imm64 into generic 10821 * bpf_ld_imm64 instructions 10822 */ 10823 convert_pseudo_ld_imm64(env); 10824 } 10825 10826 if (ret == 0) 10827 adjust_btf_func(env); 10828 10829 err_release_maps: 10830 if (!env->prog->aux->used_maps) 10831 /* if we didn't copy map pointers into bpf_prog_info, release 10832 * them now. Otherwise free_used_maps() will release them. 10833 */ 10834 release_maps(env); 10835 *prog = env->prog; 10836 err_unlock: 10837 if (!is_priv) 10838 mutex_unlock(&bpf_verifier_lock); 10839 vfree(env->insn_aux_data); 10840 err_free_env: 10841 kfree(env); 10842 return ret; 10843 } 10844