/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is an all-possible-paths descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 128k insns (BPF_COMPLEXITY_LIMIT_INSNS), which may
 * be hit even if the total number of insns is less than 4K when there are
 * too many branches that change stack/regs.
 * The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
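/* For illustration, a hypothetical continuation of the lookup sequence
 * above, showing the branch that prunes the NULL case (a sketch, not a
 * real program):
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),        // if R0 == NULL skip the access
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), // here R0 is PTR_TO_MAP_VALUE
 *    BPF_EXIT_INSN(),
 */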
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_state = (unsigned long)map |
			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
};

static DEFINE_MUTEX(bpf_verifier_lock);

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
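/* A minimal usage sketch for the exported helper above; the callback name
 * here is hypothetical, but an offload driver's verification hook would
 * use it roughly like this:
 *
 *	static int example_verify_insn(struct bpf_verifier_env *env, int insn_idx)
 *	{
 *		bpf_verifier_log_write(env, "unsupported insn at %d\n", insn_idx);
 *		return -EINVAL;
 *	}
 */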
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}
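/* Sample of the state dump produced by print_verifier_state() below, with
 * illustrative values: R1 holds the context, R6 a known-zero scalar, R10
 * the frame pointer of the main function, and fp-8 a spilled
 * map_value_or_null:
 *
 *    R1=ctx(id=0,off=0,imm=0) R6_w=inv0 R10=fp0,call_-1 fp-8_w=map_value_or_null
 */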
static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
			if (t == PTR_TO_STACK)
				verbose(env, ",call_%d", func(env, reg)->callsite);
		} else {
			verbose(env, "(id=%d", reg->id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			verbose(env, " fp%d",
				(-i - 1) * BPF_REG_SIZE);
			print_liveness(env, state->stack[i].spilled_ptr.live);
			verbose(env, "=%s",
				reg_type_str[state->stack[i].spilled_ptr.type]);
		}
		if (state->stack[i].slot_type[0] == STACK_ZERO)
			verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
	}
	verbose(env, "\n");
}

static int copy_stack_state(struct bpf_func_state *dst,
			    const struct bpf_func_state *src)
{
	if (!src->stack)
		return 0;
	if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
		/* internal bug, make state invalid to reject the program */
		memset(dst, 0, sizeof(*dst));
		return -EFAULT;
	}
	memcpy(dst->stack, src->stack,
	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
	return 0;
}

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. When the program accesses the
 * stack, check_stack_write() calls into realloc_func_state() to grow the
 * stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which this function copies over. It points to the previous
 * bpf_verifier_state, which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int size,
			      bool copy_old)
{
	u32 old_size = state->allocated_stack;
	struct bpf_stack_state *new_stack;
	int slot = size / BPF_REG_SIZE;

	if (size <= old_size || !size) {
		if (copy_old)
			return 0;
		state->allocated_stack = slot * BPF_REG_SIZE;
		if (!size && old_size) {
			kfree(state->stack);
			state->stack = NULL;
		}
		return 0;
	}
	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
				  GFP_KERNEL);
	if (!new_stack)
		return -ENOMEM;
	if (copy_old) {
		if (state->stack)
			memcpy(new_stack, state->stack,
			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
		memset(new_stack + old_size / BPF_REG_SIZE, 0,
		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
	}
	state->allocated_stack = slot * BPF_REG_SIZE;
	kfree(state->stack);
	state->stack = new_stack;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->stack);
	kfree(state);
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
	return copy_stack_state(dst, src);
}
static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->curframe = src->curframe;
	dst_state->parent = src->parent;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
		verbose(env, "BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->id = 0;
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}
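/* Worked example (a sketch): __mark_reg_known(reg, 4) sets var_off to the
 * constant tnum {.value = 4, .mask = 0} and collapses all four bounds to 4,
 * i.e. 4 s<= reg s<= 4 and 4 u<= reg u<= 4.
 */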
/* Mark the 'variable offset' part of a register as zero.  This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->off = 0;
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine.  This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive.  We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative.  We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}
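/* Worked example for the unsigned-to-signed direction above: if all that
 * is known is 0xfffffffffffffffd u<= x u<= 0xffffffffffffffff, then
 * (s64)umin_value is -3 < 0, so the third branch applies and the signed
 * bounds become -3 s<= x s<= -1.
 */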
/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
	reg->type = SCALAR_VALUE;
	reg->id = 0;
	reg->off = 0;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_unknown(regs + regno);
}

static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
	__mark_reg_unknown(reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_not_init(regs + regno);
}

static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
	mark_reg_known_zero(env, regs, BPF_REG_1);
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}
static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->allow_ptr_leaks) {
			verbose(env, "function calls to other bpf functions are allowed for root only\n");
			return -EPERM;
		}
		if (bpf_prog_is_dev_bound(env->prog->aux)) {
			verbose(env, "function calls in offloaded programs are not supported yet\n");
			return -EINVAL;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog to simplify subprog iteration logic.
	 * 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level > 1)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}
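/* Illustrative layout (hypothetical program): with prog->len == 20 and a
 * single bpf-to-bpf call at insn 4 carrying imm == 5, add_subprog()
 * records start 4 + 5 + 1 = 10, so subprog #0 spans insns [0, 10),
 * subprog #1 spans [10, 20), and the fake 'exit' subprog starts at 20.
 */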
static
struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
				       const struct bpf_verifier_state *state,
				       struct bpf_verifier_state *parent,
				       u32 regno)
{
	struct bpf_verifier_state *tmp = NULL;

	/* 'parent' could be a state of caller and
	 * 'state' could be a state of callee. In such case
	 * parent->curframe < state->curframe
	 * and it's ok for r1 - r5 registers
	 *
	 * 'parent' could be a callee's state after it bpf_exit-ed.
	 * In such case parent->curframe > state->curframe
	 * and it's ok for r0 only
	 */
	if (parent->curframe == state->curframe ||
	    (parent->curframe < state->curframe &&
	     regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
	    (parent->curframe > state->curframe &&
	     regno == BPF_REG_0))
		return parent;

	if (parent->curframe > state->curframe &&
	    regno >= BPF_REG_6) {
		/* for callee saved regs we have to skip the whole chain
		 * of states that belong to callee and mark as LIVE_READ
		 * the registers before the call
		 */
		tmp = parent;
		while (tmp && tmp->curframe != state->curframe) {
			tmp = tmp->parent;
		}
		if (!tmp)
			goto bug;
		parent = tmp;
	} else {
		goto bug;
	}
	return parent;
bug:
	verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
	verbose(env, "regno %d parent frame %d current frame %d\n",
		regno, parent->curframe, state->curframe);
	return NULL;
}

static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_verifier_state *state,
			 struct bpf_verifier_state *parent,
			 u32 regno)
{
	bool writes = parent == state->parent; /* Observe write marks */

	if (regno == BPF_REG_FP)
		/* We don't need to worry about FP liveness because it's read-only */
		return 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
			break;
		parent = skip_callee(env, state, parent, regno);
		if (!parent)
			return -EFAULT;
		/* ... then we depend on parent's value */
		parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
		state = parent;
		parent = state->parent;
		writes = true;
	}
	return 0;
}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		return mark_reg_read(env, vstate, vstate->parent, regno);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		regs[regno].live |= REG_LIVE_WRITTEN;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}
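/* Illustrative spill/fill pair tracked by the stack helpers below (a
 * sketch): only a full 8-byte store lets a pointer type survive the round
 * trip through the stack:
 *
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill R1 (e.g. PTR_TO_CTX) to fp-8
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), // fill restores PTR_TO_CTX
 */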
/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}

/* check_stack_read/write functions track spill/fill of registers;
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct bpf_verifier_env *env,
			     struct bpf_func_state *state, /* func where register points to */
			     int off, int size, int value_regno, int insn_idx)
{
	struct bpf_func_state *cur; /* state of the current function */
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
	enum bpf_reg_type type;

	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
				 true);
	if (err)
		return err;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */
	if (!env->allow_ptr_leaks &&
	    state->stack[spi].slot_type[0] == STACK_SPILL &&
	    size != BPF_REG_SIZE) {
		verbose(env, "attempt to corrupt spilled pointer on stack\n");
		return -EACCES;
	}

	cur = env->cur_state->frame[env->cur_state->curframe];
	if (value_regno >= 0 &&
	    is_spillable_regtype((type = cur->regs[value_regno].type))) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}

		if (state != cur && type == PTR_TO_STACK) {
			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
			return -EINVAL;
		}

		/* save register state */
		state->stack[spi].spilled_ptr = cur->regs[value_regno];
		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		for (i = 0; i < BPF_REG_SIZE; i++) {
			if (state->stack[spi].slot_type[i] == STACK_MISC &&
			    !env->allow_ptr_leaks) {
				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
				int soff = (-spi - 1) * BPF_REG_SIZE;

				/* detected reuse of integer stack slot with a pointer
				 * which means either llvm is reusing stack slot or
				 * an attacker is trying to exploit CVE-2018-3639
				 * (speculative store bypass)
				 * Have to sanitize that slot with preemptive
				 * store of zero.
				 */
				if (*poff && *poff != soff) {
					/* disallow programs where single insn stores
					 * into two different stack slots, since verifier
					 * cannot sanitize them
					 */
					verbose(env,
						"insn %d cannot access two stack slots fp%d and fp%d",
						insn_idx, *poff, soff);
					return -EINVAL;
				}
				*poff = soff;
			}
			state->stack[spi].slot_type[i] = STACK_SPILL;
		}
	} else {
		u8 type = STACK_MISC;

		/* regular write of data into stack */
		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};

		/* only mark the slot as written if all 8 bytes were written
		 * otherwise read propagation may incorrectly stop too soon
		 * when stack slots are partially written.
		 * This heuristic means that read propagation will be
		 * conservative, since it will add reg_live_read marks
		 * to stack slots all the way to first state when a program
		 * writes+reads less than 8 bytes
		 */
		if (size == BPF_REG_SIZE)
			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		/* when we zero initialize stack slots mark them as such */
		if (value_regno >= 0 &&
		    register_is_null(&cur->regs[value_regno]))
			type = STACK_ZERO;

		for (i = 0; i < size; i++)
			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
				type;
	}
	return 0;
}
/* registers of every function are unique and mark_reg_read() propagates
 * the liveness in the following cases:
 * - from callee into caller for R1 - R5 that were used as arguments
 * - from caller into callee for R0 that is used as result of the call
 * - from caller to the same caller skipping states of the callee for R6 - R9,
 *   since R6 - R9 are callee saved by implicit function prologue and
 *   caller's R6 != callee's R6, so when we propagate liveness up to
 *   parent states we need to skip callee states for R6 - R9.
 *
 * stack slot marking is different, since stacks of caller and callee are
 * accessible in both (since caller can pass a pointer to caller's stack to
 * callee which can pass it to another function), hence mark_stack_slot_read()
 * has to propagate the stack liveness to all parent states at the given frame
 * number.
 * Consider code:
 * f1() {
 *   ptr = fp - 8;
 *   *ptr = ctx;
 *   call f2 {
 *      .. = *ptr;
 *   }
 *   .. = *ptr;
 * }
 * First *ptr is reading from f1's stack and mark_stack_slot_read() has
 * to mark liveness at the f1's frame and not f2's frame.
 * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
 * to propagate liveness to f2 states at f1's frame level and further into
 * f1 states at f1's frame level until write into that stack slot
 */
static void mark_stack_slot_read(struct bpf_verifier_env *env,
				 const struct bpf_verifier_state *state,
				 struct bpf_verifier_state *parent,
				 int slot, int frameno)
{
	bool writes = parent == state->parent; /* Observe write marks */

	while (parent) {
		if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
			/* since LIVE_WRITTEN mark is only done for full 8-byte
			 * write the read marks are conservative and parent
			 * state may not even have the stack allocated. In such case
			 * end the propagation, since the loop reached beginning
			 * of the function
			 */
			break;
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
			break;
		/* ... then we depend on parent's value */
		parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
		state = parent;
		parent = state->parent;
		writes = true;
	}
}
static int check_stack_read(struct bpf_verifier_env *env,
			    struct bpf_func_state *reg_state /* func where register points to */,
			    int off, int size, int value_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
	u8 *stype;

	if (reg_state->allocated_stack <= slot) {
		verbose(env, "invalid read from stack off %d+0 size %d\n",
			off, size);
		return -EACCES;
	}
	stype = reg_state->stack[spi].slot_type;

	if (stype[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
				verbose(env, "corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0) {
			/* restore register state from stack */
			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
			/* mark reg as written since spilled pointer state likely
			 * has its liveness marks cleared by is_state_visited()
			 * which resets stack/reg liveness for state transitions
			 */
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		mark_stack_slot_read(env, vstate, vstate->parent, spi,
				     reg_state->frameno);
		return 0;
	} else {
		int zeros = 0;

		for (i = 0; i < size; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
				continue;
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
				zeros++;
				continue;
			}
			verbose(env, "invalid read from stack off %d+%d size %d\n",
				off, i, size);
			return -EACCES;
		}
		mark_stack_slot_read(env, vstate, vstate->parent, spi,
				     reg_state->frameno);
		if (value_regno >= 0) {
			if (zeros == size) {
				/* any size read into register is zero extended,
				 * so the whole register == const_zero
				 */
				__mark_reg_const_zero(&state->regs[value_regno]);
			} else {
				/* have read misc data from the stack */
				mark_reg_unknown(env, state->regs, value_regno);
			}
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
			      int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    off + size > map->value_size) {
		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
			    int off, int size, bool zero_size_allowed)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	int err;

	/* We may have adjusted the register to this map value, so we
	 * need to try adding each of min_value and max_value to off
	 * to make sure our theoretical access will be safe.
	 */
	if (env->log.level)
		print_verifier_state(env, state);
	/* The minimum value is only important with signed
	 * comparisons where we can't assume the floor of a
	 * value is 0.  If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
	 * will have a set floor within our range.
	 */
	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_map_access(env, regno, reg->smin_value + off, size,
				 zero_size_allowed);
	if (err) {
		verbose(env, "R%d min value is outside of the array range\n",
			regno);
		return err;
	}

	/* If we haven't set a max value then we need to bail since we can't be
	 * sure we won't do bad things.
	 * If reg->umax_value + off could overflow, treat that as unbounded too.
	 */
	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
			regno);
		return -EACCES;
	}
	err = __check_map_access(env, regno, reg->umax_value + off, size,
				 zero_size_allowed);
	if (err)
		verbose(env, "R%d max value is outside of the array range\n",
			regno);
	return err;
}

#define MAX_PACKET_OFF 0xffff

static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
				       const struct bpf_call_arg_meta *meta,
				       enum bpf_access_type t)
{
	switch (env->prog->type) {
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
		/* dst_input() and dst_output() can't write for now */
		if (t == BPF_WRITE)
			return false;
		/* fallthrough */
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		if (meta)
			return meta->pkt_access;

		env->seen_direct_write = true;
		return true;
	default:
		return false;
	}
}

static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
				 int off, int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    (u64)off + size > reg->range) {
		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, reg->off, reg->range);
		return -EACCES;
	}
	return 0;
}
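/* The classic bounds-check pattern these helpers validate, as it would be
 * written in a hypothetical XDP program; after the comparison the verifier
 * sets reg->range on the packet pointer, which __check_packet_access()
 * then checks against:
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return XDP_DROP;
 *	// here loads through eth are allowed: reg->range covers 14 bytes
 */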
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
			       int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	int err;

	/* We may have added a variable offset to the packet pointer; but any
	 * reg->range we have comes after that.  We are only checking the fixed
	 * offset.
	 */

	/* We don't allow negative numbers, because we aren't tracking enough
	 * detail to prove they're safe.
	 */
	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
	if (err) {
		verbose(env, "R%d offset is outside of the packet\n", regno);
		return err;
	}
	return err;
}

/* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
	struct bpf_insn_access_aux info = {
		.reg_type = *reg_type,
	};

	if (env->ops->is_valid_access &&
	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
		/* A non zero info.ctx_field_size indicates that this field is a
		 * candidate for later verifier transformation to load the whole
		 * field and then apply a mask when accessed with a narrower
		 * access than actual ctx access size. A zero info.ctx_field_size
		 * will only allow for whole field access and rejects any other
		 * type of narrower access.
		 */
		*reg_type = info.reg_type;

		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
		/* remember the offset of last byte accessed in ctx */
		if (env->prog->aux->max_ctx_offset < off + size)
			env->prog->aux->max_ctx_offset = off + size;
		return 0;
	}

	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static bool __is_pointer_value(bool allow_ptr_leaks,
			       const struct bpf_reg_state *reg)
{
	if (allow_ptr_leaks)
		return false;

	return reg->type != SCALAR_VALUE;
}

static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
}

static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = cur_regs(env) + regno;

	return reg->type == PTR_TO_CTX;
}

static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = cur_regs(env) + regno;

	return type_is_pkt_pointer(reg->type);
}

static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg,
				   int off, int size, bool strict)
{
	struct tnum reg_off;
	int ip_align;

	/* Byte size accesses are always allowed. */
	if (!strict || size == 1)
		return 0;

	/* For platforms that do not have a Kconfig enabling
	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
	 * to this code only in strict mode where we want to emulate
	 * the NET_IP_ALIGN==2 checking.  Therefore use an
	 * unconditional IP align value of '2'.
	 */
	ip_align = 2;

	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
	if (!tnum_is_aligned(reg_off, size)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env,
			"misaligned packet access off %d+%s+%d+%d size %d\n",
			ip_align, tn_buf, reg->off, off, size);
		return -EACCES;
	}

	return 0;
}

static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
				       const struct bpf_reg_state *reg,
				       const char *pointer_desc,
				       int off, int size, bool strict)
{
	struct tnum reg_off;

	/* Byte size accesses are always allowed. */
	if (!strict || size == 1)
		return 0;

	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
	if (!tnum_is_aligned(reg_off, size)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
			pointer_desc, tn_buf, reg->off, off, size);
		return -EACCES;
	}

	return 0;
}

static int check_ptr_alignment(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg, int off,
			       int size, bool strict_alignment_once)
{
	bool strict = env->strict_alignment || strict_alignment_once;
	const char *pointer_desc = "";

	switch (reg->type) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		/* Special case, because of NET_IP_ALIGN. Given metadata sits
		 * right in front, treat it the very same way.
		 */
		return check_pkt_ptr_alignment(env, reg, off, size, strict);
	case PTR_TO_MAP_VALUE:
		pointer_desc = "value ";
		break;
	case PTR_TO_CTX:
		pointer_desc = "context ";
		break;
	case PTR_TO_STACK:
		pointer_desc = "stack ";
		/* The stack spill tracking logic in check_stack_write()
		 * and check_stack_read() relies on stack accesses being
		 * aligned.
		 */
		strict = true;
		break;
	default:
		break;
	}
	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
					   strict);
}

static int update_stack_depth(struct bpf_verifier_env *env,
			      const struct bpf_func_state *func,
			      int off)
{
	u16 stack = env->subprog_info[func->subprogno].stack_depth;

	if (stack >= -off)
		return 0;

	/* update known max for given subprogram */
	env->subprog_info[func->subprogno].stack_depth = -off;
	return 0;
}

/* starting from main bpf function walk all instructions of the function
 * and recursively walk all callees that given function can call.
 * Ignore jump and exit insns.
 * Since recursion is prevented by check_cfg() this algorithm
 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
 */
static int check_max_stack_depth(struct bpf_verifier_env *env)
{
	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int ret_insn[MAX_CALL_FRAMES];
	int ret_prog[MAX_CALL_FRAMES];

process_func:
	/* round up to 32-bytes, since this is granularity
	 * of interpreter stack size
	 */
	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
	if (depth > MAX_BPF_STACK) {
		verbose(env, "combined stack size of %d calls is %d. Too large\n",
			frame + 1, depth);
		return -EACCES;
	}
continue_func:
	subprog_end = subprog[idx + 1].start;
	for (; i < subprog_end; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		/* remember insn and function to return to */
		ret_insn[frame] = i + 1;
		ret_prog[frame] = idx;

		/* find the callee */
		i = i + insn[i].imm + 1;
		idx = find_subprog(env, i);
		if (idx < 0) {
			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
				  i);
			return -EFAULT;
		}
		frame++;
		if (frame >= MAX_CALL_FRAMES) {
			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
			return -EFAULT;
		}
		goto process_func;
	}
	/* end of for() loop means the last insn of the 'subprog'
	 * was reached. Doesn't matter whether it was JA or EXIT
	 */
	if (frame == 0)
		return 0;
	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
	frame--;
	i = ret_insn[frame];
	idx = ret_prog[frame];
	goto continue_func;
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static int get_callee_stack_depth(struct bpf_verifier_env *env,
				  const struct bpf_insn *insn, int idx)
{
	int start = idx + insn->imm + 1, subprog;

	subprog = find_subprog(env, start);
	if (subprog < 0) {
		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
			  start);
		return -EFAULT;
	}
	return env->subprog_info[subprog].stack_depth;
}
#endif

/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE
 */
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
	u64 mask;

	/* clear high bits in bit representation */
	reg->var_off = tnum_cast(reg->var_off, size);

	/* fix arithmetic bounds */
	mask = ((u64)1 << (size * 8)) - 1;
	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
		reg->umin_value &= mask;
		reg->umax_value &= mask;
	} else {
		reg->umin_value = 0;
		reg->umax_value = mask;
	}
	reg->smin_value = reg->umin_value;
	reg->smax_value = reg->umax_value;
}
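/* Worked example: coerce_reg_to_size(reg, 1) masks the bounds with 0xff.
 * A register known to lie in [0x100, 0x1ff] has equal high bits, so the
 * bounds become [0x00, 0xff]; a register in [0, 0x1ff] differs in bit 8
 * between umin and umax, so its bounds are reset to the full [0, 0xff]
 * range instead.
 */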
/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register which value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
			    int off, int bpf_size, enum bpf_access_type t,
			    int value_regno, bool strict_alignment_once)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = regs + regno;
	struct bpf_func_state *state;
	int size, err = 0;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	/* alignment checks will add in reg->off themselves */
	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
	if (err)
		return err;

	/* for access checks, reg->off is just part of off */
	off += reg->off;

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}

		err = check_map_access(env, regno, off, size, false);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);

	} else if (reg->type == PTR_TO_CTX) {
		enum bpf_reg_type reg_type = SCALAR_VALUE;

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		/* ctx accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 */
		if (reg->off) {
			verbose(env,
				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
				regno, reg->off, off - reg->off);
			return -EACCES;
		}
		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env,
				"variable ctx access var_off=%s off=%d size=%d",
				tn_buf, off, size);
			return -EACCES;
		}
		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
		if (!err && t == BPF_READ && value_regno >= 0) {
			/* ctx access returns either a scalar, or a
			 * PTR_TO_PACKET[_META,_END]. In the latter
			 * case, we know the offset is zero.
			 */
			if (reg_type == SCALAR_VALUE)
				mark_reg_unknown(env, regs, value_regno);
			else
				mark_reg_known_zero(env, regs,
						    value_regno);
			regs[value_regno].id = 0;
			regs[value_regno].off = 0;
			regs[value_regno].range = 0;
			regs[value_regno].type = reg_type;
		}

	} else if (reg->type == PTR_TO_STACK) {
		/* stack accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 * See check_stack_read().
		 */
		 */
		if (!tnum_is_const(reg->var_off)) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
				tn_buf, off, size);
			return -EACCES;
		}
		off += reg->var_off.value;
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose(env, "invalid stack off=%d size=%d\n", off,
				size);
			return -EACCES;
		}

		state = func(env, reg);
		err = update_stack_depth(env, state, off);
		if (err)
			return err;

		if (t == BPF_WRITE)
			err = check_stack_write(env, state, off, size,
						value_regno, insn_idx);
		else
			err = check_stack_read(env, state, off, size,
					       value_regno);
	} else if (reg_is_pkt_pointer(reg)) {
		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
			verbose(env, "cannot write into packet\n");
			return -EACCES;
		}
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into packet\n",
				value_regno);
			return -EACCES;
		}
		err = check_packet_access(env, regno, off, size, false);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);
	} else {
		verbose(env, "R%d invalid mem access '%s'\n", regno,
			reg_type_str[reg->type]);
		return -EACCES;
	}

	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
	    regs[value_regno].type == SCALAR_VALUE) {
		/* b/h/w load zero-extends, mark upper bits as known 0 */
		coerce_reg_to_size(&regs[value_regno], size);
	}
	return err;
}

static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose(env, "BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(env, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	if (is_pointer_value(env, insn->src_reg)) {
		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
		return -EACCES;
	}

	if (is_ctx_reg(env, insn->dst_reg) ||
	    is_pkt_reg(env, insn->dst_reg)) {
		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
			"context" : "packet");
		return -EACCES;
	}

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1, true);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
}
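/* Illustrative example, not from the original source: a typical insn that
 * reaches check_xadd() is an atomic add to an initialized stack slot, e.g.
 *
 *   BPF_MOV64_IMM(BPF_REG_1, 1),
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),  // init fp[-8]
 *   BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // fp[-8] += r1
 *
 * which is verified as a read followed by a write of the same 8 bytes.
 * The same insn with a ctx or packet destination is rejected above.
 */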
	 */
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
				int access_size, bool zero_size_allowed,
				struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *reg = cur_regs(env) + regno;
	struct bpf_func_state *state = func(env, reg);
	int off, i, slot, spi;

	if (reg->type != PTR_TO_STACK) {
		/* Allow zero-byte read from NULL, regardless of pointer type */
		if (zero_size_allowed && access_size == 0 &&
		    register_is_null(reg))
			return 0;

		verbose(env, "R%d type=%s expected=%s\n", regno,
			reg_type_str[reg->type],
			reg_type_str[PTR_TO_STACK]);
		return -EACCES;
	}

	/* Only allow fixed-offset stack reads */
	if (!tnum_is_const(reg->var_off)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "invalid variable stack read R%d var_off=%s\n",
			regno, tn_buf);
		return -EACCES;
	}
	off = reg->off + reg->var_off.value;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	if (meta && meta->raw_mode) {
		meta->access_size = access_size;
		meta->regno = regno;
		return 0;
	}

	for (i = 0; i < access_size; i++) {
		u8 *stype;

		slot = -(off + i) - 1;
		spi = slot / BPF_REG_SIZE;
		if (state->allocated_stack <= slot)
			goto err;
		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
		if (*stype == STACK_MISC)
			goto mark;
		if (*stype == STACK_ZERO) {
			/* helper can write anything into the stack */
			*stype = STACK_MISC;
			goto mark;
		}
err:
		verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
			off, i, access_size);
		return -EACCES;
mark:
		/* reading any byte out of 8-byte 'spill_slot' will cause
		 * the whole slot to be marked as 'read'
		 */
		mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
				     spi, state->frameno);
	}
	return update_stack_depth(env, state, off);
}

static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
				   int access_size, bool zero_size_allowed,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];

	switch (reg->type) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		return check_packet_access(env, regno, reg->off, access_size,
					   zero_size_allowed);
	case PTR_TO_MAP_VALUE:
		return check_map_access(env, regno, reg->off, access_size,
					zero_size_allowed);
	default: /* scalar_value|ptr_to_stack or invalid ptr */
		return check_stack_boundary(env, regno, access_size,
					    zero_size_allowed, meta);
	}
}

static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_MEM ||
	       type == ARG_PTR_TO_MEM_OR_NULL ||
	       type == ARG_PTR_TO_UNINIT_MEM;
}

static bool arg_type_is_mem_size(enum bpf_arg_type type)
{
	return type == ARG_CONST_SIZE ||
	       type == ARG_CONST_SIZE_OR_ZERO;
}
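/* Illustrative example, not from the original source: a probe-read-style
 * helper pairs ARG_PTR_TO_UNINIT_MEM with a following size argument
 * (exact arg types vary by helper). A call passing a stack buffer then
 * looks like:
 *
 *   BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),  // r1 = fp - 8
 *   BPF_MOV64_IMM(BPF_REG_2, 8),            // r2 = buffer size
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read),
 *
 * and check_helper_mem_access() validates [r1, r1 + r2) against the stack.
 */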
static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type,
			  struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	enum bpf_reg_type expected_type, type = reg->type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	err = check_reg_arg(env, regno, SRC_OP);
	if (err)
		return err;

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose(env, "R%d leaks addr into helper function\n",
				regno);
			return -EACCES;
		}
		return 0;
	}

	if (type_is_pkt_pointer(type) &&
	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
		verbose(env, "helper access to the packet is not allowed\n");
		return -EACCES;
	}

	if (arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
		if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
		    type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_CONST_SIZE ||
		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
		expected_type = SCALAR_VALUE;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type_is_mem_ptr(arg_type)) {
		expected_type = PTR_TO_STACK;
		/* One exception here. In case function allows for NULL to be
		 * passed in as argument, it's a SCALAR_VALUE type. Final test
		 * happens during stack boundary checking.
		 */
		if (register_is_null(reg) &&
		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
			/* final test in check_stack_boundary() */;
		else if (!type_is_pkt_pointer(type) &&
			 type != PTR_TO_MAP_VALUE &&
			 type != expected_type)
			goto err_type;
		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
	} else {
		verbose(env, "unsupported arg_type %d\n", arg_type);
		return -EFAULT;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		meta->map_ptr = reg->map_ptr;
	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!meta->map_ptr) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose(env, "invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		err = check_helper_mem_access(env, regno,
					      meta->map_ptr->key_size, false,
					      NULL);
	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!meta->map_ptr) {
			/* kernel subsystem misconfigured verifier */
			verbose(env, "invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		err = check_helper_mem_access(env, regno,
					      meta->map_ptr->value_size, false,
					      NULL);
	} else if (arg_type_is_mem_size(arg_type)) {
		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);

		/* remember the mem_size which may be used later
		 * to refine return values.
		 */
		meta->msize_smax_value = reg->smax_value;
		meta->msize_umax_value = reg->umax_value;

		/* The register is SCALAR_VALUE; the access check
		 * happens using its boundaries.
		 */
		if (!tnum_is_const(reg->var_off))
			/* For unprivileged variable accesses, disable raw
			 * mode so that the program is required to
			 * initialize all the memory that the helper could
			 * just partially fill up.
			 */
			meta = NULL;

		if (reg->smin_value < 0) {
			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
				regno);
			return -EACCES;
		}

		if (reg->umin_value == 0) {
			err = check_helper_mem_access(env, regno - 1, 0,
						      zero_size_allowed,
						      meta);
			if (err)
				return err;
		}

		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
				regno);
			return -EACCES;
		}
		err = check_helper_mem_access(env, regno - 1,
					      reg->umax_value,
					      zero_size_allowed, meta);
	}

	return err;
err_type:
	verbose(env, "R%d type=%s expected=%s\n", regno,
		reg_type_str[type], reg_type_str[expected_type]);
	return -EACCES;
}
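/* Illustrative example, not from the original source: a size register must
 * carry provable bounds before it reaches the checks above. A common
 * pattern is masking the size, e.g.
 *
 *   BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 0x3f),  // now 0 <= r2 <= 63
 *
 * after which umax_value = 63 < BPF_MAX_VAR_SIZ and smin_value >= 0, so
 * the whole possible range [buf, buf + 63] is validated against the
 * buffer in the preceding register.
 */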
static int check_map_func_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map, int func_id)
{
	if (!map)
		return 0;

	/* We need a two way check, first is from map perspective ... */
	switch (map->map_type) {
	case BPF_MAP_TYPE_PROG_ARRAY:
		if (func_id != BPF_FUNC_tail_call)
			goto error;
		break;
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		if (func_id != BPF_FUNC_perf_event_read &&
		    func_id != BPF_FUNC_perf_event_output &&
		    func_id != BPF_FUNC_perf_event_read_value)
			goto error;
		break;
	case BPF_MAP_TYPE_STACK_TRACE:
		if (func_id != BPF_FUNC_get_stackid)
			goto error;
		break;
	case BPF_MAP_TYPE_CGROUP_ARRAY:
		if (func_id != BPF_FUNC_skb_under_cgroup &&
		    func_id != BPF_FUNC_current_task_under_cgroup)
			goto error;
		break;
	/* devmap returns a pointer to a live net_device ifindex that we cannot
	 * allow to be modified from bpf side. So do not allow lookup elements
	 * for now.
	 */
	case BPF_MAP_TYPE_DEVMAP:
		if (func_id != BPF_FUNC_redirect_map)
			goto error;
		break;
	/* Restrict bpf side of cpumap and xskmap, open when use-cases
	 * appear.
	 */
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
		if (func_id != BPF_FUNC_redirect_map)
			goto error;
		break;
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		if (func_id != BPF_FUNC_map_lookup_elem)
			goto error;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
		if (func_id != BPF_FUNC_sk_redirect_map &&
		    func_id != BPF_FUNC_sock_map_update &&
		    func_id != BPF_FUNC_map_delete_elem &&
		    func_id != BPF_FUNC_msg_redirect_map)
			goto error;
		break;
	case BPF_MAP_TYPE_SOCKHASH:
		if (func_id != BPF_FUNC_sk_redirect_hash &&
		    func_id != BPF_FUNC_sock_hash_update &&
		    func_id != BPF_FUNC_map_delete_elem &&
		    func_id != BPF_FUNC_msg_redirect_hash)
			goto error;
		break;
	default:
		break;
	}

	/* ... and second from the function itself. */
	switch (func_id) {
	case BPF_FUNC_tail_call:
		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			goto error;
		if (env->subprog_cnt > 1) {
			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
			return -EINVAL;
		}
		break;
	case BPF_FUNC_perf_event_read:
	case BPF_FUNC_perf_event_output:
	case BPF_FUNC_perf_event_read_value:
		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			goto error;
		break;
	case BPF_FUNC_get_stackid:
		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
			goto error;
		break;
	case BPF_FUNC_current_task_under_cgroup:
	case BPF_FUNC_skb_under_cgroup:
		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
			goto error;
		break;
	case BPF_FUNC_redirect_map:
		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
		    map->map_type != BPF_MAP_TYPE_XSKMAP)
			goto error;
		break;
	case BPF_FUNC_sk_redirect_map:
	case BPF_FUNC_msg_redirect_map:
	case BPF_FUNC_sock_map_update:
		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
			goto error;
		break;
	case BPF_FUNC_sk_redirect_hash:
	case BPF_FUNC_msg_redirect_hash:
	case BPF_FUNC_sock_hash_update:
		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
			goto error;
		break;
	default:
		break;
	}

	return 0;
error:
	verbose(env, "cannot pass map_type %d into func %s#%d\n",
		map->map_type, func_id_name(func_id), func_id);
	return -EINVAL;
}

static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
	int count = 0;

	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
		count++;

	/* We only support one arg being in raw mode at the moment,
	 * which is sufficient for the helper functions we have
	 * right now.
	 */
	return count <= 1;
}

static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
				    enum bpf_arg_type arg_next)
{
	return (arg_type_is_mem_ptr(arg_curr) &&
		!arg_type_is_mem_size(arg_next)) ||
	       (!arg_type_is_mem_ptr(arg_curr) &&
		arg_type_is_mem_size(arg_next));
}

static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{
	/* bpf_xxx(..., buf, len) call will access 'len'
	 * bytes from memory 'buf'. Both arg types need
	 * to be paired, so make sure there's no buggy
	 * helper function specification.
	 */
	if (arg_type_is_mem_size(fn->arg1_type) ||
	    arg_type_is_mem_ptr(fn->arg5_type) ||
	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
		return false;

	return true;
}

static int check_func_proto(const struct bpf_func_proto *fn)
{
	return check_raw_mode_ok(fn) &&
	       check_arg_pair_ok(fn) ? 0 : -EINVAL;
}
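/* Illustrative example, not from the original source: the two-way check
 * above rejects, for instance, calling bpf_tail_call() with a perf event
 * array. The map side allows only the perf_event_{read,output,read_value}
 * helpers for BPF_MAP_TYPE_PERF_EVENT_ARRAY, and the func side allows only
 * BPF_MAP_TYPE_PROG_ARRAY for BPF_FUNC_tail_call, so either direction
 * catches the mismatch with "cannot pass map_type ... into func ...".
 */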
2266 */ 2267 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, 2268 struct bpf_func_state *state) 2269 { 2270 struct bpf_reg_state *regs = state->regs, *reg; 2271 int i; 2272 2273 for (i = 0; i < MAX_BPF_REG; i++) 2274 if (reg_is_pkt_pointer_any(®s[i])) 2275 mark_reg_unknown(env, regs, i); 2276 2277 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 2278 if (state->stack[i].slot_type[0] != STACK_SPILL) 2279 continue; 2280 reg = &state->stack[i].spilled_ptr; 2281 if (reg_is_pkt_pointer_any(reg)) 2282 __mark_reg_unknown(reg); 2283 } 2284 } 2285 2286 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 2287 { 2288 struct bpf_verifier_state *vstate = env->cur_state; 2289 int i; 2290 2291 for (i = 0; i <= vstate->curframe; i++) 2292 __clear_all_pkt_pointers(env, vstate->frame[i]); 2293 } 2294 2295 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 2296 int *insn_idx) 2297 { 2298 struct bpf_verifier_state *state = env->cur_state; 2299 struct bpf_func_state *caller, *callee; 2300 int i, subprog, target_insn; 2301 2302 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 2303 verbose(env, "the call stack of %d frames is too deep\n", 2304 state->curframe + 2); 2305 return -E2BIG; 2306 } 2307 2308 target_insn = *insn_idx + insn->imm; 2309 subprog = find_subprog(env, target_insn + 1); 2310 if (subprog < 0) { 2311 verbose(env, "verifier bug. No program starts at insn %d\n", 2312 target_insn + 1); 2313 return -EFAULT; 2314 } 2315 2316 caller = state->frame[state->curframe]; 2317 if (state->frame[state->curframe + 1]) { 2318 verbose(env, "verifier bug. Frame %d already allocated\n", 2319 state->curframe + 1); 2320 return -EFAULT; 2321 } 2322 2323 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 2324 if (!callee) 2325 return -ENOMEM; 2326 state->frame[state->curframe + 1] = callee; 2327 2328 /* callee cannot access r0, r6 - r9 for reading and has to write 2329 * into its own stack before reading from it. 2330 * callee can read/write into caller's stack 2331 */ 2332 init_func_state(env, callee, 2333 /* remember the callsite, it will be used by bpf_exit */ 2334 *insn_idx /* callsite */, 2335 state->curframe + 1 /* frameno within this callchain */, 2336 subprog /* subprog number within this prog */); 2337 2338 /* copy r1 - r5 args that callee can access */ 2339 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 2340 callee->regs[i] = caller->regs[i]; 2341 2342 /* after the call regsiters r0 - r5 were scratched */ 2343 for (i = 0; i < CALLER_SAVED_REGS; i++) { 2344 mark_reg_not_init(env, caller->regs, caller_saved[i]); 2345 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 2346 } 2347 2348 /* only increment it after check_reg_arg() finished */ 2349 state->curframe++; 2350 2351 /* and go analyze first insn of the callee */ 2352 *insn_idx = target_insn; 2353 2354 if (env->log.level) { 2355 verbose(env, "caller:\n"); 2356 print_verifier_state(env, caller); 2357 verbose(env, "callee:\n"); 2358 print_verifier_state(env, callee); 2359 } 2360 return 0; 2361 } 2362 2363 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 2364 { 2365 struct bpf_verifier_state *state = env->cur_state; 2366 struct bpf_func_state *caller, *callee; 2367 struct bpf_reg_state *r0; 2368 2369 callee = state->frame[state->curframe]; 2370 r0 = &callee->regs[BPF_REG_0]; 2371 if (r0->type == PTR_TO_STACK) { 2372 /* technically it's ok to return caller's stack pointer 2373 * (or caller's caller's pointer) back to the caller, 2374 * since these pointers are valid. 
		 * pointer will be invalid as soon as function exits,
		 * but let's be conservative
		 */
		verbose(env, "cannot return stack pointer to the caller\n");
		return -EINVAL;
	}

	state->curframe--;
	caller = state->frame[state->curframe];
	/* return to the caller whatever r0 had in the callee */
	caller->regs[BPF_REG_0] = *r0;

	*insn_idx = callee->callsite + 1;
	if (env->log.level) {
		verbose(env, "returning from callee:\n");
		print_verifier_state(env, callee);
		verbose(env, "to caller at %d:\n", *insn_idx);
		print_verifier_state(env, caller);
	}
	/* clear everything in the callee */
	free_func_state(callee);
	state->frame[state->curframe + 1] = NULL;
	return 0;
}

static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
				   int func_id,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];

	if (ret_type != RET_INTEGER ||
	    (func_id != BPF_FUNC_get_stack &&
	     func_id != BPF_FUNC_probe_read_str))
		return;

	ret_reg->smax_value = meta->msize_smax_value;
	ret_reg->umax_value = meta->msize_umax_value;
	__reg_deduce_bounds(ret_reg);
	__reg_bound_offset(ret_reg);
}

static int
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
{
	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];

	if (func_id != BPF_FUNC_tail_call &&
	    func_id != BPF_FUNC_map_lookup_elem)
		return 0;
	if (meta->map_ptr == NULL) {
		verbose(env, "kernel subsystem misconfigured verifier\n");
		return -EINVAL;
	}

	if (!BPF_MAP_PTR(aux->map_state))
		bpf_map_ptr_store(aux, meta->map_ptr,
				  meta->map_ptr->unpriv_array);
	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
				  meta->map_ptr->unpriv_array);
	return 0;
}
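/* Illustrative example, not from the original source: bpf_get_stack() and
 * bpf_probe_read_str() return at most the size passed in their size
 * argument. If that size register was bounded to [0, 64] at the call site,
 * check_func_arg() recorded msize_umax_value = 64, so R0's upper bound is
 * refined to 64 here instead of staying fully unknown.
 */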
static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
	const struct bpf_func_proto *fn = NULL;
	struct bpf_reg_state *regs;
	struct bpf_call_arg_meta meta;
	bool changes_data;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
			func_id);
		return -EINVAL;
	}

	if (env->ops->get_func_proto)
		fn = env->ops->get_func_proto(func_id, env->prog);
	if (!fn) {
		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
			func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose(env, "cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	/* With LD_ABS/IND some JITs save/restore skb from r1. */
	changes_data = bpf_helper_changes_pkt_data(fn->func);
	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
			func_id_name(func_id), func_id);
		return -EINVAL;
	}

	memset(&meta, 0, sizeof(meta));
	meta.pkt_access = fn->pkt_access;

	err = check_func_proto(fn);
	if (err) {
		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
			func_id_name(func_id), func_id);
		return err;
	}

	/* check args */
	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
	if (err)
		return err;

	err = record_func_map(env, &meta, func_id, insn_idx);
	if (err)
		return err;

	/* Mark slots with STACK_MISC in case of raw mode, stack offset
	 * is inferred from register state.
	 */
	for (i = 0; i < meta.access_size; i++) {
		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
				       BPF_WRITE, -1, false);
		if (err)
			return err;
	}

	regs = cur_regs(env);
	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}

	/* update return register (already marked as written above) */
	if (fn->ret_type == RET_INTEGER) {
		/* sets type to SCALAR_VALUE */
		mark_reg_unknown(env, regs, BPF_REG_0);
	} else if (fn->ret_type == RET_VOID) {
		regs[BPF_REG_0].type = NOT_INIT;
	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
		/* There is no offset yet applied, variable or fixed */
		mark_reg_known_zero(env, regs, BPF_REG_0);
		regs[BPF_REG_0].off = 0;
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (meta.map_ptr == NULL) {
			verbose(env,
				"kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = meta.map_ptr;
		regs[BPF_REG_0].id = ++env->id_gen;
	} else {
		verbose(env, "unknown return type %d of func %s#%d\n",
			fn->ret_type, func_id_name(func_id), func_id);
		return -EINVAL;
	}

	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);

	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
	if (err)
		return err;

	if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
		const char *err_str;

#ifdef CONFIG_PERF_EVENTS
		err = get_callchain_buffers(sysctl_perf_event_max_stack);
		err_str = "cannot get callchain buffer for func %s#%d\n";
#else
		err = -ENOTSUPP;
		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
#endif
		if (err) {
			verbose(env, err_str, func_id_name(func_id), func_id);
			return err;
		}

		env->prog->has_callchain_buf = true;
	}

	if (changes_data)
		clear_all_pkt_pointers(env);
	return 0;
}
static bool signed_add_overflows(s64 a, s64 b)
{
	/* Do the add in u64, where overflow is well-defined */
	s64 res = (s64)((u64)a + (u64)b);

	if (b < 0)
		return res > a;
	return res < a;
}

static bool signed_sub_overflows(s64 a, s64 b)
{
	/* Do the sub in u64, where overflow is well-defined */
	s64 res = (s64)((u64)a - (u64)b);

	if (b < 0)
		return res < a;
	return res > a;
}

static bool check_reg_sane_offset(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg,
				  enum bpf_reg_type type)
{
	bool known = tnum_is_const(reg->var_off);
	s64 val = reg->var_off.value;
	s64 smin = reg->smin_value;

	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
		verbose(env, "math between %s pointer and %lld is not allowed\n",
			reg_type_str[type], val);
		return false;
	}

	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
		verbose(env, "%s pointer offset %d is not allowed\n",
			reg_type_str[type], reg->off);
		return false;
	}

	if (smin == S64_MIN) {
		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
			reg_type_str[type]);
		return false;
	}

	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
		verbose(env, "value %lld makes %s pointer be out of bounds\n",
			smin, reg_type_str[type]);
		return false;
	}

	return true;
}
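/* Illustrative example, not from the original source: the overflow helpers
 * above rely on wrap-around being well-defined for u64. For
 * a = S64_MAX, b = 1 the u64 add wraps to S64_MIN; since b >= 0 and
 * res < a, overflow is reported. Conversely a = -1, b = S64_MIN gives
 * res = S64_MAX > a with b < 0, again reported as overflow.
 */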
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
 * Caller should also handle BPF_MOV case separately.
 * If we return -EACCES, caller may want to try again treating pointer as a
 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
 */
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn,
				   const struct bpf_reg_state *ptr_reg,
				   const struct bpf_reg_state *off_reg)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *dst_reg;
	bool known = tnum_is_const(off_reg->var_off);
	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
	u8 opcode = BPF_OP(insn->code);
	u32 dst = insn->dst_reg;

	dst_reg = &regs[dst];

	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
	    smin_val > smax_val || umin_val > umax_val) {
		/* Taint dst register if offset had invalid bounds derived from
		 * e.g. dead branches.
		 */
		__mark_reg_unknown(dst_reg);
		return 0;
	}

	if (BPF_CLASS(insn->code) != BPF_ALU64) {
		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
		verbose(env,
			"R%d 32-bit pointer arithmetic prohibited\n",
			dst);
		return -EACCES;
	}

	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
		verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
			dst);
		return -EACCES;
	}
	if (ptr_reg->type == CONST_PTR_TO_MAP) {
		verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
			dst);
		return -EACCES;
	}
	if (ptr_reg->type == PTR_TO_PACKET_END) {
		verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
			dst);
		return -EACCES;
	}

	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
	 * The id may be overwritten later if we create a new variable offset.
	 */
	dst_reg->type = ptr_reg->type;
	dst_reg->id = ptr_reg->id;

	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
		return -EINVAL;

	switch (opcode) {
	case BPF_ADD:
		/* We can take a fixed offset as long as it doesn't overflow
		 * the s32 'off' field
		 */
		if (known && (ptr_reg->off + smin_val ==
			      (s64)(s32)(ptr_reg->off + smin_val))) {
			/* pointer += K. Accumulate it into fixed offset */
			dst_reg->smin_value = smin_ptr;
			dst_reg->smax_value = smax_ptr;
			dst_reg->umin_value = umin_ptr;
			dst_reg->umax_value = umax_ptr;
			dst_reg->var_off = ptr_reg->var_off;
			dst_reg->off = ptr_reg->off + smin_val;
			dst_reg->range = ptr_reg->range;
			break;
		}
		/* A new variable offset is created. Note that off_reg->off
		 * == 0, since it's a scalar.
		 * dst_reg gets the pointer type and since some positive
		 * integer value was added to the pointer, give it a new 'id'
		 * if it's a PTR_TO_PACKET.
		 * this creates a new 'base' pointer, off_reg (variable) gets
		 * added into the variable offset, and we copy the fixed offset
		 * from ptr_reg.
		 */
		if (signed_add_overflows(smin_ptr, smin_val) ||
		    signed_add_overflows(smax_ptr, smax_val)) {
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value = smin_ptr + smin_val;
			dst_reg->smax_value = smax_ptr + smax_val;
		}
		if (umin_ptr + umin_val < umin_ptr ||
		    umax_ptr + umax_val < umax_ptr) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value = umin_ptr + umin_val;
			dst_reg->umax_value = umax_ptr + umax_val;
		}
		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
		dst_reg->off = ptr_reg->off;
		if (reg_is_pkt_pointer(ptr_reg)) {
			dst_reg->id = ++env->id_gen;
			/* something was added to pkt_ptr, set range to zero */
			dst_reg->range = 0;
		}
		break;
	case BPF_SUB:
		if (dst_reg == off_reg) {
			/* scalar -= pointer. Creates an unknown scalar */
			verbose(env, "R%d tried to subtract pointer from scalar\n",
				dst);
			return -EACCES;
		}
		/* We don't allow subtraction from FP, because (according to
		 * test_verifier.c test "invalid fp arithmetic") JITs might not
		 * be able to deal with it.
		 */
		if (ptr_reg->type == PTR_TO_STACK) {
			verbose(env, "R%d subtraction from stack pointer prohibited\n",
				dst);
			return -EACCES;
		}
		if (known && (ptr_reg->off - smin_val ==
			      (s64)(s32)(ptr_reg->off - smin_val))) {
			/* pointer -= K. Subtract it from fixed offset */
			dst_reg->smin_value = smin_ptr;
			dst_reg->smax_value = smax_ptr;
			dst_reg->umin_value = umin_ptr;
			dst_reg->umax_value = umax_ptr;
			dst_reg->var_off = ptr_reg->var_off;
			dst_reg->id = ptr_reg->id;
			dst_reg->off = ptr_reg->off - smin_val;
			dst_reg->range = ptr_reg->range;
			break;
		}
		/* A new variable offset is created. If the subtrahend is known
		 * nonnegative, then any reg->range we had before is still good.
		 */
		if (signed_sub_overflows(smin_ptr, smax_val) ||
		    signed_sub_overflows(smax_ptr, smin_val)) {
			/* Overflow possible, we know nothing */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value = smin_ptr - smax_val;
			dst_reg->smax_value = smax_ptr - smin_val;
		}
		if (umin_ptr < umax_val) {
			/* Overflow possible, we know nothing */
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			/* Cannot overflow (as long as bounds are consistent) */
			dst_reg->umin_value = umin_ptr - umax_val;
			dst_reg->umax_value = umax_ptr - umin_val;
		}
		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
		dst_reg->off = ptr_reg->off;
		if (reg_is_pkt_pointer(ptr_reg)) {
			dst_reg->id = ++env->id_gen;
			/* something was subtracted from pkt_ptr, so reset
			 * the range if the subtrahend could be negative
			 */
			if (smin_val < 0)
				dst_reg->range = 0;
		}
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* bitwise ops on pointers are troublesome, prohibit. */
		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
			dst, bpf_alu_string[opcode >> 4]);
		return -EACCES;
	default:
		/* other operators (e.g. MUL,LSH) produce non-pointer results */
		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
			dst, bpf_alu_string[opcode >> 4]);
		return -EACCES;
	}

	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
		return -EINVAL;

	__update_reg_bounds(dst_reg);
	__reg_deduce_bounds(dst_reg);
	__reg_bound_offset(dst_reg);
	return 0;
}
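/* Illustrative example, not from the original source: adding a bounded
 * scalar to a packet pointer takes the variable-offset path above:
 *
 *   BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),           // r3 = pkt, off=0
 *   BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7),        // r4 in [0, 7]
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),  // r3 += r4
 *
 * r3 keeps type PTR_TO_PACKET but gets a fresh id and range = 0, so it
 * must be re-compared against pkt_end before any dereference.
 */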
/* WARNING: This function does calculations on 64-bit values, but the actual
 * execution may occur on 32-bit values. Therefore, things like bitshifts
 * need extra checks in the 32-bit case.
 */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
				      struct bpf_insn *insn,
				      struct bpf_reg_state *dst_reg,
				      struct bpf_reg_state src_reg)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 opcode = BPF_OP(insn->code);
	bool src_known, dst_known;
	s64 smin_val, smax_val;
	u64 umin_val, umax_val;
	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;

	smin_val = src_reg.smin_value;
	smax_val = src_reg.smax_value;
	umin_val = src_reg.umin_value;
	umax_val = src_reg.umax_value;
	src_known = tnum_is_const(src_reg.var_off);
	dst_known = tnum_is_const(dst_reg->var_off);

	if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
	    smin_val > smax_val || umin_val > umax_val) {
		/* Taint dst register if offset had invalid bounds derived from
		 * e.g. dead branches.
		 */
		__mark_reg_unknown(dst_reg);
		return 0;
	}

	if (!src_known &&
	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
		__mark_reg_unknown(dst_reg);
		return 0;
	}

	switch (opcode) {
	case BPF_ADD:
		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value += smin_val;
			dst_reg->smax_value += smax_val;
		}
		if (dst_reg->umin_value + umin_val < umin_val ||
		    dst_reg->umax_value + umax_val < umax_val) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value += umin_val;
			dst_reg->umax_value += umax_val;
		}
		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_SUB:
		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
			/* Overflow possible, we know nothing */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value -= smax_val;
			dst_reg->smax_value -= smin_val;
		}
		if (dst_reg->umin_value < umax_val) {
			/* Overflow possible, we know nothing */
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			/* Cannot overflow (as long as bounds are consistent) */
			dst_reg->umin_value -= umax_val;
			dst_reg->umax_value -= umin_val;
		}
		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_MUL:
		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
		if (smin_val < 0 || dst_reg->smin_value < 0) {
			/* Ain't nobody got time to multiply that sign */
			__mark_reg_unbounded(dst_reg);
			__update_reg_bounds(dst_reg);
			break;
		}
		/* Both values are positive, so we can work with unsigned and
		 * copy the result to signed (unless it exceeds S64_MAX).
		 */
		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
			/* Potential overflow, we know nothing */
			__mark_reg_unbounded(dst_reg);
			/* (except what we can learn from the var_off) */
			__update_reg_bounds(dst_reg);
			break;
		}
		dst_reg->umin_value *= umin_val;
		dst_reg->umax_value *= umax_val;
		if (dst_reg->umax_value > S64_MAX) {
			/* Overflow possible, we know nothing */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		break;
	case BPF_AND:
		if (src_known && dst_known) {
			__mark_reg_known(dst_reg, dst_reg->var_off.value &
						  src_reg.var_off.value);
			break;
		}
		/* We get our minimum from the var_off, since that's inherently
		 * bitwise. Our maximum is the minimum of the operands' maxima.
		 */
		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
		dst_reg->umin_value = dst_reg->var_off.value;
		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
		if (dst_reg->smin_value < 0 || smin_val < 0) {
			/* Lose signed bounds when ANDing negative numbers,
			 * ain't nobody got time for that.
			 */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			/* ANDing two positives gives a positive, so safe to
			 * cast result into s64.
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_OR:
		if (src_known && dst_known) {
			__mark_reg_known(dst_reg, dst_reg->var_off.value |
						  src_reg.var_off.value);
			break;
		}
		/* We get our maximum from the var_off, and our minimum is the
		 * maximum of the operands' minima
		 */
		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
		dst_reg->umax_value = dst_reg->var_off.value |
				      dst_reg->var_off.mask;
		if (dst_reg->smin_value < 0 || smin_val < 0) {
			/* Lose signed bounds when ORing negative numbers,
			 * ain't nobody got time for that.
			 */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			/* ORing two positives gives a positive, so safe to
			 * cast result into s64.
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_LSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* We lose all sign bit information (except what we can pick
		 * up from var_off)
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		/* If we might shift our top bit out, then we know nothing */
		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value <<= umin_val;
			dst_reg->umax_value <<= umax_val;
		}
		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_RSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* BPF_RSH is an unsigned shift. If the value in dst_reg might
		 * be negative, then either:
		 * 1) src_reg might be zero, so the sign bit of the result is
		 *    unknown, so we lose our signed bounds
		 * 2) it's known negative, thus the unsigned bounds capture the
		 *    signed bounds
		 * 3) the signed bounds cross zero, so they tell us nothing
		 *    about the result
		 * If the value in dst_reg is known nonnegative, then again the
		 * unsigned bounds capture the signed bounds.
		 * Thus, in all cases it suffices to blow away our signed bounds
		 * and rely on inferring new ones from the unsigned bounds and
		 * var_off of the result.
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
		dst_reg->umin_value >>= umax_val;
		dst_reg->umax_value >>= umin_val;
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_ARSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}

		/* Upon reaching here, src_known is true and
		 * umax_val is equal to umin_val.
		 */
		dst_reg->smin_value >>= umin_val;
		dst_reg->smax_value >>= umin_val;
		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);

		/* blow away the dst_reg umin_value/umax_value and rely on
		 * dst_reg var_off to refine the result.
		 */
		dst_reg->umin_value = 0;
		dst_reg->umax_value = U64_MAX;
		__update_reg_bounds(dst_reg);
		break;
	default:
		mark_reg_unknown(env, regs, insn->dst_reg);
		break;
	}

	if (BPF_CLASS(insn->code) != BPF_ALU64) {
		/* 32-bit ALU ops are (32,32)->32 */
		coerce_reg_to_size(dst_reg, 4);
		coerce_reg_to_size(&src_reg, 4);
	}

	__reg_deduce_bounds(dst_reg);
	__reg_bound_offset(dst_reg);
	return 0;
}

/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
 * and var_off.
 */
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
	u8 opcode = BPF_OP(insn->code);

	dst_reg = &regs[insn->dst_reg];
	src_reg = NULL;
	if (dst_reg->type != SCALAR_VALUE)
		ptr_reg = dst_reg;
	if (BPF_SRC(insn->code) == BPF_X) {
		src_reg = &regs[insn->src_reg];
		if (src_reg->type != SCALAR_VALUE) {
			if (dst_reg->type != SCALAR_VALUE) {
				/* Combining two pointers by any ALU op yields
				 * an arbitrary scalar. Disallow all math except
				 * pointer subtraction
				 */
				if (opcode == BPF_SUB) {
					mark_reg_unknown(env, regs, insn->dst_reg);
					return 0;
				}
				verbose(env, "R%d pointer %s pointer prohibited\n",
					insn->dst_reg,
					bpf_alu_string[opcode >> 4]);
				return -EACCES;
			} else {
				/* scalar += pointer
				 * This is legal, but we have to reverse our
				 * src/dest handling in computing the range
				 */
				return adjust_ptr_min_max_vals(env, insn,
							       src_reg, dst_reg);
			}
		} else if (ptr_reg) {
			/* pointer += scalar */
			return adjust_ptr_min_max_vals(env, insn,
						       dst_reg, src_reg);
		}
	} else {
		/* Pretend the src is a reg with a known value, since we only
		 * need to be able to read from this state.
		 */
		off_reg.type = SCALAR_VALUE;
		__mark_reg_known(&off_reg, insn->imm);
		src_reg = &off_reg;
		if (ptr_reg) /* pointer += K */
			return adjust_ptr_min_max_vals(env, insn,
						       ptr_reg, src_reg);
	}

	/* Got here implies adding two SCALAR_VALUEs */
	if (WARN_ON_ONCE(ptr_reg)) {
		print_verifier_state(env, state);
		verbose(env, "verifier internal error: unexpected ptr_reg\n");
		return -EINVAL;
	}
	if (WARN_ON(!src_reg)) {
		print_verifier_state(env, state);
		verbose(env, "verifier internal error: no src_reg\n");
		return -EINVAL;
	}
	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
}
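/* Illustrative example, not from the original source: the dispatch above
 * handles both operand orders. With r1 = PTR_TO_MAP_VALUE and r2 a scalar:
 *
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),  // pointer += scalar
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),  // scalar += pointer
 *
 * both end up in adjust_ptr_min_max_vals(), with src/dst swapped in the
 * second case; pointer-pointer ops other than BPF_SUB are rejected.
 */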
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose(env, "BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
			    BPF_CLASS(insn->code) == BPF_ALU64) {
				verbose(env, "BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose(env, "R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(env, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose(env, "BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose(env, "BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(env, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				/* case: R1 = R2
				 * copy register state to dest reg
				 */
				regs[insn->dst_reg] = regs[insn->src_reg];
				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
			} else {
				/* R1 = (u32) R2 */
				if (is_pointer_value(env, insn->src_reg)) {
					verbose(env,
						"R%d partial copy of pointer\n",
						insn->src_reg);
					return -EACCES;
				}
				mark_reg_unknown(env, regs, insn->dst_reg);
				coerce_reg_to_size(&regs[insn->dst_reg], 4);
			}
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			regs[insn->dst_reg].type = SCALAR_VALUE;
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				__mark_reg_known(regs + insn->dst_reg,
						 insn->imm);
			} else {
				__mark_reg_known(regs + insn->dst_reg,
						 (u32)insn->imm);
			}
		}

	} else if (opcode > BPF_END) {
		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
		return -EINVAL;

	} else {	/* all other ALU ops: and, sub, xor, add, ... */

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose(env, "BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose(env, "BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose(env, "div by zero\n");
			return -EINVAL;
		}

		if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
			verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
			return -EINVAL;
		}

		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose(env, "invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
		if (err)
			return err;

		return adjust_reg_min_max_vals(env, insn);
	}

	return 0;
}
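/* Illustrative example, not from the original source: two insns rejected by
 * the checks above:
 *
 *   BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 64),   // "invalid shift 64"
 *   BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),    // "R10 partial copy of pointer"
 *
 * the second one because a 32-bit move would leak the low half of the
 * frame pointer into a scalar.
 */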
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
				   struct bpf_reg_state *dst_reg,
				   enum bpf_reg_type type,
				   bool range_right_open)
{
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *reg;
	u16 new_range;
	int i, j;

	if (dst_reg->off < 0 ||
	    (dst_reg->off == 0 && range_right_open))
		/* This doesn't give us any range */
		return;

	if (dst_reg->umax_value > MAX_PACKET_OFF ||
	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
		/* Risk of overflow. For instance, ptr + (1<<63) may be less
		 * than pkt_end, but that's because it's also less than pkt.
		 */
		return;

	new_range = dst_reg->off;
	if (range_right_open)
		new_range--;

	/* Examples for register markings:
	 *
	 * pkt_data in dst register:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (r2 > pkt_end) goto <handle exception>
	 *   <access okay>
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (r2 < pkt_end) goto <access okay>
	 *   <handle exception>
	 *
	 *   Where:
	 *     r2 == dst_reg, pkt_end == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * pkt_data in src register:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (pkt_end >= r2) goto <access okay>
	 *   <handle exception>
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (pkt_end <= r2) goto <handle exception>
	 *   <access okay>
	 *
	 *   Where:
	 *     pkt_end == dst_reg, r2 == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
	 * and [r3, r3 + 8-1) respectively is safe to access depending on
	 * the check.
	 */

	/* If our ids match, then we must have the same max_value. And we
	 * don't care about the other reg's fixed offset, since if it's too big
	 * the range won't allow anything.
	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
	 */
	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].type == type && regs[i].id == dst_reg->id)
			/* keep the maximum range already checked */
			regs[i].range = max(regs[i].range, new_range);

	for (j = 0; j <= vstate->curframe; j++) {
		state = vstate->frame[j];
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
			if (state->stack[i].slot_type[0] != STACK_SPILL)
				continue;
			reg = &state->stack[i].spilled_ptr;
			if (reg->type == type && reg->id == dst_reg->id)
				reg->range = max(reg->range, new_range);
		}
	}
}
if umax was 0x7f...f and var_off was (0; 0xf...fc), 3484 * then new var_off is (0; 0x7f...fc) which improves our umax. 3485 */ 3486 __update_reg_bounds(false_reg); 3487 __update_reg_bounds(true_reg); 3488 } 3489 3490 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 3491 * the variable reg. 3492 */ 3493 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 3494 struct bpf_reg_state *false_reg, u64 val, 3495 u8 opcode) 3496 { 3497 if (__is_pointer_value(false, false_reg)) 3498 return; 3499 3500 switch (opcode) { 3501 case BPF_JEQ: 3502 /* If this is false then we know nothing Jon Snow, but if it is 3503 * true then we know for sure. 3504 */ 3505 __mark_reg_known(true_reg, val); 3506 break; 3507 case BPF_JNE: 3508 /* If this is true we know nothing Jon Snow, but if it is false 3509 * we know the value for sure; 3510 */ 3511 __mark_reg_known(false_reg, val); 3512 break; 3513 case BPF_JGT: 3514 true_reg->umax_value = min(true_reg->umax_value, val - 1); 3515 false_reg->umin_value = max(false_reg->umin_value, val); 3516 break; 3517 case BPF_JSGT: 3518 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); 3519 false_reg->smin_value = max_t(s64, false_reg->smin_value, val); 3520 break; 3521 case BPF_JLT: 3522 true_reg->umin_value = max(true_reg->umin_value, val + 1); 3523 false_reg->umax_value = min(false_reg->umax_value, val); 3524 break; 3525 case BPF_JSLT: 3526 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); 3527 false_reg->smax_value = min_t(s64, false_reg->smax_value, val); 3528 break; 3529 case BPF_JGE: 3530 true_reg->umax_value = min(true_reg->umax_value, val); 3531 false_reg->umin_value = max(false_reg->umin_value, val + 1); 3532 break; 3533 case BPF_JSGE: 3534 true_reg->smax_value = min_t(s64, true_reg->smax_value, val); 3535 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); 3536 break; 3537 case BPF_JLE: 3538 true_reg->umin_value = max(true_reg->umin_value, val); 3539 false_reg->umax_value = min(false_reg->umax_value, val - 1); 3540 break; 3541 case BPF_JSLE: 3542 true_reg->smin_value = max_t(s64, true_reg->smin_value, val); 3543 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); 3544 break; 3545 default: 3546 break; 3547 } 3548 3549 __reg_deduce_bounds(false_reg); 3550 __reg_deduce_bounds(true_reg); 3551 /* We might have learned some bits from the bounds. */ 3552 __reg_bound_offset(false_reg); 3553 __reg_bound_offset(true_reg); 3554 /* Intersecting with the old var_off might have improved our bounds 3555 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 3556 * then new var_off is (0; 0x7f...fc) which improves our umax. 3557 */ 3558 __update_reg_bounds(false_reg); 3559 __update_reg_bounds(true_reg); 3560 } 3561 3562 /* Regs are known to be equal, so intersect their min/max/var_off */ 3563 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 3564 struct bpf_reg_state *dst_reg) 3565 { 3566 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 3567 dst_reg->umin_value); 3568 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 3569 dst_reg->umax_value); 3570 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 3571 dst_reg->smin_value); 3572 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 3573 dst_reg->smax_value); 3574 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 3575 dst_reg->var_off); 3576 /* We might have learned new bounds from the var_off. 
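 * E.g. (illustrative): intersecting var_off (0; 0xff) with the constant
 * var_off (0x40; 0x0) yields the constant 0x40, and the
 * __update_reg_bounds() calls below then tighten umin/umax to 0x40 too.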
*/ 3577 __update_reg_bounds(src_reg); 3578 __update_reg_bounds(dst_reg); 3579 /* We might have learned something about the sign bit. */ 3580 __reg_deduce_bounds(src_reg); 3581 __reg_deduce_bounds(dst_reg); 3582 /* We might have learned some bits from the bounds. */ 3583 __reg_bound_offset(src_reg); 3584 __reg_bound_offset(dst_reg); 3585 /* Intersecting with the old var_off might have improved our bounds 3586 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 3587 * then new var_off is (0; 0x7f...fc) which improves our umax. 3588 */ 3589 __update_reg_bounds(src_reg); 3590 __update_reg_bounds(dst_reg); 3591 } 3592 3593 static void reg_combine_min_max(struct bpf_reg_state *true_src, 3594 struct bpf_reg_state *true_dst, 3595 struct bpf_reg_state *false_src, 3596 struct bpf_reg_state *false_dst, 3597 u8 opcode) 3598 { 3599 switch (opcode) { 3600 case BPF_JEQ: 3601 __reg_combine_min_max(true_src, true_dst); 3602 break; 3603 case BPF_JNE: 3604 __reg_combine_min_max(false_src, false_dst); 3605 break; 3606 } 3607 } 3608 3609 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, 3610 bool is_null) 3611 { 3612 struct bpf_reg_state *reg = ®s[regno]; 3613 3614 if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { 3615 /* Old offset (both fixed and variable parts) should 3616 * have been known-zero, because we don't allow pointer 3617 * arithmetic on pointers that might be NULL. 3618 */ 3619 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 3620 !tnum_equals_const(reg->var_off, 0) || 3621 reg->off)) { 3622 __mark_reg_known_zero(reg); 3623 reg->off = 0; 3624 } 3625 if (is_null) { 3626 reg->type = SCALAR_VALUE; 3627 } else if (reg->map_ptr->inner_map_meta) { 3628 reg->type = CONST_PTR_TO_MAP; 3629 reg->map_ptr = reg->map_ptr->inner_map_meta; 3630 } else { 3631 reg->type = PTR_TO_MAP_VALUE; 3632 } 3633 /* We don't need id from this point onwards anymore, thus we 3634 * should better reset it, so that state pruning has chances 3635 * to take effect. 3636 */ 3637 reg->id = 0; 3638 } 3639 } 3640 3641 /* The logic is similar to find_good_pkt_pointers(), both could eventually 3642 * be folded together at some point. 
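 * A minimal sketch of the pattern both functions serve (illustrative,
 * not from the original source; map fd and key setup omitted):
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),        // NULL check on R0
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), // safe: PTR_TO_MAP_VALUE
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),
 * mark_map_regs() below flips R0, and every reg or spilled slot sharing
 * its id, to PTR_TO_MAP_VALUE in the fall-through branch and to
 * SCALAR_VALUE in the branch where R0 == NULL.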
3643 */ 3644 static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, 3645 bool is_null) 3646 { 3647 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3648 struct bpf_reg_state *regs = state->regs; 3649 u32 id = regs[regno].id; 3650 int i, j; 3651 3652 for (i = 0; i < MAX_BPF_REG; i++) 3653 mark_map_reg(regs, i, id, is_null); 3654 3655 for (j = 0; j <= vstate->curframe; j++) { 3656 state = vstate->frame[j]; 3657 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 3658 if (state->stack[i].slot_type[0] != STACK_SPILL) 3659 continue; 3660 mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); 3661 } 3662 } 3663 } 3664 3665 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 3666 struct bpf_reg_state *dst_reg, 3667 struct bpf_reg_state *src_reg, 3668 struct bpf_verifier_state *this_branch, 3669 struct bpf_verifier_state *other_branch) 3670 { 3671 if (BPF_SRC(insn->code) != BPF_X) 3672 return false; 3673 3674 switch (BPF_OP(insn->code)) { 3675 case BPF_JGT: 3676 if ((dst_reg->type == PTR_TO_PACKET && 3677 src_reg->type == PTR_TO_PACKET_END) || 3678 (dst_reg->type == PTR_TO_PACKET_META && 3679 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 3680 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 3681 find_good_pkt_pointers(this_branch, dst_reg, 3682 dst_reg->type, false); 3683 } else if ((dst_reg->type == PTR_TO_PACKET_END && 3684 src_reg->type == PTR_TO_PACKET) || 3685 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 3686 src_reg->type == PTR_TO_PACKET_META)) { 3687 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 3688 find_good_pkt_pointers(other_branch, src_reg, 3689 src_reg->type, true); 3690 } else { 3691 return false; 3692 } 3693 break; 3694 case BPF_JLT: 3695 if ((dst_reg->type == PTR_TO_PACKET && 3696 src_reg->type == PTR_TO_PACKET_END) || 3697 (dst_reg->type == PTR_TO_PACKET_META && 3698 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 3699 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 3700 find_good_pkt_pointers(other_branch, dst_reg, 3701 dst_reg->type, true); 3702 } else if ((dst_reg->type == PTR_TO_PACKET_END && 3703 src_reg->type == PTR_TO_PACKET) || 3704 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 3705 src_reg->type == PTR_TO_PACKET_META)) { 3706 /* pkt_end < pkt_data', pkt_data < pkt_meta' */ 3707 find_good_pkt_pointers(this_branch, src_reg, 3708 src_reg->type, false); 3709 } else { 3710 return false; 3711 } 3712 break; 3713 case BPF_JGE: 3714 if ((dst_reg->type == PTR_TO_PACKET && 3715 src_reg->type == PTR_TO_PACKET_END) || 3716 (dst_reg->type == PTR_TO_PACKET_META && 3717 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 3718 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 3719 find_good_pkt_pointers(this_branch, dst_reg, 3720 dst_reg->type, true); 3721 } else if ((dst_reg->type == PTR_TO_PACKET_END && 3722 src_reg->type == PTR_TO_PACKET) || 3723 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 3724 src_reg->type == PTR_TO_PACKET_META)) { 3725 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 3726 find_good_pkt_pointers(other_branch, src_reg, 3727 src_reg->type, false); 3728 } else { 3729 return false; 3730 } 3731 break; 3732 case BPF_JLE: 3733 if ((dst_reg->type == PTR_TO_PACKET && 3734 src_reg->type == PTR_TO_PACKET_END) || 3735 (dst_reg->type == PTR_TO_PACKET_META && 3736 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 3737 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 3738 find_good_pkt_pointers(other_branch, dst_reg, 3739 dst_reg->type, false); 3740 } else if ((dst_reg->type ==
PTR_TO_PACKET_END && 3741 src_reg->type == PTR_TO_PACKET) || 3742 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 3743 src_reg->type == PTR_TO_PACKET_META)) { 3744 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 3745 find_good_pkt_pointers(this_branch, src_reg, 3746 src_reg->type, true); 3747 } else { 3748 return false; 3749 } 3750 break; 3751 default: 3752 return false; 3753 } 3754 3755 return true; 3756 } 3757 3758 static int check_cond_jmp_op(struct bpf_verifier_env *env, 3759 struct bpf_insn *insn, int *insn_idx) 3760 { 3761 struct bpf_verifier_state *this_branch = env->cur_state; 3762 struct bpf_verifier_state *other_branch; 3763 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 3764 struct bpf_reg_state *dst_reg, *other_branch_regs; 3765 u8 opcode = BPF_OP(insn->code); 3766 int err; 3767 3768 if (opcode > BPF_JSLE) { 3769 verbose(env, "invalid BPF_JMP opcode %x\n", opcode); 3770 return -EINVAL; 3771 } 3772 3773 if (BPF_SRC(insn->code) == BPF_X) { 3774 if (insn->imm != 0) { 3775 verbose(env, "BPF_JMP uses reserved fields\n"); 3776 return -EINVAL; 3777 } 3778 3779 /* check src1 operand */ 3780 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3781 if (err) 3782 return err; 3783 3784 if (is_pointer_value(env, insn->src_reg)) { 3785 verbose(env, "R%d pointer comparison prohibited\n", 3786 insn->src_reg); 3787 return -EACCES; 3788 } 3789 } else { 3790 if (insn->src_reg != BPF_REG_0) { 3791 verbose(env, "BPF_JMP uses reserved fields\n"); 3792 return -EINVAL; 3793 } 3794 } 3795 3796 /* check src2 operand */ 3797 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3798 if (err) 3799 return err; 3800 3801 dst_reg = ®s[insn->dst_reg]; 3802 3803 /* detect if R == 0 where R was initialized to zero earlier */ 3804 if (BPF_SRC(insn->code) == BPF_K && 3805 (opcode == BPF_JEQ || opcode == BPF_JNE) && 3806 dst_reg->type == SCALAR_VALUE && 3807 tnum_is_const(dst_reg->var_off)) { 3808 if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) || 3809 (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) { 3810 /* if (imm == imm) goto pc+off; 3811 * only follow the goto, ignore fall-through 3812 */ 3813 *insn_idx += insn->off; 3814 return 0; 3815 } else { 3816 /* if (imm != imm) goto pc+off; 3817 * only follow fall-through branch, since 3818 * that's where the program will go 3819 */ 3820 return 0; 3821 } 3822 } 3823 3824 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); 3825 if (!other_branch) 3826 return -EFAULT; 3827 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 3828 3829 /* detect if we are comparing against a constant value so we can adjust 3830 * our min/max values for our dst register. 3831 * this is only legit if both are scalars (or pointers to the same 3832 * object, I suppose, but we don't support that right now), because 3833 * otherwise the different base pointers mean the offsets aren't 3834 * comparable. 
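 * E.g. (illustrative): for BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 100, 5) with
 * R1 a SCALAR_VALUE, reg_set_min_max() below gives the taken branch
 * R1.umin_value >= 101 and the fall-through branch R1.umax_value <= 100.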
3835 */ 3836 if (BPF_SRC(insn->code) == BPF_X) { 3837 if (dst_reg->type == SCALAR_VALUE && 3838 regs[insn->src_reg].type == SCALAR_VALUE) { 3839 if (tnum_is_const(regs[insn->src_reg].var_off)) 3840 reg_set_min_max(&other_branch_regs[insn->dst_reg], 3841 dst_reg, regs[insn->src_reg].var_off.value, 3842 opcode); 3843 else if (tnum_is_const(dst_reg->var_off)) 3844 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], 3845 ®s[insn->src_reg], 3846 dst_reg->var_off.value, opcode); 3847 else if (opcode == BPF_JEQ || opcode == BPF_JNE) 3848 /* Comparing for equality, we can combine knowledge */ 3849 reg_combine_min_max(&other_branch_regs[insn->src_reg], 3850 &other_branch_regs[insn->dst_reg], 3851 ®s[insn->src_reg], 3852 ®s[insn->dst_reg], opcode); 3853 } 3854 } else if (dst_reg->type == SCALAR_VALUE) { 3855 reg_set_min_max(&other_branch_regs[insn->dst_reg], 3856 dst_reg, insn->imm, opcode); 3857 } 3858 3859 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ 3860 if (BPF_SRC(insn->code) == BPF_K && 3861 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 3862 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 3863 /* Mark all identical map registers in each branch as either 3864 * safe or unknown depending R == 0 or R != 0 conditional. 3865 */ 3866 mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); 3867 mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); 3868 } else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg], 3869 this_branch, other_branch) && 3870 is_pointer_value(env, insn->dst_reg)) { 3871 verbose(env, "R%d pointer comparison prohibited\n", 3872 insn->dst_reg); 3873 return -EACCES; 3874 } 3875 if (env->log.level) 3876 print_verifier_state(env, this_branch->frame[this_branch->curframe]); 3877 return 0; 3878 } 3879 3880 /* return the map pointer stored inside BPF_LD_IMM64 instruction */ 3881 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) 3882 { 3883 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; 3884 3885 return (struct bpf_map *) (unsigned long) imm64; 3886 } 3887 3888 /* verify BPF_LD_IMM64 instruction */ 3889 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 3890 { 3891 struct bpf_reg_state *regs = cur_regs(env); 3892 int err; 3893 3894 if (BPF_SIZE(insn->code) != BPF_DW) { 3895 verbose(env, "invalid BPF_LD_IMM insn\n"); 3896 return -EINVAL; 3897 } 3898 if (insn->off != 0) { 3899 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 3900 return -EINVAL; 3901 } 3902 3903 err = check_reg_arg(env, insn->dst_reg, DST_OP); 3904 if (err) 3905 return err; 3906 3907 if (insn->src_reg == 0) { 3908 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 3909 3910 regs[insn->dst_reg].type = SCALAR_VALUE; 3911 __mark_reg_known(®s[insn->dst_reg], imm); 3912 return 0; 3913 } 3914 3915 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ 3916 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); 3917 3918 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 3919 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); 3920 return 0; 3921 } 3922 3923 static bool may_access_skb(enum bpf_prog_type type) 3924 { 3925 switch (type) { 3926 case BPF_PROG_TYPE_SOCKET_FILTER: 3927 case BPF_PROG_TYPE_SCHED_CLS: 3928 case BPF_PROG_TYPE_SCHED_ACT: 3929 return true; 3930 default: 3931 return false; 3932 } 3933 } 3934 3935 /* verify safety of LD_ABS|LD_IND instructions: 3936 * - they can only appear in the programs where ctx == skb 3937 * - since they are wrappers of function calls, they 
scratch R1-R5 registers, 3938 * preserve R6-R9, and store return value into R0 3939 * 3940 * Implicit input: 3941 * ctx == skb == R6 == CTX 3942 * 3943 * Explicit input: 3944 * SRC == any register 3945 * IMM == 32-bit immediate 3946 * 3947 * Output: 3948 * R0 - 8/16/32-bit skb data converted to cpu endianness 3949 */ 3950 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 3951 { 3952 struct bpf_reg_state *regs = cur_regs(env); 3953 u8 mode = BPF_MODE(insn->code); 3954 int i, err; 3955 3956 if (!may_access_skb(env->prog->type)) { 3957 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 3958 return -EINVAL; 3959 } 3960 3961 if (!env->ops->gen_ld_abs) { 3962 verbose(env, "bpf verifier is misconfigured\n"); 3963 return -EINVAL; 3964 } 3965 3966 if (env->subprog_cnt > 1) { 3967 /* when a program has LD_ABS insns, JITs and the interpreter assume 3968 * that r1 == ctx == skb, which is not the case for callees 3969 * that can have arbitrary arguments. It's problematic 3970 * for the main prog as well, since JITs would need to analyze 3971 * all functions in order to make proper register save/restore 3972 * decisions in the main prog. Hence disallow LD_ABS with calls. 3973 */ 3974 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); 3975 return -EINVAL; 3976 } 3977 3978 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 3979 BPF_SIZE(insn->code) == BPF_DW || 3980 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 3981 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 3982 return -EINVAL; 3983 } 3984 3985 /* check whether implicit source operand (register R6) is readable */ 3986 err = check_reg_arg(env, BPF_REG_6, SRC_OP); 3987 if (err) 3988 return err; 3989 3990 if (regs[BPF_REG_6].type != PTR_TO_CTX) { 3991 verbose(env, 3992 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 3993 return -EINVAL; 3994 } 3995 3996 if (mode == BPF_IND) { 3997 /* check explicit source operand */ 3998 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3999 if (err) 4000 return err; 4001 } 4002 4003 /* reset caller saved regs to unreadable */ 4004 for (i = 0; i < CALLER_SAVED_REGS; i++) { 4005 mark_reg_not_init(env, regs, caller_saved[i]); 4006 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 4007 } 4008 4009 /* mark destination R0 register as readable, since it contains 4010 * the value fetched from the packet. 4011 * Already marked as written above.
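 * A typical (illustrative) use in a socket filter program:
 *   BPF_LD_ABS(BPF_H, 12),                       // R0 = EtherType
 *   BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x0800, 1),  // IPv4?
 * which is accepted only while R6 still holds the ctx (skb), as
 * enforced above.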
4012 */ 4013 mark_reg_unknown(env, regs, BPF_REG_0); 4014 return 0; 4015 } 4016 4017 static int check_return_code(struct bpf_verifier_env *env) 4018 { 4019 struct bpf_reg_state *reg; 4020 struct tnum range = tnum_range(0, 1); 4021 4022 switch (env->prog->type) { 4023 case BPF_PROG_TYPE_CGROUP_SKB: 4024 case BPF_PROG_TYPE_CGROUP_SOCK: 4025 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4026 case BPF_PROG_TYPE_SOCK_OPS: 4027 case BPF_PROG_TYPE_CGROUP_DEVICE: 4028 break; 4029 default: 4030 return 0; 4031 } 4032 4033 reg = cur_regs(env) + BPF_REG_0; 4034 if (reg->type != SCALAR_VALUE) { 4035 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 4036 reg_type_str[reg->type]); 4037 return -EINVAL; 4038 } 4039 4040 if (!tnum_in(range, reg->var_off)) { 4041 verbose(env, "At program exit the register R0 "); 4042 if (!tnum_is_unknown(reg->var_off)) { 4043 char tn_buf[48]; 4044 4045 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4046 verbose(env, "has value %s", tn_buf); 4047 } else { 4048 verbose(env, "has unknown scalar value"); 4049 } 4050 verbose(env, " should have been 0 or 1\n"); 4051 return -EINVAL; 4052 } 4053 return 0; 4054 } 4055 4056 /* non-recursive DFS pseudo code 4057 * 1 procedure DFS-iterative(G,v): 4058 * 2 label v as discovered 4059 * 3 let S be a stack 4060 * 4 S.push(v) 4061 * 5 while S is not empty 4062 * 6 t <- S.pop() 4063 * 7 if t is what we're looking for: 4064 * 8 return t 4065 * 9 for all edges e in G.adjacentEdges(t) do 4066 * 10 if edge e is already labelled 4067 * 11 continue with the next edge 4068 * 12 w <- G.adjacentVertex(t,e) 4069 * 13 if vertex w is not discovered and not explored 4070 * 14 label e as tree-edge 4071 * 15 label w as discovered 4072 * 16 S.push(w) 4073 * 17 continue at 5 4074 * 18 else if vertex w is discovered 4075 * 19 label e as back-edge 4076 * 20 else 4077 * 21 // vertex w is explored 4078 * 22 label e as forward- or cross-edge 4079 * 23 label t as explored 4080 * 24 S.pop() 4081 * 4082 * convention: 4083 * 0x10 - discovered 4084 * 0x11 - discovered and fall-through edge labelled 4085 * 0x12 - discovered and fall-through and branch edges labelled 4086 * 0x20 - explored 4087 */ 4088 4089 enum { 4090 DISCOVERED = 0x10, 4091 EXPLORED = 0x20, 4092 FALLTHROUGH = 1, 4093 BRANCH = 2, 4094 }; 4095 4096 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) 4097 4098 static int *insn_stack; /* stack of insns to process */ 4099 static int cur_stack; /* current stack index */ 4100 static int *insn_state; 4101 4102 /* t, w, e - match pseudo-code above: 4103 * t - index of current instruction 4104 * w - next instruction 4105 * e - edge 4106 */ 4107 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) 4108 { 4109 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 4110 return 0; 4111 4112 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 4113 return 0; 4114 4115 if (w < 0 || w >= env->prog->len) { 4116 verbose(env, "jump out of range from insn %d to %d\n", t, w); 4117 return -EINVAL; 4118 } 4119 4120 if (e == BRANCH) 4121 /* mark branch target for state pruning */ 4122 env->explored_states[w] = STATE_LIST_MARK; 4123 4124 if (insn_state[w] == 0) { 4125 /* tree-edge */ 4126 insn_state[t] = DISCOVERED | e; 4127 insn_state[w] = DISCOVERED; 4128 if (cur_stack >= env->prog->len) 4129 return -E2BIG; 4130 insn_stack[cur_stack++] = w; 4131 return 1; 4132 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 4133 verbose(env, "back-edge from insn %d to %d\n", t, w); 4134 return -EINVAL; 4135 } else if 
(insn_state[w] == EXPLORED) { 4136 /* forward- or cross-edge */ 4137 insn_state[t] = DISCOVERED | e; 4138 } else { 4139 verbose(env, "insn state internal bug\n"); 4140 return -EFAULT; 4141 } 4142 return 0; 4143 } 4144 4145 /* non-recursive depth-first-search to detect loops in BPF program 4146 * loop == back-edge in directed graph 4147 */ 4148 static int check_cfg(struct bpf_verifier_env *env) 4149 { 4150 struct bpf_insn *insns = env->prog->insnsi; 4151 int insn_cnt = env->prog->len; 4152 int ret = 0; 4153 int i, t; 4154 4155 ret = check_subprogs(env); 4156 if (ret < 0) 4157 return ret; 4158 4159 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 4160 if (!insn_state) 4161 return -ENOMEM; 4162 4163 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 4164 if (!insn_stack) { 4165 kfree(insn_state); 4166 return -ENOMEM; 4167 } 4168 4169 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 4170 insn_stack[0] = 0; /* 0 is the first instruction */ 4171 cur_stack = 1; 4172 4173 peek_stack: 4174 if (cur_stack == 0) 4175 goto check_state; 4176 t = insn_stack[cur_stack - 1]; 4177 4178 if (BPF_CLASS(insns[t].code) == BPF_JMP) { 4179 u8 opcode = BPF_OP(insns[t].code); 4180 4181 if (opcode == BPF_EXIT) { 4182 goto mark_explored; 4183 } else if (opcode == BPF_CALL) { 4184 ret = push_insn(t, t + 1, FALLTHROUGH, env); 4185 if (ret == 1) 4186 goto peek_stack; 4187 else if (ret < 0) 4188 goto err_free; 4189 if (t + 1 < insn_cnt) 4190 env->explored_states[t + 1] = STATE_LIST_MARK; 4191 if (insns[t].src_reg == BPF_PSEUDO_CALL) { 4192 env->explored_states[t] = STATE_LIST_MARK; 4193 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); 4194 if (ret == 1) 4195 goto peek_stack; 4196 else if (ret < 0) 4197 goto err_free; 4198 } 4199 } else if (opcode == BPF_JA) { 4200 if (BPF_SRC(insns[t].code) != BPF_K) { 4201 ret = -EINVAL; 4202 goto err_free; 4203 } 4204 /* unconditional jump with single edge */ 4205 ret = push_insn(t, t + insns[t].off + 1, 4206 FALLTHROUGH, env); 4207 if (ret == 1) 4208 goto peek_stack; 4209 else if (ret < 0) 4210 goto err_free; 4211 /* tell verifier to check for equivalent states 4212 * after every call and jump 4213 */ 4214 if (t + 1 < insn_cnt) 4215 env->explored_states[t + 1] = STATE_LIST_MARK; 4216 } else { 4217 /* conditional jump with two edges */ 4218 env->explored_states[t] = STATE_LIST_MARK; 4219 ret = push_insn(t, t + 1, FALLTHROUGH, env); 4220 if (ret == 1) 4221 goto peek_stack; 4222 else if (ret < 0) 4223 goto err_free; 4224 4225 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); 4226 if (ret == 1) 4227 goto peek_stack; 4228 else if (ret < 0) 4229 goto err_free; 4230 } 4231 } else { 4232 /* all other non-branch instructions with single 4233 * fall-through edge 4234 */ 4235 ret = push_insn(t, t + 1, FALLTHROUGH, env); 4236 if (ret == 1) 4237 goto peek_stack; 4238 else if (ret < 0) 4239 goto err_free; 4240 } 4241 4242 mark_explored: 4243 insn_state[t] = EXPLORED; 4244 if (cur_stack-- <= 0) { 4245 verbose(env, "pop stack internal bug\n"); 4246 ret = -EFAULT; 4247 goto err_free; 4248 } 4249 goto peek_stack; 4250 4251 check_state: 4252 for (i = 0; i < insn_cnt; i++) { 4253 if (insn_state[i] != EXPLORED) { 4254 verbose(env, "unreachable insn %d\n", i); 4255 ret = -EINVAL; 4256 goto err_free; 4257 } 4258 } 4259 ret = 0; /* cfg looks good */ 4260 4261 err_free: 4262 kfree(insn_state); 4263 kfree(insn_stack); 4264 return ret; 4265 } 4266 4267 /* check %cur's range satisfies %old's */ 4268 static bool range_within(struct bpf_reg_state *old, 4269 struct bpf_reg_state 
*cur) 4270 { 4271 return old->umin_value <= cur->umin_value && 4272 old->umax_value >= cur->umax_value && 4273 old->smin_value <= cur->smin_value && 4274 old->smax_value >= cur->smax_value; 4275 } 4276 4277 /* Maximum number of register states that can exist at once */ 4278 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 4279 struct idpair { 4280 u32 old; 4281 u32 cur; 4282 }; 4283 4284 /* If in the old state two registers had the same id, then they need to have 4285 * the same id in the new state as well. But that id could be different from 4286 * the old state, so we need to track the mapping from old to new ids. 4287 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 4288 * regs with old id 5 must also have new id 9 for the new state to be safe. But 4289 * regs with a different old id could still have new id 9, we don't care about 4290 * that. 4291 * So we look through our idmap to see if this old id has been seen before. If 4292 * so, we require the new id to match; otherwise, we add the id pair to the map. 4293 */ 4294 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) 4295 { 4296 unsigned int i; 4297 4298 for (i = 0; i < ID_MAP_SIZE; i++) { 4299 if (!idmap[i].old) { 4300 /* Reached an empty slot; haven't seen this id before */ 4301 idmap[i].old = old_id; 4302 idmap[i].cur = cur_id; 4303 return true; 4304 } 4305 if (idmap[i].old == old_id) 4306 return idmap[i].cur == cur_id; 4307 } 4308 /* We ran out of idmap slots, which should be impossible */ 4309 WARN_ON_ONCE(1); 4310 return false; 4311 } 4312 4313 /* Returns true if (rold safe implies rcur safe) */ 4314 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 4315 struct idpair *idmap) 4316 { 4317 bool equal; 4318 4319 if (!(rold->live & REG_LIVE_READ)) 4320 /* explored state didn't use this */ 4321 return true; 4322 4323 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0; 4324 4325 if (rold->type == PTR_TO_STACK) 4326 /* two stack pointers are equal only if they're pointing to 4327 * the same stack frame, since fp-8 in foo != fp-8 in bar 4328 */ 4329 return equal && rold->frameno == rcur->frameno; 4330 4331 if (equal) 4332 return true; 4333 4334 if (rold->type == NOT_INIT) 4335 /* explored state can't have used this */ 4336 return true; 4337 if (rcur->type == NOT_INIT) 4338 return false; 4339 switch (rold->type) { 4340 case SCALAR_VALUE: 4341 if (rcur->type == SCALAR_VALUE) { 4342 /* new val must satisfy old val knowledge */ 4343 return range_within(rold, rcur) && 4344 tnum_in(rold->var_off, rcur->var_off); 4345 } else { 4346 /* We're trying to use a pointer in place of a scalar. 4347 * Even if the scalar was unbounded, this could lead to 4348 * pointer leaks because scalars are allowed to leak 4349 * while pointers are not. We could make this safe in 4350 * special cases if root is calling us, but it's 4351 * probably not worth the hassle. 4352 */ 4353 return false; 4354 } 4355 case PTR_TO_MAP_VALUE: 4356 /* If the new min/max/var_off satisfy the old ones and 4357 * everything else matches, we are OK. 4358 * We don't care about the 'id' value, because nothing 4359 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) 4360 */ 4361 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 4362 range_within(rold, rcur) && 4363 tnum_in(rold->var_off, rcur->var_off); 4364 case PTR_TO_MAP_VALUE_OR_NULL: 4365 /* a PTR_TO_MAP_VALUE could be safe to use as a 4366 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 
4367 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 4368 * checked, doing so could have affected others with the same 4369 * id, and we can't check for that because we lost the id when 4370 * we converted to a PTR_TO_MAP_VALUE. 4371 */ 4372 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 4373 return false; 4374 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 4375 return false; 4376 /* Check our ids match any regs they're supposed to */ 4377 return check_ids(rold->id, rcur->id, idmap); 4378 case PTR_TO_PACKET_META: 4379 case PTR_TO_PACKET: 4380 if (rcur->type != rold->type) 4381 return false; 4382 /* We must have at least as much range as the old ptr 4383 * did, so that any accesses which were safe before are 4384 * still safe. This is true even if old range < old off, 4385 * since someone could have accessed through (ptr - k), or 4386 * even done ptr -= k in a register, to get a safe access. 4387 */ 4388 if (rold->range > rcur->range) 4389 return false; 4390 /* If the offsets don't match, we can't trust our alignment; 4391 * nor can we be sure that we won't fall out of range. 4392 */ 4393 if (rold->off != rcur->off) 4394 return false; 4395 /* id relations must be preserved */ 4396 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 4397 return false; 4398 /* new val must satisfy old val knowledge */ 4399 return range_within(rold, rcur) && 4400 tnum_in(rold->var_off, rcur->var_off); 4401 case PTR_TO_CTX: 4402 case CONST_PTR_TO_MAP: 4403 case PTR_TO_PACKET_END: 4404 /* Only valid matches are exact, which memcmp() above 4405 * would have accepted 4406 */ 4407 default: 4408 /* Don't know what's going on, just say it's not safe */ 4409 return false; 4410 } 4411 4412 /* Shouldn't get here; if we do, say it's not safe */ 4413 WARN_ON_ONCE(1); 4414 return false; 4415 } 4416 4417 static bool stacksafe(struct bpf_func_state *old, 4418 struct bpf_func_state *cur, 4419 struct idpair *idmap) 4420 { 4421 int i, spi; 4422 4423 /* if explored stack has more populated slots than current stack 4424 * such stacks are not equivalent 4425 */ 4426 if (old->allocated_stack > cur->allocated_stack) 4427 return false; 4428 4429 /* walk slots of the explored stack and ignore any additional 4430 * slots in the current stack, since explored(safe) state 4431 * didn't use them 4432 */ 4433 for (i = 0; i < old->allocated_stack; i++) { 4434 spi = i / BPF_REG_SIZE; 4435 4436 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) 4437 /* explored state didn't use this */ 4438 continue; 4439 4440 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 4441 continue; 4442 /* if old state was safe with misc data in the stack 4443 * it will be safe with zero-initialized stack. 
4444 * The opposite is not true 4445 */ 4446 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 4447 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 4448 continue; 4449 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 4450 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 4451 /* Ex: old explored (safe) state has STACK_SPILL in 4452 * this stack slot, but current has STACK_MISC -> 4453 * these verifier states are not equivalent, 4454 * return false to continue verification of this path 4455 */ 4456 return false; 4457 if (i % BPF_REG_SIZE) 4458 continue; 4459 if (old->stack[spi].slot_type[0] != STACK_SPILL) 4460 continue; 4461 if (!regsafe(&old->stack[spi].spilled_ptr, 4462 &cur->stack[spi].spilled_ptr, 4463 idmap)) 4464 /* when explored and current stack slot are both storing 4465 * spilled registers, check that stored pointer types 4466 * are the same as well. 4467 * Ex: explored safe path could have stored 4468 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 4469 * but current path has stored: 4470 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 4471 * such verifier states are not equivalent. 4472 * return false to continue verification of this path 4473 */ 4474 return false; 4475 } 4476 return true; 4477 } 4478 4479 /* compare two verifier states 4480 * 4481 * all states stored in state_list are known to be valid, since 4482 * verifier reached 'bpf_exit' instruction through them 4483 * 4484 * this function is called when the verifier explores different branches of 4485 * execution popped from the state stack. If it sees an old state that has 4486 * more strict register state and more strict stack state then this execution 4487 * branch doesn't need to be explored further, since verifier already 4488 * concluded that more strict state leads to valid finish. 4489 * 4490 * Therefore two states are equivalent if register state is more conservative 4491 * and explored stack state is more conservative than the current one. 4492 * Example: 4493 * explored current 4494 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 4495 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 4496 * 4497 * In other words if current stack state (one being explored) has more 4498 * valid slots than old one that already passed validation, it means 4499 * the verifier can stop exploring and conclude that current state is valid too 4500 * 4501 * Similarly with registers.
If explored state has register type as invalid 4502 * whereas register type in current state is meaningful, it means that 4503 * the current state will reach 'bpf_exit' instruction safely 4504 */ 4505 static bool func_states_equal(struct bpf_func_state *old, 4506 struct bpf_func_state *cur) 4507 { 4508 struct idpair *idmap; 4509 bool ret = false; 4510 int i; 4511 4512 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); 4513 /* If we failed to allocate the idmap, just say it's not safe */ 4514 if (!idmap) 4515 return false; 4516 4517 for (i = 0; i < MAX_BPF_REG; i++) { 4518 if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) 4519 goto out_free; 4520 } 4521 4522 if (!stacksafe(old, cur, idmap)) 4523 goto out_free; 4524 ret = true; 4525 out_free: 4526 kfree(idmap); 4527 return ret; 4528 } 4529 4530 static bool states_equal(struct bpf_verifier_env *env, 4531 struct bpf_verifier_state *old, 4532 struct bpf_verifier_state *cur) 4533 { 4534 int i; 4535 4536 if (old->curframe != cur->curframe) 4537 return false; 4538 4539 /* for states to be equal callsites have to be the same 4540 * and all frame states need to be equivalent 4541 */ 4542 for (i = 0; i <= old->curframe; i++) { 4543 if (old->frame[i]->callsite != cur->frame[i]->callsite) 4544 return false; 4545 if (!func_states_equal(old->frame[i], cur->frame[i])) 4546 return false; 4547 } 4548 return true; 4549 } 4550 4551 /* A write screens off any subsequent reads; but write marks come from the 4552 * straight-line code between a state and its parent. When we arrive at an 4553 * equivalent state (jump target or such) we didn't arrive by the straight-line 4554 * code, so read marks in the state must propagate to the parent regardless 4555 * of the state's write marks. That's what 'parent == state->parent' comparison 4556 * in mark_reg_read() and mark_stack_slot_read() is for. 4557 */ 4558 static int propagate_liveness(struct bpf_verifier_env *env, 4559 const struct bpf_verifier_state *vstate, 4560 struct bpf_verifier_state *vparent) 4561 { 4562 int i, frame, err = 0; 4563 struct bpf_func_state *state, *parent; 4564 4565 if (vparent->curframe != vstate->curframe) { 4566 WARN(1, "propagate_live: parent frame %d current frame %d\n", 4567 vparent->curframe, vstate->curframe); 4568 return -EFAULT; 4569 } 4570 /* Propagate read liveness of registers... */ 4571 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 4572 /* We don't need to worry about FP liveness because it's read-only */ 4573 for (i = 0; i < BPF_REG_FP; i++) { 4574 if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) 4575 continue; 4576 if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { 4577 err = mark_reg_read(env, vstate, vparent, i); 4578 if (err) 4579 return err; 4580 } 4581 } 4582 4583 /* ... 
and stack slots */ 4584 for (frame = 0; frame <= vstate->curframe; frame++) { 4585 state = vstate->frame[frame]; 4586 parent = vparent->frame[frame]; 4587 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 4588 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 4589 if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) 4590 continue; 4591 if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) 4592 mark_stack_slot_read(env, vstate, vparent, i, frame); 4593 } 4594 } 4595 return err; 4596 } 4597 4598 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 4599 { 4600 struct bpf_verifier_state_list *new_sl; 4601 struct bpf_verifier_state_list *sl; 4602 struct bpf_verifier_state *cur = env->cur_state; 4603 int i, j, err; 4604 4605 sl = env->explored_states[insn_idx]; 4606 if (!sl) 4607 /* this 'insn_idx' instruction wasn't marked, so we will not 4608 * be doing state search here 4609 */ 4610 return 0; 4611 4612 while (sl != STATE_LIST_MARK) { 4613 if (states_equal(env, &sl->state, cur)) { 4614 /* reached equivalent register/stack state, 4615 * prune the search. 4616 * Registers read by the continuation are read by us. 4617 * If we have any write marks in env->cur_state, they 4618 * will prevent corresponding reads in the continuation 4619 * from reaching our parent (an explored_state). Our 4620 * own state will get the read marks recorded, but 4621 * they'll be immediately forgotten as we're pruning 4622 * this state and will pop a new one. 4623 */ 4624 err = propagate_liveness(env, &sl->state, cur); 4625 if (err) 4626 return err; 4627 return 1; 4628 } 4629 sl = sl->next; 4630 } 4631 4632 /* there were no equivalent states, remember current one. 4633 * technically the current state is not proven to be safe yet, 4634 * but it will either reach outer most bpf_exit (which means it's safe) 4635 * or it will be rejected. Since there are no loops, we won't be 4636 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 4637 * again on the way to bpf_exit 4638 */ 4639 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 4640 if (!new_sl) 4641 return -ENOMEM; 4642 4643 /* add new state to the head of linked list */ 4644 err = copy_verifier_state(&new_sl->state, cur); 4645 if (err) { 4646 free_verifier_state(&new_sl->state, false); 4647 kfree(new_sl); 4648 return err; 4649 } 4650 new_sl->next = env->explored_states[insn_idx]; 4651 env->explored_states[insn_idx] = new_sl; 4652 /* connect new state to parentage chain */ 4653 cur->parent = &new_sl->state; 4654 /* clear write marks in current state: the writes we did are not writes 4655 * our child did, so they don't screen off its reads from us. 4656 * (There are no read marks in current state, because reads always mark 4657 * their parent and current state never has children yet. Only 4658 * explored_states can get read marks.) 
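 * E.g. (illustrative): if the explored state at this insn carries
 * REG_LIVE_READ only on r1, a new path arriving with different contents
 * in, say, r7 is still pruned, since regsafe() skips registers the
 * explored state never read.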
4659 */ 4660 for (i = 0; i < BPF_REG_FP; i++) 4661 cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE; 4662 4663 /* all stack frames are accessible from callee, clear them all */ 4664 for (j = 0; j <= cur->curframe; j++) { 4665 struct bpf_func_state *frame = cur->frame[j]; 4666 4667 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) 4668 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 4669 } 4670 return 0; 4671 } 4672 4673 static int do_check(struct bpf_verifier_env *env) 4674 { 4675 struct bpf_verifier_state *state; 4676 struct bpf_insn *insns = env->prog->insnsi; 4677 struct bpf_reg_state *regs; 4678 int insn_cnt = env->prog->len, i; 4679 int insn_idx, prev_insn_idx = 0; 4680 int insn_processed = 0; 4681 bool do_print_state = false; 4682 4683 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 4684 if (!state) 4685 return -ENOMEM; 4686 state->curframe = 0; 4687 state->parent = NULL; 4688 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 4689 if (!state->frame[0]) { 4690 kfree(state); 4691 return -ENOMEM; 4692 } 4693 env->cur_state = state; 4694 init_func_state(env, state->frame[0], 4695 BPF_MAIN_FUNC /* callsite */, 4696 0 /* frameno */, 4697 0 /* subprogno, zero == main subprog */); 4698 insn_idx = 0; 4699 for (;;) { 4700 struct bpf_insn *insn; 4701 u8 class; 4702 int err; 4703 4704 if (insn_idx >= insn_cnt) { 4705 verbose(env, "invalid insn idx %d insn_cnt %d\n", 4706 insn_idx, insn_cnt); 4707 return -EFAULT; 4708 } 4709 4710 insn = &insns[insn_idx]; 4711 class = BPF_CLASS(insn->code); 4712 4713 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 4714 verbose(env, 4715 "BPF program is too large. Processed %d insn\n", 4716 insn_processed); 4717 return -E2BIG; 4718 } 4719 4720 err = is_state_visited(env, insn_idx); 4721 if (err < 0) 4722 return err; 4723 if (err == 1) { 4724 /* found equivalent state, can prune the search */ 4725 if (env->log.level) { 4726 if (do_print_state) 4727 verbose(env, "\nfrom %d to %d: safe\n", 4728 prev_insn_idx, insn_idx); 4729 else 4730 verbose(env, "%d: safe\n", insn_idx); 4731 } 4732 goto process_bpf_exit; 4733 } 4734 4735 if (need_resched()) 4736 cond_resched(); 4737 4738 if (env->log.level > 1 || (env->log.level && do_print_state)) { 4739 if (env->log.level > 1) 4740 verbose(env, "%d:", insn_idx); 4741 else 4742 verbose(env, "\nfrom %d to %d:", 4743 prev_insn_idx, insn_idx); 4744 print_verifier_state(env, state->frame[state->curframe]); 4745 do_print_state = false; 4746 } 4747 4748 if (env->log.level) { 4749 const struct bpf_insn_cbs cbs = { 4750 .cb_print = verbose, 4751 .private_data = env, 4752 }; 4753 4754 verbose(env, "%d: ", insn_idx); 4755 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 4756 } 4757 4758 if (bpf_prog_is_dev_bound(env->prog->aux)) { 4759 err = bpf_prog_offload_verify_insn(env, insn_idx, 4760 prev_insn_idx); 4761 if (err) 4762 return err; 4763 } 4764 4765 regs = cur_regs(env); 4766 env->insn_aux_data[insn_idx].seen = true; 4767 if (class == BPF_ALU || class == BPF_ALU64) { 4768 err = check_alu_op(env, insn); 4769 if (err) 4770 return err; 4771 4772 } else if (class == BPF_LDX) { 4773 enum bpf_reg_type *prev_src_type, src_reg_type; 4774 4775 /* check for reserved fields is already done */ 4776 4777 /* check src operand */ 4778 err = check_reg_arg(env, insn->src_reg, SRC_OP); 4779 if (err) 4780 return err; 4781 4782 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 4783 if (err) 4784 return err; 4785 4786 src_reg_type = regs[insn->src_reg].type; 4787 4788 /* check that memory 
(src_reg + off) is readable, 4789 * the state of dst_reg will be updated by this func 4790 */ 4791 err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, 4792 BPF_SIZE(insn->code), BPF_READ, 4793 insn->dst_reg, false); 4794 if (err) 4795 return err; 4796 4797 prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; 4798 4799 if (*prev_src_type == NOT_INIT) { 4800 /* saw a valid insn 4801 * dst_reg = *(u32 *)(src_reg + off) 4802 * save type to validate intersecting paths 4803 */ 4804 *prev_src_type = src_reg_type; 4805 4806 } else if (src_reg_type != *prev_src_type && 4807 (src_reg_type == PTR_TO_CTX || 4808 *prev_src_type == PTR_TO_CTX)) { 4809 /* An abuser program is trying to use the same insn 4810 * dst_reg = *(u32*) (src_reg + off) 4811 * with different pointer types: 4812 * src_reg == ctx in one branch and 4813 * src_reg == stack|map in some other branch. 4814 * Reject it. 4815 */ 4816 verbose(env, "same insn cannot be used with different pointers\n"); 4817 return -EINVAL; 4818 } 4819 4820 } else if (class == BPF_STX) { 4821 enum bpf_reg_type *prev_dst_type, dst_reg_type; 4822 4823 if (BPF_MODE(insn->code) == BPF_XADD) { 4824 err = check_xadd(env, insn_idx, insn); 4825 if (err) 4826 return err; 4827 insn_idx++; 4828 continue; 4829 } 4830 4831 /* check src1 operand */ 4832 err = check_reg_arg(env, insn->src_reg, SRC_OP); 4833 if (err) 4834 return err; 4835 /* check src2 operand */ 4836 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 4837 if (err) 4838 return err; 4839 4840 dst_reg_type = regs[insn->dst_reg].type; 4841 4842 /* check that memory (dst_reg + off) is writeable */ 4843 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4844 BPF_SIZE(insn->code), BPF_WRITE, 4845 insn->src_reg, false); 4846 if (err) 4847 return err; 4848 4849 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; 4850 4851 if (*prev_dst_type == NOT_INIT) { 4852 *prev_dst_type = dst_reg_type; 4853 } else if (dst_reg_type != *prev_dst_type && 4854 (dst_reg_type == PTR_TO_CTX || 4855 *prev_dst_type == PTR_TO_CTX)) { 4856 verbose(env, "same insn cannot be used with different pointers\n"); 4857 return -EINVAL; 4858 } 4859 4860 } else if (class == BPF_ST) { 4861 if (BPF_MODE(insn->code) != BPF_MEM || 4862 insn->src_reg != BPF_REG_0) { 4863 verbose(env, "BPF_ST uses reserved fields\n"); 4864 return -EINVAL; 4865 } 4866 /* check src operand */ 4867 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 4868 if (err) 4869 return err; 4870 4871 if (is_ctx_reg(env, insn->dst_reg)) { 4872 verbose(env, "BPF_ST stores into R%d context is not allowed\n", 4873 insn->dst_reg); 4874 return -EACCES; 4875 } 4876 4877 /* check that memory (dst_reg + off) is writeable */ 4878 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4879 BPF_SIZE(insn->code), BPF_WRITE, 4880 -1, false); 4881 if (err) 4882 return err; 4883 4884 } else if (class == BPF_JMP) { 4885 u8 opcode = BPF_OP(insn->code); 4886 4887 if (opcode == BPF_CALL) { 4888 if (BPF_SRC(insn->code) != BPF_K || 4889 insn->off != 0 || 4890 (insn->src_reg != BPF_REG_0 && 4891 insn->src_reg != BPF_PSEUDO_CALL) || 4892 insn->dst_reg != BPF_REG_0) { 4893 verbose(env, "BPF_CALL uses reserved fields\n"); 4894 return -EINVAL; 4895 } 4896 4897 if (insn->src_reg == BPF_PSEUDO_CALL) 4898 err = check_func_call(env, insn, &insn_idx); 4899 else 4900 err = check_helper_call(env, insn->imm, insn_idx); 4901 if (err) 4902 return err; 4903 4904 } else if (opcode == BPF_JA) { 4905 if (BPF_SRC(insn->code) != BPF_K || 4906 insn->imm != 0 || 4907 insn->src_reg != BPF_REG_0 ||
4908 insn->dst_reg != BPF_REG_0) { 4909 verbose(env, "BPF_JA uses reserved fields\n"); 4910 return -EINVAL; 4911 } 4912 4913 insn_idx += insn->off + 1; 4914 continue; 4915 4916 } else if (opcode == BPF_EXIT) { 4917 if (BPF_SRC(insn->code) != BPF_K || 4918 insn->imm != 0 || 4919 insn->src_reg != BPF_REG_0 || 4920 insn->dst_reg != BPF_REG_0) { 4921 verbose(env, "BPF_EXIT uses reserved fields\n"); 4922 return -EINVAL; 4923 } 4924 4925 if (state->curframe) { 4926 /* exit from nested function */ 4927 prev_insn_idx = insn_idx; 4928 err = prepare_func_exit(env, &insn_idx); 4929 if (err) 4930 return err; 4931 do_print_state = true; 4932 continue; 4933 } 4934 4935 /* eBPF calling convention is such that R0 is used 4936 * to return the value from the eBPF program. 4937 * Make sure that it's readable at this time 4938 * of bpf_exit, which means that the program wrote 4939 * something into it earlier 4940 */ 4941 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 4942 if (err) 4943 return err; 4944 4945 if (is_pointer_value(env, BPF_REG_0)) { 4946 verbose(env, "R0 leaks addr as return value\n"); 4947 return -EACCES; 4948 } 4949 4950 err = check_return_code(env); 4951 if (err) 4952 return err; 4953 process_bpf_exit: 4954 err = pop_stack(env, &prev_insn_idx, &insn_idx); 4955 if (err < 0) { 4956 if (err != -ENOENT) 4957 return err; 4958 break; 4959 } else { 4960 do_print_state = true; 4961 continue; 4962 } 4963 } else { 4964 err = check_cond_jmp_op(env, insn, &insn_idx); 4965 if (err) 4966 return err; 4967 } 4968 } else if (class == BPF_LD) { 4969 u8 mode = BPF_MODE(insn->code); 4970 4971 if (mode == BPF_ABS || mode == BPF_IND) { 4972 err = check_ld_abs(env, insn); 4973 if (err) 4974 return err; 4975 4976 } else if (mode == BPF_IMM) { 4977 err = check_ld_imm(env, insn); 4978 if (err) 4979 return err; 4980 4981 insn_idx++; 4982 env->insn_aux_data[insn_idx].seen = true; 4983 } else { 4984 verbose(env, "invalid BPF_LD mode\n"); 4985 return -EINVAL; 4986 } 4987 } else { 4988 verbose(env, "unknown insn class %d\n", class); 4989 return -EINVAL; 4990 } 4991 4992 insn_idx++; 4993 } 4994 4995 verbose(env, "processed %d insns (limit %d), stack depth ", 4996 insn_processed, BPF_COMPLEXITY_LIMIT_INSNS); 4997 for (i = 0; i < env->subprog_cnt; i++) { 4998 u32 depth = env->subprog_info[i].stack_depth; 4999 5000 verbose(env, "%d", depth); 5001 if (i + 1 < env->subprog_cnt) 5002 verbose(env, "+"); 5003 } 5004 verbose(env, "\n"); 5005 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 5006 return 0; 5007 } 5008 5009 static int check_map_prealloc(struct bpf_map *map) 5010 { 5011 return (map->map_type != BPF_MAP_TYPE_HASH && 5012 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 5013 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 5014 !(map->map_flags & BPF_F_NO_PREALLOC); 5015 } 5016 5017 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 5018 struct bpf_map *map, 5019 struct bpf_prog *prog) 5020 5021 { 5022 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use 5023 * preallocated hash maps, since doing memory allocation 5024 * in overflow_handler can crash depending on where nmi got 5025 * triggered.
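 * E.g. (illustrative): a BPF_MAP_TYPE_HASH created with
 * BPF_F_NO_PREALLOC fails check_map_prealloc(), while the same map
 * without that flag, or an array map, is accepted for
 * BPF_PROG_TYPE_PERF_EVENT programs.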
5026 */ 5027 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 5028 if (!check_map_prealloc(map)) { 5029 verbose(env, "perf_event programs can only use preallocated hash map\n"); 5030 return -EINVAL; 5031 } 5032 if (map->inner_map_meta && 5033 !check_map_prealloc(map->inner_map_meta)) { 5034 verbose(env, "perf_event programs can only use preallocated inner hash map\n"); 5035 return -EINVAL; 5036 } 5037 } 5038 5039 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 5040 !bpf_offload_dev_match(prog, map)) { 5041 verbose(env, "offload device mismatch between prog and map\n"); 5042 return -EINVAL; 5043 } 5044 5045 return 0; 5046 } 5047 5048 /* look for pseudo eBPF instructions that access map FDs and 5049 * replace them with actual map pointers 5050 */ 5051 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 5052 { 5053 struct bpf_insn *insn = env->prog->insnsi; 5054 int insn_cnt = env->prog->len; 5055 int i, j, err; 5056 5057 err = bpf_prog_calc_tag(env->prog); 5058 if (err) 5059 return err; 5060 5061 for (i = 0; i < insn_cnt; i++, insn++) { 5062 if (BPF_CLASS(insn->code) == BPF_LDX && 5063 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 5064 verbose(env, "BPF_LDX uses reserved fields\n"); 5065 return -EINVAL; 5066 } 5067 5068 if (BPF_CLASS(insn->code) == BPF_STX && 5069 ((BPF_MODE(insn->code) != BPF_MEM && 5070 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 5071 verbose(env, "BPF_STX uses reserved fields\n"); 5072 return -EINVAL; 5073 } 5074 5075 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 5076 struct bpf_map *map; 5077 struct fd f; 5078 5079 if (i == insn_cnt - 1 || insn[1].code != 0 || 5080 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 5081 insn[1].off != 0) { 5082 verbose(env, "invalid bpf_ld_imm64 insn\n"); 5083 return -EINVAL; 5084 } 5085 5086 if (insn->src_reg == 0) 5087 /* valid generic load 64-bit imm */ 5088 goto next_insn; 5089 5090 if (insn->src_reg != BPF_PSEUDO_MAP_FD) { 5091 verbose(env, 5092 "unrecognized bpf_ld_imm64 insn\n"); 5093 return -EINVAL; 5094 } 5095 5096 f = fdget(insn->imm); 5097 map = __bpf_map_get(f); 5098 if (IS_ERR(map)) { 5099 verbose(env, "fd %d is not pointing to valid bpf_map\n", 5100 insn->imm); 5101 return PTR_ERR(map); 5102 } 5103 5104 err = check_map_prog_compatibility(env, map, env->prog); 5105 if (err) { 5106 fdput(f); 5107 return err; 5108 } 5109 5110 /* store map pointer inside BPF_LD_IMM64 instruction */ 5111 insn[0].imm = (u32) (unsigned long) map; 5112 insn[1].imm = ((u64) (unsigned long) map) >> 32; 5113 5114 /* check whether we recorded this map already */ 5115 for (j = 0; j < env->used_map_cnt; j++) 5116 if (env->used_maps[j] == map) { 5117 fdput(f); 5118 goto next_insn; 5119 } 5120 5121 if (env->used_map_cnt >= MAX_USED_MAPS) { 5122 fdput(f); 5123 return -E2BIG; 5124 } 5125 5126 /* hold the map. If the program is rejected by verifier, 5127 * the map will be released by release_maps() or it 5128 * will be used by the valid program until it's unloaded 5129 * and all maps are released in free_used_maps() 5130 */ 5131 map = bpf_map_inc(map, false); 5132 if (IS_ERR(map)) { 5133 fdput(f); 5134 return PTR_ERR(map); 5135 } 5136 env->used_maps[env->used_map_cnt++] = map; 5137 5138 fdput(f); 5139 next_insn: 5140 insn++; 5141 i++; 5142 continue; 5143 } 5144 5145 /* Basic sanity check before we invest more work here. 
*/ 5146 if (!bpf_opcode_in_insntable(insn->code)) { 5147 verbose(env, "unknown opcode %02x\n", insn->code); 5148 return -EINVAL; 5149 } 5150 } 5151 5152 /* now all pseudo BPF_LD_IMM64 instructions load valid 5153 * 'struct bpf_map *' into a register instead of user map_fd. 5154 * These pointers will be used later by verifier to validate map access. 5155 */ 5156 return 0; 5157 } 5158 5159 /* drop refcnt of maps used by the rejected program */ 5160 static void release_maps(struct bpf_verifier_env *env) 5161 { 5162 int i; 5163 5164 for (i = 0; i < env->used_map_cnt; i++) 5165 bpf_map_put(env->used_maps[i]); 5166 } 5167 5168 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 5169 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 5170 { 5171 struct bpf_insn *insn = env->prog->insnsi; 5172 int insn_cnt = env->prog->len; 5173 int i; 5174 5175 for (i = 0; i < insn_cnt; i++, insn++) 5176 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 5177 insn->src_reg = 0; 5178 } 5179 5180 /* single env->prog->insnsi[off] instruction was replaced with the range 5181 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying 5182 * [0, off) and [off, end) to new locations, so the patched range stays zero 5183 */ 5184 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, 5185 u32 off, u32 cnt) 5186 { 5187 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 5188 int i; 5189 5190 if (cnt == 1) 5191 return 0; 5192 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); 5193 if (!new_data) 5194 return -ENOMEM; 5195 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 5196 memcpy(new_data + off + cnt - 1, old_data + off, 5197 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 5198 for (i = off; i < off + cnt - 1; i++) 5199 new_data[i].seen = true; 5200 env->insn_aux_data = new_data; 5201 vfree(old_data); 5202 return 0; 5203 } 5204 5205 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 5206 { 5207 int i; 5208 5209 if (len == 1) 5210 return; 5211 /* NOTE: fake 'exit' subprog should be updated as well. */ 5212 for (i = 0; i <= env->subprog_cnt; i++) { 5213 if (env->subprog_info[i].start < off) 5214 continue; 5215 env->subprog_info[i].start += len - 1; 5216 } 5217 } 5218 5219 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 5220 const struct bpf_insn *patch, u32 len) 5221 { 5222 struct bpf_prog *new_prog; 5223 5224 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 5225 if (!new_prog) 5226 return NULL; 5227 if (adjust_insn_aux_data(env, new_prog->len, off, len)) 5228 return NULL; 5229 adjust_subprog_starts(env, off, len); 5230 return new_prog; 5231 } 5232 5233 /* The verifier does more data flow analysis than llvm and will not 5234 * explore branches that are dead at run time. Malicious programs can 5235 * have dead code too. Therefore replace all dead at-run-time code 5236 * with 'ja -1'. 5237 * 5238 * Just nops are not optimal: if they were to sit at the end of the 5239 * program and we managed to jump there through another bug, we would 5240 * execute beyond program memory. Returning exception 5241 * code also wouldn't work since we can have subprogs where the dead 5242 * code could be located.
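 * E.g. (an illustrative sketch, not from the original comment): in
 *   BPF_MOV64_IMM(BPF_REG_1, 0),
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),  // known taken
 *   BPF_MOV64_IMM(BPF_REG_0, 1),            // reachable in the CFG, never walked
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),
 * the third insn passes check_cfg() but is never visited by do_check(),
 * so aux_data[i].seen stays false and it is replaced with the 'ja -1'
 * trap below.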
5243 */ 5244 static void sanitize_dead_code(struct bpf_verifier_env *env) 5245 { 5246 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 5247 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 5248 struct bpf_insn *insn = env->prog->insnsi; 5249 const int insn_cnt = env->prog->len; 5250 int i; 5251 5252 for (i = 0; i < insn_cnt; i++) { 5253 if (aux_data[i].seen) 5254 continue; 5255 memcpy(insn + i, &trap, sizeof(trap)); 5256 } 5257 } 5258 5259 /* convert load instructions that access fields of 'struct __sk_buff' 5260 * into a sequence of instructions that access fields of 'struct sk_buff' 5261 */ 5262 static int convert_ctx_accesses(struct bpf_verifier_env *env) 5263 { 5264 const struct bpf_verifier_ops *ops = env->ops; 5265 int i, cnt, size, ctx_field_size, delta = 0; 5266 const int insn_cnt = env->prog->len; 5267 struct bpf_insn insn_buf[16], *insn; 5268 struct bpf_prog *new_prog; 5269 enum bpf_access_type type; 5270 bool is_narrower_load; 5271 u32 target_size; 5272 5273 if (ops->gen_prologue) { 5274 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 5275 env->prog); 5276 if (cnt >= ARRAY_SIZE(insn_buf)) { 5277 verbose(env, "bpf verifier is misconfigured\n"); 5278 return -EINVAL; 5279 } else if (cnt) { 5280 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 5281 if (!new_prog) 5282 return -ENOMEM; 5283 5284 env->prog = new_prog; 5285 delta += cnt - 1; 5286 } 5287 } 5288 5289 if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux)) 5290 return 0; 5291 5292 insn = env->prog->insnsi + delta; 5293 5294 for (i = 0; i < insn_cnt; i++, insn++) { 5295 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 5296 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 5297 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 5298 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 5299 type = BPF_READ; 5300 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 5301 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 5302 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 5303 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 5304 type = BPF_WRITE; 5305 else 5306 continue; 5307 5308 if (type == BPF_WRITE && 5309 env->insn_aux_data[i + delta].sanitize_stack_off) { 5310 struct bpf_insn patch[] = { 5311 /* Sanitize suspicious stack slot with zero. 5312 * There are no memory dependencies for this store, 5313 * since it's only using frame pointer and immediate 5314 * constant of zero 5315 */ 5316 BPF_ST_MEM(BPF_DW, BPF_REG_FP, 5317 env->insn_aux_data[i + delta].sanitize_stack_off, 5318 0), 5319 /* the original STX instruction will immediately 5320 * overwrite the same stack slot with appropriate value 5321 */ 5322 *insn, 5323 }; 5324 5325 cnt = ARRAY_SIZE(patch); 5326 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); 5327 if (!new_prog) 5328 return -ENOMEM; 5329 5330 delta += cnt - 1; 5331 env->prog = new_prog; 5332 insn = new_prog->insnsi + i + delta; 5333 continue; 5334 } 5335 5336 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) 5337 continue; 5338 5339 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 5340 size = BPF_LDST_BYTES(insn); 5341 5342 /* If the read access is a narrower load of the field, 5343 * convert to a 4/8-byte load, to minimize program type specific 5344 * convert_ctx_access changes. If conversion is successful, 5345 * we will apply proper mask to the result.
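 * E.g. (illustrative): a one-byte read of a four-byte context field,
 *   BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, off),
 * is rewritten into a BPF_W load at the aligned offset, and a
 * BPF_ALU32_IMM(BPF_AND, BPF_REG_0, 0xff) may then be appended so only
 * the requested byte survives.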
		is_narrower_load = size < ctx_field_size;
		if (is_narrower_load) {
			u32 off = insn->off;
			u8 size_code;

			if (type == BPF_WRITE) {
				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
				return -EINVAL;
			}

			size_code = BPF_H;
			if (ctx_field_size == 4)
				size_code = BPF_W;
			else if (ctx_field_size == 8)
				size_code = BPF_DW;

			insn->off = off & ~(ctx_field_size - 1);
			insn->code = BPF_LDX | BPF_MEM | size_code;
		}

		target_size = 0;
		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
					      &target_size);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
		    (ctx_field_size && !target_size)) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (is_narrower_load && size < target_size) {
			if (ctx_field_size <= 4)
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
			else
				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
		}

		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;

		delta += cnt - 1;

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn      = new_prog->insnsi + i + delta;
	}

	return 0;
}
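/* Illustrative layout (assumed example, not from the original source) of a
 * program with one bpf-to-bpf call, as jit_subprogs() below sees it:
 *
 *	insn 0: BPF_MOV64_IMM(BPF_REG_1, 1)
 *	insn 1: call +1		// BPF_PSEUDO_CALL, target = 1 + imm + 1 = 3
 *	insn 2: exit		// end of subprog 0 (the main program)
 *	insn 3: ...		// subprog 1 starts here
 *	insn 4: exit
 *
 * Each subprog is copied into its own bpf_prog and JITed separately; the
 * call's imm is later rewritten to the callee's offset from __bpf_call_base.
 */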
static int jit_subprogs(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog, **func, *tmp;
	int i, j, subprog_start, subprog_end = 0, len, subprog;
	struct bpf_insn *insn;
	void *old_bpf_func;
	int err = -ENOMEM;

	if (env->subprog_cnt <= 1)
		return 0;

	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		subprog = find_subprog(env, i + insn->imm + 1);
		if (subprog < 0) {
			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
				  i + insn->imm + 1);
			return -EFAULT;
		}
		/* temporarily remember subprog id inside insn instead of
		 * aux_data, since next loop will split up all insns into funcs
		 */
		insn->off = subprog;
		/* remember original imm in case JIT fails and fallback
		 * to interpreter will be needed
		 */
		env->insn_aux_data[i].call_imm = insn->imm;
		/* point imm to __bpf_call_base+1 from JITs point of view */
		insn->imm = 1;
	}

	func = kzalloc(sizeof(prog) * env->subprog_cnt, GFP_KERNEL);
	if (!func)
		return -ENOMEM;

	for (i = 0; i < env->subprog_cnt; i++) {
		subprog_start = subprog_end;
		subprog_end = env->subprog_info[i + 1].start;

		len = subprog_end - subprog_start;
		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
		if (!func[i])
			goto out_free;
		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
		       len * sizeof(struct bpf_insn));
		func[i]->type = prog->type;
		func[i]->len = len;
		if (bpf_prog_calc_tag(func[i]))
			goto out_free;
		func[i]->is_func = 1;
		/* Use bpf_prog_F_tag to indicate functions in stack traces.
		 * Long term would need debug info to populate names
		 */
		func[i]->aux->name[0] = 'F';
		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
		func[i]->jit_requested = 1;
		func[i] = bpf_int_jit_compile(func[i]);
		if (!func[i]->jited) {
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}
	/* at this point all bpf functions were successfully JITed
	 * now populate all bpf_calls with correct addresses and
	 * run last pass of JIT
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (insn->code != (BPF_JMP | BPF_CALL) ||
			    insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			subprog = insn->off;
			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
				func[subprog]->bpf_func -
				__bpf_call_base;
		}

		/* we use the aux data to keep a list of the start addresses
		 * of the JITed images for each function in the program
		 *
		 * for some architectures, such as powerpc64, the imm field
		 * might not be large enough to hold the offset of the start
		 * address of the callee's JITed image from __bpf_call_base
		 *
		 * in such cases, we can lookup the start address of a callee
		 * by using its subprog id, available from the off field of
		 * the call instruction, as an index for this list
		 */
		func[i]->aux->func = func;
		func[i]->aux->func_cnt = env->subprog_cnt;
	}
	for (i = 0; i < env->subprog_cnt; i++) {
		old_bpf_func = func[i]->bpf_func;
		tmp = bpf_int_jit_compile(func[i]);
		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
			err = -EFAULT;
			goto out_free;
		}
		cond_resched();
	}

	/* finally lock prog and jit images for all functions and
	 * populate kallsyms
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		bpf_prog_lock_ro(func[i]);
		bpf_prog_kallsyms_add(func[i]);
	}
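	/* Why two bpf_int_jit_compile() passes: on the first pass the
	 * callees' image addresses are still unknown, so every bpf-to-bpf
	 * call temporarily points at __bpf_call_base + 1. Once all images
	 * exist, each call imm is fixed up to the real offset,
	 *
	 *	insn->imm = func[subprog]->bpf_func - __bpf_call_base;
	 *
	 * and the second pass re-emits code with the final addresses. The
	 * second pass must not change image size or placement, hence the
	 * tmp/old_bpf_func consistency check above.
	 */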
	/* Last step: make the now unused interpreter insns of the main
	 * prog consistent for later dump requests, so they look the same
	 * as if they had been interpreted only.
	 */
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		insn->off = env->insn_aux_data[i].call_imm;
		subprog = find_subprog(env, i + insn->off + 1);
		insn->imm = subprog;
	}

	prog->jited = 1;
	prog->bpf_func = func[0]->bpf_func;
	prog->aux->func = func;
	prog->aux->func_cnt = env->subprog_cnt;
	return 0;
out_free:
	for (i = 0; i < env->subprog_cnt; i++)
		if (func[i])
			bpf_jit_free(func[i]);
	kfree(func);
	/* cleanup main prog to be interpreted */
	prog->jit_requested = 0;
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		insn->off = 0;
		insn->imm = env->insn_aux_data[i].call_imm;
	}
	return err;
}

static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	int i, depth;
#endif
	int err;

	err = 0;
	if (env->prog->jit_requested) {
		err = jit_subprogs(env);
		if (err == 0)
			return 0;
	}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	for (i = 0; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		depth = get_callee_stack_depth(env, insn, i);
		if (depth < 0)
			return depth;
		bpf_patch_call_args(insn, depth);
	}
	err = 0;
#endif
	return err;
}

/* fixup insn->imm field of bpf_call instructions
 * and inline eligible helpers as explicit sequence of BPF instructions
 *
 * this function is called after eBPF program passed verification
 */
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
	struct bpf_insn_aux_data *aux;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, cnt, delta = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			struct bpf_insn mask_and_div[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx div 0 -> 0 */
				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
				*insn,
			};
			struct bpf_insn mask_and_mod[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx mod 0 -> Rx */
				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
				*insn,
			};
			struct bpf_insn *patchlet;

			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
				patchlet = mask_and_div + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
			} else {
				patchlet = mask_and_mod + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
			}

			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}
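		/* The patchlets above expand to (illustrative, 64-bit div
		 * case; the leading BPF_MOV32_REG, which truncates src for
		 * the 32-bit variants, is skipped when is64 is true):
		 *
		 *	if src != 0 goto +2	// BPF_JNE
		 *	dst ^= dst		// div by zero yields 0
		 *	goto +1			// skip the original insn
		 *	dst /= src		// original insn, src != 0 here
		 *
		 * mod by zero analogously jumps over the original insn when
		 * src == 0, leaving dst unchanged (Rx mod 0 -> Rx).
		 */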
		if (BPF_CLASS(insn->code) == BPF_LD &&
		    (BPF_MODE(insn->code) == BPF_ABS ||
		     BPF_MODE(insn->code) == BPF_IND)) {
			cnt = env->ops->gen_ld_abs(insn, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			continue;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			env->prog->aux->stack_depth = MAX_BPF_STACK;

			/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			aux = &env->insn_aux_data[i + delta];
			if (!bpf_map_ptr_unpriv(aux))
				continue;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			if (bpf_map_ptr_poisoned(aux)) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}

			map_ptr = BPF_MAP_PTR(aux->map_state);
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * handlers are currently limited to 64 bit only.
		 */
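		/* Sketch of what an inlined lookup can look like (illustrative;
		 * the exact sequence comes from the map's ->map_gen_lookup
		 * callback, this is roughly what an array map emits):
		 *
		 *	if (index >= array->map.max_entries)
		 *		return NULL;
		 *	return array->value + round_up(value_size, 8) * index;
		 *
		 * i.e. the helper call below is replaced by a handful of
		 * ALU/branch insns with no function call overhead.
		 */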
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_map_lookup_elem) {
			aux = &env->insn_aux_data[i + delta];
			if (bpf_map_ptr_poisoned(aux))
				goto patch_call_imm;

			map_ptr = BPF_MAP_PTR(aux->map_state);
			if (!map_ptr->ops->map_gen_lookup)
				goto patch_call_imm;

			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
						       cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;

			/* keep walking new program and skip insns we just inserted */
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->imm == BPF_FUNC_redirect_map) {
			/* Note, we cannot use prog directly as imm as subsequent
			 * rewrites would still change the prog pointer. The only
			 * stable address we can use is aux, which also works with
			 * prog clones during blinding.
			 */
			u64 addr = (unsigned long)prog->aux;
			struct bpf_insn r4_ld[] = {
				BPF_LD_IMM64(BPF_REG_4, addr),
				*insn,
			};
			cnt = ARRAY_SIZE(r4_ld);

			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
		}
patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have a prototype and that the verifier
		 * allowed programs to call must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	return 0;
}

static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				free_verifier_state(&sl->state, false);
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}
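/* bpf_check() below strings the passes together: replace_map_fd_with_map_ptr(),
 * check_cfg() and do_check() validate the program; only if they all succeed
 * do sanitize_dead_code(), check_max_stack_depth(), convert_ctx_accesses(),
 * fixup_bpf_calls() and fixup_call_args() run, in that order.
 */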
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
	struct bpf_verifier_env *env;
	struct bpf_verifier_log *log;
	int ret = -EINVAL;

	/* no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
				     (*prog)->len);
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];

	/* grab the mutex to protect a few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf)
			goto err_unlock;
	}

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env);
		if (ret)
			goto skip_full_check;
	}

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct bpf_verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check(env);
	if (env->cur_state) {
		free_verifier_state(env->cur_state, true);
		env->cur_state = NULL;
	}

skip_full_check:
	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	if (ret == 0)
		sanitize_dead_code(env);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = fixup_bpf_calls(env);

	if (ret == 0)
		ret = fixup_call_args(env);

	if (log->level && bpf_verifier_log_full(log))
		ret = -ENOSPC;
	if (log->level && !log->ubuf) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
err_unlock:
	mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}
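/* For reference (caller side, see bpf_prog_load() in kernel/bpf/syscall.c),
 * this entry point is invoked as
 *
 *	err = bpf_check(&prog, attr);
 *
 * passing &prog because the rewrite passes above may replace the program
 * with a patched copy.
 */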