/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK.
 * These are the three pointer types recognized by the check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'.
 * The function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
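 *
 * A minimal sketch of how a program typically consumes such a
 * PTR_TO_MAP_VALUE_OR_NULL result (assuming a map with value_size >= 8):
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), // if R0 == NULL skip the access
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),  // here R0 type is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 * The conditional jump is what flips R0 from PTR_TO_MAP_VALUE_OR_NULL to
 * PTR_TO_MAP_VALUE on the fall-through path, as described above.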
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024

#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
};

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	struct bpf_verifer_log *log = &env->log;
	unsigned int n;
	va_list args;

	if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
		return;

	va_start(args, fmt);
	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
	va_end(args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
/* Historically bpf_verifier_log_write was called verbose, but the name was too
 * generic for symbol export. The function was renamed, but not the calls in
 * the verifier to avoid complicating backports. Hence the alias below.
 */
static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
				   const char *fmt, ...)
207 __attribute__((alias("bpf_verifier_log_write"))); 208 209 static bool type_is_pkt_pointer(enum bpf_reg_type type) 210 { 211 return type == PTR_TO_PACKET || 212 type == PTR_TO_PACKET_META; 213 } 214 215 /* string representation of 'enum bpf_reg_type' */ 216 static const char * const reg_type_str[] = { 217 [NOT_INIT] = "?", 218 [SCALAR_VALUE] = "inv", 219 [PTR_TO_CTX] = "ctx", 220 [CONST_PTR_TO_MAP] = "map_ptr", 221 [PTR_TO_MAP_VALUE] = "map_value", 222 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", 223 [PTR_TO_STACK] = "fp", 224 [PTR_TO_PACKET] = "pkt", 225 [PTR_TO_PACKET_META] = "pkt_meta", 226 [PTR_TO_PACKET_END] = "pkt_end", 227 }; 228 229 static void print_liveness(struct bpf_verifier_env *env, 230 enum bpf_reg_liveness live) 231 { 232 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN)) 233 verbose(env, "_"); 234 if (live & REG_LIVE_READ) 235 verbose(env, "r"); 236 if (live & REG_LIVE_WRITTEN) 237 verbose(env, "w"); 238 } 239 240 static struct bpf_func_state *func(struct bpf_verifier_env *env, 241 const struct bpf_reg_state *reg) 242 { 243 struct bpf_verifier_state *cur = env->cur_state; 244 245 return cur->frame[reg->frameno]; 246 } 247 248 static void print_verifier_state(struct bpf_verifier_env *env, 249 const struct bpf_func_state *state) 250 { 251 const struct bpf_reg_state *reg; 252 enum bpf_reg_type t; 253 int i; 254 255 if (state->frameno) 256 verbose(env, " frame%d:", state->frameno); 257 for (i = 0; i < MAX_BPF_REG; i++) { 258 reg = &state->regs[i]; 259 t = reg->type; 260 if (t == NOT_INIT) 261 continue; 262 verbose(env, " R%d", i); 263 print_liveness(env, reg->live); 264 verbose(env, "=%s", reg_type_str[t]); 265 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && 266 tnum_is_const(reg->var_off)) { 267 /* reg->off should be 0 for SCALAR_VALUE */ 268 verbose(env, "%lld", reg->var_off.value + reg->off); 269 if (t == PTR_TO_STACK) 270 verbose(env, ",call_%d", func(env, reg)->callsite); 271 } else { 272 verbose(env, "(id=%d", reg->id); 273 if (t != SCALAR_VALUE) 274 verbose(env, ",off=%d", reg->off); 275 if (type_is_pkt_pointer(t)) 276 verbose(env, ",r=%d", reg->range); 277 else if (t == CONST_PTR_TO_MAP || 278 t == PTR_TO_MAP_VALUE || 279 t == PTR_TO_MAP_VALUE_OR_NULL) 280 verbose(env, ",ks=%d,vs=%d", 281 reg->map_ptr->key_size, 282 reg->map_ptr->value_size); 283 if (tnum_is_const(reg->var_off)) { 284 /* Typically an immediate SCALAR_VALUE, but 285 * could be a pointer whose offset is too big 286 * for reg->off 287 */ 288 verbose(env, ",imm=%llx", reg->var_off.value); 289 } else { 290 if (reg->smin_value != reg->umin_value && 291 reg->smin_value != S64_MIN) 292 verbose(env, ",smin_value=%lld", 293 (long long)reg->smin_value); 294 if (reg->smax_value != reg->umax_value && 295 reg->smax_value != S64_MAX) 296 verbose(env, ",smax_value=%lld", 297 (long long)reg->smax_value); 298 if (reg->umin_value != 0) 299 verbose(env, ",umin_value=%llu", 300 (unsigned long long)reg->umin_value); 301 if (reg->umax_value != U64_MAX) 302 verbose(env, ",umax_value=%llu", 303 (unsigned long long)reg->umax_value); 304 if (!tnum_is_unknown(reg->var_off)) { 305 char tn_buf[48]; 306 307 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 308 verbose(env, ",var_off=%s", tn_buf); 309 } 310 } 311 verbose(env, ")"); 312 } 313 } 314 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 315 if (state->stack[i].slot_type[0] == STACK_SPILL) { 316 verbose(env, " fp%d", 317 (-i - 1) * BPF_REG_SIZE); 318 print_liveness(env, state->stack[i].spilled_ptr.live); 319 verbose(env, "=%s", 320 
reg_type_str[state->stack[i].spilled_ptr.type]); 321 } 322 if (state->stack[i].slot_type[0] == STACK_ZERO) 323 verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE); 324 } 325 verbose(env, "\n"); 326 } 327 328 static int copy_stack_state(struct bpf_func_state *dst, 329 const struct bpf_func_state *src) 330 { 331 if (!src->stack) 332 return 0; 333 if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { 334 /* internal bug, make state invalid to reject the program */ 335 memset(dst, 0, sizeof(*dst)); 336 return -EFAULT; 337 } 338 memcpy(dst->stack, src->stack, 339 sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); 340 return 0; 341 } 342 343 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to 344 * make it consume minimal amount of memory. check_stack_write() access from 345 * the program calls into realloc_func_state() to grow the stack size. 346 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state 347 * which this function copies over. It points to previous bpf_verifier_state 348 * which is never reallocated 349 */ 350 static int realloc_func_state(struct bpf_func_state *state, int size, 351 bool copy_old) 352 { 353 u32 old_size = state->allocated_stack; 354 struct bpf_stack_state *new_stack; 355 int slot = size / BPF_REG_SIZE; 356 357 if (size <= old_size || !size) { 358 if (copy_old) 359 return 0; 360 state->allocated_stack = slot * BPF_REG_SIZE; 361 if (!size && old_size) { 362 kfree(state->stack); 363 state->stack = NULL; 364 } 365 return 0; 366 } 367 new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), 368 GFP_KERNEL); 369 if (!new_stack) 370 return -ENOMEM; 371 if (copy_old) { 372 if (state->stack) 373 memcpy(new_stack, state->stack, 374 sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); 375 memset(new_stack + old_size / BPF_REG_SIZE, 0, 376 sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); 377 } 378 state->allocated_stack = slot * BPF_REG_SIZE; 379 kfree(state->stack); 380 state->stack = new_stack; 381 return 0; 382 } 383 384 static void free_func_state(struct bpf_func_state *state) 385 { 386 if (!state) 387 return; 388 kfree(state->stack); 389 kfree(state); 390 } 391 392 static void free_verifier_state(struct bpf_verifier_state *state, 393 bool free_self) 394 { 395 int i; 396 397 for (i = 0; i <= state->curframe; i++) { 398 free_func_state(state->frame[i]); 399 state->frame[i] = NULL; 400 } 401 if (free_self) 402 kfree(state); 403 } 404 405 /* copy verifier state from src to dst growing dst stack space 406 * when necessary to accommodate larger src stack 407 */ 408 static int copy_func_state(struct bpf_func_state *dst, 409 const struct bpf_func_state *src) 410 { 411 int err; 412 413 err = realloc_func_state(dst, src->allocated_stack, false); 414 if (err) 415 return err; 416 memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack)); 417 return copy_stack_state(dst, src); 418 } 419 420 static int copy_verifier_state(struct bpf_verifier_state *dst_state, 421 const struct bpf_verifier_state *src) 422 { 423 struct bpf_func_state *dst; 424 int i, err; 425 426 /* if dst has more stack frames then src frame, free them */ 427 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { 428 free_func_state(dst_state->frame[i]); 429 dst_state->frame[i] = NULL; 430 } 431 dst_state->curframe = src->curframe; 432 dst_state->parent = src->parent; 433 for (i = 0; i <= src->curframe; i++) { 434 dst = dst_state->frame[i]; 435 if (!dst) { 436 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 437 if (!dst) 438 return -ENOMEM; 439 
dst_state->frame[i] = dst; 440 } 441 err = copy_func_state(dst, src->frame[i]); 442 if (err) 443 return err; 444 } 445 return 0; 446 } 447 448 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, 449 int *insn_idx) 450 { 451 struct bpf_verifier_state *cur = env->cur_state; 452 struct bpf_verifier_stack_elem *elem, *head = env->head; 453 int err; 454 455 if (env->head == NULL) 456 return -ENOENT; 457 458 if (cur) { 459 err = copy_verifier_state(cur, &head->st); 460 if (err) 461 return err; 462 } 463 if (insn_idx) 464 *insn_idx = head->insn_idx; 465 if (prev_insn_idx) 466 *prev_insn_idx = head->prev_insn_idx; 467 elem = head->next; 468 free_verifier_state(&head->st, false); 469 kfree(head); 470 env->head = elem; 471 env->stack_size--; 472 return 0; 473 } 474 475 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 476 int insn_idx, int prev_insn_idx) 477 { 478 struct bpf_verifier_state *cur = env->cur_state; 479 struct bpf_verifier_stack_elem *elem; 480 int err; 481 482 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 483 if (!elem) 484 goto err; 485 486 elem->insn_idx = insn_idx; 487 elem->prev_insn_idx = prev_insn_idx; 488 elem->next = env->head; 489 env->head = elem; 490 env->stack_size++; 491 err = copy_verifier_state(&elem->st, cur); 492 if (err) 493 goto err; 494 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { 495 verbose(env, "BPF program is too complex\n"); 496 goto err; 497 } 498 return &elem->st; 499 err: 500 free_verifier_state(env->cur_state, true); 501 env->cur_state = NULL; 502 /* pop all elements and return */ 503 while (!pop_stack(env, NULL, NULL)); 504 return NULL; 505 } 506 507 #define CALLER_SAVED_REGS 6 508 static const int caller_saved[CALLER_SAVED_REGS] = { 509 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 510 }; 511 #define CALLEE_SAVED_REGS 5 512 static const int callee_saved[CALLEE_SAVED_REGS] = { 513 BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9 514 }; 515 516 static void __mark_reg_not_init(struct bpf_reg_state *reg); 517 518 /* Mark the unknown part of a register (variable offset or scalar value) as 519 * known to have the value @imm. 520 */ 521 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) 522 { 523 reg->id = 0; 524 reg->var_off = tnum_const(imm); 525 reg->smin_value = (s64)imm; 526 reg->smax_value = (s64)imm; 527 reg->umin_value = imm; 528 reg->umax_value = imm; 529 } 530 531 /* Mark the 'variable offset' part of a register as zero. This should be 532 * used only on registers holding a pointer type. 
533 */ 534 static void __mark_reg_known_zero(struct bpf_reg_state *reg) 535 { 536 __mark_reg_known(reg, 0); 537 } 538 539 static void __mark_reg_const_zero(struct bpf_reg_state *reg) 540 { 541 __mark_reg_known(reg, 0); 542 reg->off = 0; 543 reg->type = SCALAR_VALUE; 544 } 545 546 static void mark_reg_known_zero(struct bpf_verifier_env *env, 547 struct bpf_reg_state *regs, u32 regno) 548 { 549 if (WARN_ON(regno >= MAX_BPF_REG)) { 550 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); 551 /* Something bad happened, let's kill all regs */ 552 for (regno = 0; regno < MAX_BPF_REG; regno++) 553 __mark_reg_not_init(regs + regno); 554 return; 555 } 556 __mark_reg_known_zero(regs + regno); 557 } 558 559 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) 560 { 561 return type_is_pkt_pointer(reg->type); 562 } 563 564 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) 565 { 566 return reg_is_pkt_pointer(reg) || 567 reg->type == PTR_TO_PACKET_END; 568 } 569 570 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ 571 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, 572 enum bpf_reg_type which) 573 { 574 /* The register can already have a range from prior markings. 575 * This is fine as long as it hasn't been advanced from its 576 * origin. 577 */ 578 return reg->type == which && 579 reg->id == 0 && 580 reg->off == 0 && 581 tnum_equals_const(reg->var_off, 0); 582 } 583 584 /* Attempts to improve min/max values based on var_off information */ 585 static void __update_reg_bounds(struct bpf_reg_state *reg) 586 { 587 /* min signed is max(sign bit) | min(other bits) */ 588 reg->smin_value = max_t(s64, reg->smin_value, 589 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 590 /* max signed is min(sign bit) | max(other bits) */ 591 reg->smax_value = min_t(s64, reg->smax_value, 592 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 593 reg->umin_value = max(reg->umin_value, reg->var_off.value); 594 reg->umax_value = min(reg->umax_value, 595 reg->var_off.value | reg->var_off.mask); 596 } 597 598 /* Uses signed min/max values to inform unsigned, and vice-versa */ 599 static void __reg_deduce_bounds(struct bpf_reg_state *reg) 600 { 601 /* Learn sign from signed bounds. 602 * If we cannot cross the sign boundary, then signed and unsigned bounds 603 * are the same, so combine. This works even in the negative case, e.g. 604 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 605 */ 606 if (reg->smin_value >= 0 || reg->smax_value < 0) { 607 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 608 reg->umin_value); 609 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 610 reg->umax_value); 611 return; 612 } 613 /* Learn sign from unsigned bounds. Signed bounds cross the sign 614 * boundary, so we must be careful. 615 */ 616 if ((s64)reg->umax_value >= 0) { 617 /* Positive. We can't learn anything from the smin, but smax 618 * is positive, hence safe. 619 */ 620 reg->smin_value = reg->umin_value; 621 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 622 reg->umax_value); 623 } else if ((s64)reg->umin_value < 0) { 624 /* Negative. We can't learn anything from the smax, but smin 625 * is negative, hence safe. 
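		 * (E.g. if umin_value is 0xfffffffffffffffd, i.e. -3 as s64,
		 * then umax_value must have the sign bit set as well, so the
		 * whole unsigned range maps onto negative signed values and
		 * can be copied into smin/smax directly.)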
626 */ 627 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 628 reg->umin_value); 629 reg->smax_value = reg->umax_value; 630 } 631 } 632 633 /* Attempts to improve var_off based on unsigned min/max information */ 634 static void __reg_bound_offset(struct bpf_reg_state *reg) 635 { 636 reg->var_off = tnum_intersect(reg->var_off, 637 tnum_range(reg->umin_value, 638 reg->umax_value)); 639 } 640 641 /* Reset the min/max bounds of a register */ 642 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 643 { 644 reg->smin_value = S64_MIN; 645 reg->smax_value = S64_MAX; 646 reg->umin_value = 0; 647 reg->umax_value = U64_MAX; 648 } 649 650 /* Mark a register as having a completely unknown (scalar) value. */ 651 static void __mark_reg_unknown(struct bpf_reg_state *reg) 652 { 653 reg->type = SCALAR_VALUE; 654 reg->id = 0; 655 reg->off = 0; 656 reg->var_off = tnum_unknown; 657 reg->frameno = 0; 658 __mark_reg_unbounded(reg); 659 } 660 661 static void mark_reg_unknown(struct bpf_verifier_env *env, 662 struct bpf_reg_state *regs, u32 regno) 663 { 664 if (WARN_ON(regno >= MAX_BPF_REG)) { 665 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 666 /* Something bad happened, let's kill all regs except FP */ 667 for (regno = 0; regno < BPF_REG_FP; regno++) 668 __mark_reg_not_init(regs + regno); 669 return; 670 } 671 __mark_reg_unknown(regs + regno); 672 } 673 674 static void __mark_reg_not_init(struct bpf_reg_state *reg) 675 { 676 __mark_reg_unknown(reg); 677 reg->type = NOT_INIT; 678 } 679 680 static void mark_reg_not_init(struct bpf_verifier_env *env, 681 struct bpf_reg_state *regs, u32 regno) 682 { 683 if (WARN_ON(regno >= MAX_BPF_REG)) { 684 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 685 /* Something bad happened, let's kill all regs except FP */ 686 for (regno = 0; regno < BPF_REG_FP; regno++) 687 __mark_reg_not_init(regs + regno); 688 return; 689 } 690 __mark_reg_not_init(regs + regno); 691 } 692 693 static void init_reg_state(struct bpf_verifier_env *env, 694 struct bpf_func_state *state) 695 { 696 struct bpf_reg_state *regs = state->regs; 697 int i; 698 699 for (i = 0; i < MAX_BPF_REG; i++) { 700 mark_reg_not_init(env, regs, i); 701 regs[i].live = REG_LIVE_NONE; 702 } 703 704 /* frame pointer */ 705 regs[BPF_REG_FP].type = PTR_TO_STACK; 706 mark_reg_known_zero(env, regs, BPF_REG_FP); 707 regs[BPF_REG_FP].frameno = state->frameno; 708 709 /* 1st arg to a function */ 710 regs[BPF_REG_1].type = PTR_TO_CTX; 711 mark_reg_known_zero(env, regs, BPF_REG_1); 712 } 713 714 #define BPF_MAIN_FUNC (-1) 715 static void init_func_state(struct bpf_verifier_env *env, 716 struct bpf_func_state *state, 717 int callsite, int frameno, int subprogno) 718 { 719 state->callsite = callsite; 720 state->frameno = frameno; 721 state->subprogno = subprogno; 722 init_reg_state(env, state); 723 } 724 725 enum reg_arg_type { 726 SRC_OP, /* register is used as source operand */ 727 DST_OP, /* register is used as destination operand */ 728 DST_OP_NO_MARK /* same as above, check only, don't mark */ 729 }; 730 731 static int cmp_subprogs(const void *a, const void *b) 732 { 733 return *(int *)a - *(int *)b; 734 } 735 736 static int find_subprog(struct bpf_verifier_env *env, int off) 737 { 738 u32 *p; 739 740 p = bsearch(&off, env->subprog_starts, env->subprog_cnt, 741 sizeof(env->subprog_starts[0]), cmp_subprogs); 742 if (!p) 743 return -ENOENT; 744 return p - env->subprog_starts; 745 746 } 747 748 static int add_subprog(struct bpf_verifier_env *env, int off) 749 { 750 int insn_cnt = env->prog->len; 751 
int ret; 752 753 if (off >= insn_cnt || off < 0) { 754 verbose(env, "call to invalid destination\n"); 755 return -EINVAL; 756 } 757 ret = find_subprog(env, off); 758 if (ret >= 0) 759 return 0; 760 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { 761 verbose(env, "too many subprograms\n"); 762 return -E2BIG; 763 } 764 env->subprog_starts[env->subprog_cnt++] = off; 765 sort(env->subprog_starts, env->subprog_cnt, 766 sizeof(env->subprog_starts[0]), cmp_subprogs, NULL); 767 return 0; 768 } 769 770 static int check_subprogs(struct bpf_verifier_env *env) 771 { 772 int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; 773 struct bpf_insn *insn = env->prog->insnsi; 774 int insn_cnt = env->prog->len; 775 776 /* determine subprog starts. The end is one before the next starts */ 777 for (i = 0; i < insn_cnt; i++) { 778 if (insn[i].code != (BPF_JMP | BPF_CALL)) 779 continue; 780 if (insn[i].src_reg != BPF_PSEUDO_CALL) 781 continue; 782 if (!env->allow_ptr_leaks) { 783 verbose(env, "function calls to other bpf functions are allowed for root only\n"); 784 return -EPERM; 785 } 786 if (bpf_prog_is_dev_bound(env->prog->aux)) { 787 verbose(env, "function calls in offloaded programs are not supported yet\n"); 788 return -EINVAL; 789 } 790 ret = add_subprog(env, i + insn[i].imm + 1); 791 if (ret < 0) 792 return ret; 793 } 794 795 if (env->log.level > 1) 796 for (i = 0; i < env->subprog_cnt; i++) 797 verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]); 798 799 /* now check that all jumps are within the same subprog */ 800 subprog_start = 0; 801 if (env->subprog_cnt == cur_subprog) 802 subprog_end = insn_cnt; 803 else 804 subprog_end = env->subprog_starts[cur_subprog++]; 805 for (i = 0; i < insn_cnt; i++) { 806 u8 code = insn[i].code; 807 808 if (BPF_CLASS(code) != BPF_JMP) 809 goto next; 810 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 811 goto next; 812 off = i + insn[i].off + 1; 813 if (off < subprog_start || off >= subprog_end) { 814 verbose(env, "jump out of range from insn %d to %d\n", i, off); 815 return -EINVAL; 816 } 817 next: 818 if (i == subprog_end - 1) { 819 /* to avoid fall-through from one subprog into another 820 * the last insn of the subprog should be either exit 821 * or unconditional jump back 822 */ 823 if (code != (BPF_JMP | BPF_EXIT) && 824 code != (BPF_JMP | BPF_JA)) { 825 verbose(env, "last insn is not an exit or jmp\n"); 826 return -EINVAL; 827 } 828 subprog_start = subprog_end; 829 if (env->subprog_cnt == cur_subprog) 830 subprog_end = insn_cnt; 831 else 832 subprog_end = env->subprog_starts[cur_subprog++]; 833 } 834 } 835 return 0; 836 } 837 838 static 839 struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env, 840 const struct bpf_verifier_state *state, 841 struct bpf_verifier_state *parent, 842 u32 regno) 843 { 844 struct bpf_verifier_state *tmp = NULL; 845 846 /* 'parent' could be a state of caller and 847 * 'state' could be a state of callee. In such case 848 * parent->curframe < state->curframe 849 * and it's ok for r1 - r5 registers 850 * 851 * 'parent' could be a callee's state after it bpf_exit-ed. 
852 * In such case parent->curframe > state->curframe 853 * and it's ok for r0 only 854 */ 855 if (parent->curframe == state->curframe || 856 (parent->curframe < state->curframe && 857 regno >= BPF_REG_1 && regno <= BPF_REG_5) || 858 (parent->curframe > state->curframe && 859 regno == BPF_REG_0)) 860 return parent; 861 862 if (parent->curframe > state->curframe && 863 regno >= BPF_REG_6) { 864 /* for callee saved regs we have to skip the whole chain 865 * of states that belong to callee and mark as LIVE_READ 866 * the registers before the call 867 */ 868 tmp = parent; 869 while (tmp && tmp->curframe != state->curframe) { 870 tmp = tmp->parent; 871 } 872 if (!tmp) 873 goto bug; 874 parent = tmp; 875 } else { 876 goto bug; 877 } 878 return parent; 879 bug: 880 verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp); 881 verbose(env, "regno %d parent frame %d current frame %d\n", 882 regno, parent->curframe, state->curframe); 883 return NULL; 884 } 885 886 static int mark_reg_read(struct bpf_verifier_env *env, 887 const struct bpf_verifier_state *state, 888 struct bpf_verifier_state *parent, 889 u32 regno) 890 { 891 bool writes = parent == state->parent; /* Observe write marks */ 892 893 if (regno == BPF_REG_FP) 894 /* We don't need to worry about FP liveness because it's read-only */ 895 return 0; 896 897 while (parent) { 898 /* if read wasn't screened by an earlier write ... */ 899 if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN) 900 break; 901 parent = skip_callee(env, state, parent, regno); 902 if (!parent) 903 return -EFAULT; 904 /* ... then we depend on parent's value */ 905 parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ; 906 state = parent; 907 parent = state->parent; 908 writes = true; 909 } 910 return 0; 911 } 912 913 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, 914 enum reg_arg_type t) 915 { 916 struct bpf_verifier_state *vstate = env->cur_state; 917 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 918 struct bpf_reg_state *regs = state->regs; 919 920 if (regno >= MAX_BPF_REG) { 921 verbose(env, "R%d is invalid\n", regno); 922 return -EINVAL; 923 } 924 925 if (t == SRC_OP) { 926 /* check whether register used as source operand can be read */ 927 if (regs[regno].type == NOT_INIT) { 928 verbose(env, "R%d !read_ok\n", regno); 929 return -EACCES; 930 } 931 return mark_reg_read(env, vstate, vstate->parent, regno); 932 } else { 933 /* check whether register used as dest operand can be written to */ 934 if (regno == BPF_REG_FP) { 935 verbose(env, "frame pointer is read only\n"); 936 return -EACCES; 937 } 938 regs[regno].live |= REG_LIVE_WRITTEN; 939 if (t == DST_OP) 940 mark_reg_unknown(env, regs, regno); 941 } 942 return 0; 943 } 944 945 static bool is_spillable_regtype(enum bpf_reg_type type) 946 { 947 switch (type) { 948 case PTR_TO_MAP_VALUE: 949 case PTR_TO_MAP_VALUE_OR_NULL: 950 case PTR_TO_STACK: 951 case PTR_TO_CTX: 952 case PTR_TO_PACKET: 953 case PTR_TO_PACKET_META: 954 case PTR_TO_PACKET_END: 955 case CONST_PTR_TO_MAP: 956 return true; 957 default: 958 return false; 959 } 960 } 961 962 /* Does this register contain a constant zero? 
*/ 963 static bool register_is_null(struct bpf_reg_state *reg) 964 { 965 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 966 } 967 968 /* check_stack_read/write functions track spill/fill of registers, 969 * stack boundary and alignment are checked in check_mem_access() 970 */ 971 static int check_stack_write(struct bpf_verifier_env *env, 972 struct bpf_func_state *state, /* func where register points to */ 973 int off, int size, int value_regno) 974 { 975 struct bpf_func_state *cur; /* state of the current function */ 976 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 977 enum bpf_reg_type type; 978 979 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), 980 true); 981 if (err) 982 return err; 983 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 984 * so it's aligned access and [off, off + size) are within stack limits 985 */ 986 if (!env->allow_ptr_leaks && 987 state->stack[spi].slot_type[0] == STACK_SPILL && 988 size != BPF_REG_SIZE) { 989 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 990 return -EACCES; 991 } 992 993 cur = env->cur_state->frame[env->cur_state->curframe]; 994 if (value_regno >= 0 && 995 is_spillable_regtype((type = cur->regs[value_regno].type))) { 996 997 /* register containing pointer is being spilled into stack */ 998 if (size != BPF_REG_SIZE) { 999 verbose(env, "invalid size of register spill\n"); 1000 return -EACCES; 1001 } 1002 1003 if (state != cur && type == PTR_TO_STACK) { 1004 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 1005 return -EINVAL; 1006 } 1007 1008 /* save register state */ 1009 state->stack[spi].spilled_ptr = cur->regs[value_regno]; 1010 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 1011 1012 for (i = 0; i < BPF_REG_SIZE; i++) 1013 state->stack[spi].slot_type[i] = STACK_SPILL; 1014 } else { 1015 u8 type = STACK_MISC; 1016 1017 /* regular write of data into stack */ 1018 state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; 1019 1020 /* only mark the slot as written if all 8 bytes were written 1021 * otherwise read propagation may incorrectly stop too soon 1022 * when stack slots are partially written. 1023 * This heuristic means that read propagation will be 1024 * conservative, since it will add reg_live_read marks 1025 * to stack slots all the way to first state when programs 1026 * writes+reads less than 8 bytes 1027 */ 1028 if (size == BPF_REG_SIZE) 1029 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 1030 1031 /* when we zero initialize stack slots mark them as such */ 1032 if (value_regno >= 0 && 1033 register_is_null(&cur->regs[value_regno])) 1034 type = STACK_ZERO; 1035 1036 for (i = 0; i < size; i++) 1037 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 1038 type; 1039 } 1040 return 0; 1041 } 1042 1043 /* registers of every function are unique and mark_reg_read() propagates 1044 * the liveness in the following cases: 1045 * - from callee into caller for R1 - R5 that were used as arguments 1046 * - from caller into callee for R0 that used as result of the call 1047 * - from caller to the same caller skipping states of the callee for R6 - R9, 1048 * since R6 - R9 are callee saved by implicit function prologue and 1049 * caller's R6 != callee's R6, so when we propagate liveness up to 1050 * parent states we need to skip callee states for R6 - R9. 
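 *   (E.g. when the caller uses R6 after a call returns, the parent chain
 *   contains the callee's states; those are skipped (see skip_callee()) so
 *   the read mark lands on the caller's state before the call.)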
1051 * 1052 * stack slot marking is different, since stacks of caller and callee are 1053 * accessible in both (since caller can pass a pointer to caller's stack to 1054 * callee which can pass it to another function), hence mark_stack_slot_read() 1055 * has to propagate the stack liveness to all parent states at given frame number. 1056 * Consider code: 1057 * f1() { 1058 * ptr = fp - 8; 1059 * *ptr = ctx; 1060 * call f2 { 1061 * .. = *ptr; 1062 * } 1063 * .. = *ptr; 1064 * } 1065 * First *ptr is reading from f1's stack and mark_stack_slot_read() has 1066 * to mark liveness at the f1's frame and not f2's frame. 1067 * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has 1068 * to propagate liveness to f2 states at f1's frame level and further into 1069 * f1 states at f1's frame level until write into that stack slot 1070 */ 1071 static void mark_stack_slot_read(struct bpf_verifier_env *env, 1072 const struct bpf_verifier_state *state, 1073 struct bpf_verifier_state *parent, 1074 int slot, int frameno) 1075 { 1076 bool writes = parent == state->parent; /* Observe write marks */ 1077 1078 while (parent) { 1079 if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE) 1080 /* since LIVE_WRITTEN mark is only done for full 8-byte 1081 * write the read marks are conservative and parent 1082 * state may not even have the stack allocated. In such case 1083 * end the propagation, since the loop reached beginning 1084 * of the function 1085 */ 1086 break; 1087 /* if read wasn't screened by an earlier write ... */ 1088 if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) 1089 break; 1090 /* ... then we depend on parent's value */ 1091 parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ; 1092 state = parent; 1093 parent = state->parent; 1094 writes = true; 1095 } 1096 } 1097 1098 static int check_stack_read(struct bpf_verifier_env *env, 1099 struct bpf_func_state *reg_state /* func where register points to */, 1100 int off, int size, int value_regno) 1101 { 1102 struct bpf_verifier_state *vstate = env->cur_state; 1103 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 1104 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; 1105 u8 *stype; 1106 1107 if (reg_state->allocated_stack <= slot) { 1108 verbose(env, "invalid read from stack off %d+0 size %d\n", 1109 off, size); 1110 return -EACCES; 1111 } 1112 stype = reg_state->stack[spi].slot_type; 1113 1114 if (stype[0] == STACK_SPILL) { 1115 if (size != BPF_REG_SIZE) { 1116 verbose(env, "invalid size of register spill\n"); 1117 return -EACCES; 1118 } 1119 for (i = 1; i < BPF_REG_SIZE; i++) { 1120 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { 1121 verbose(env, "corrupted spill memory\n"); 1122 return -EACCES; 1123 } 1124 } 1125 1126 if (value_regno >= 0) { 1127 /* restore register state from stack */ 1128 state->regs[value_regno] = reg_state->stack[spi].spilled_ptr; 1129 /* mark reg as written since spilled pointer state likely 1130 * has its liveness marks cleared by is_state_visited() 1131 * which resets stack/reg liveness for state transitions 1132 */ 1133 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 1134 } 1135 mark_stack_slot_read(env, vstate, vstate->parent, spi, 1136 reg_state->frameno); 1137 return 0; 1138 } else { 1139 int zeros = 0; 1140 1141 for (i = 0; i < size; i++) { 1142 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) 1143 continue; 1144 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { 1145 zeros++; 1146 continue; 1147 } 
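			/* byte was never written (STACK_INVALID) or is part of
			 * a partially overwritten spilled pointer: reject the
			 * read
			 */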
1148 verbose(env, "invalid read from stack off %d+%d size %d\n", 1149 off, i, size); 1150 return -EACCES; 1151 } 1152 mark_stack_slot_read(env, vstate, vstate->parent, spi, 1153 reg_state->frameno); 1154 if (value_regno >= 0) { 1155 if (zeros == size) { 1156 /* any size read into register is zero extended, 1157 * so the whole register == const_zero 1158 */ 1159 __mark_reg_const_zero(&state->regs[value_regno]); 1160 } else { 1161 /* have read misc data from the stack */ 1162 mark_reg_unknown(env, state->regs, value_regno); 1163 } 1164 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 1165 } 1166 return 0; 1167 } 1168 } 1169 1170 /* check read/write into map element returned by bpf_map_lookup_elem() */ 1171 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 1172 int size, bool zero_size_allowed) 1173 { 1174 struct bpf_reg_state *regs = cur_regs(env); 1175 struct bpf_map *map = regs[regno].map_ptr; 1176 1177 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 1178 off + size > map->value_size) { 1179 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 1180 map->value_size, off, size); 1181 return -EACCES; 1182 } 1183 return 0; 1184 } 1185 1186 /* check read/write into a map element with possible variable offset */ 1187 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 1188 int off, int size, bool zero_size_allowed) 1189 { 1190 struct bpf_verifier_state *vstate = env->cur_state; 1191 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 1192 struct bpf_reg_state *reg = &state->regs[regno]; 1193 int err; 1194 1195 /* We may have adjusted the register to this map value, so we 1196 * need to try adding each of min_value and max_value to off 1197 * to make sure our theoretical access will be safe. 1198 */ 1199 if (env->log.level) 1200 print_verifier_state(env, state); 1201 /* The minimum value is only important with signed 1202 * comparisons where we can't assume the floor of a 1203 * value is 0. If we are using signed variables for our 1204 * index'es we need to make sure that whatever we use 1205 * will have a set floor within our range. 1206 */ 1207 if (reg->smin_value < 0) { 1208 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 1209 regno); 1210 return -EACCES; 1211 } 1212 err = __check_map_access(env, regno, reg->smin_value + off, size, 1213 zero_size_allowed); 1214 if (err) { 1215 verbose(env, "R%d min value is outside of the array range\n", 1216 regno); 1217 return err; 1218 } 1219 1220 /* If we haven't set a max value then we need to bail since we can't be 1221 * sure we won't do bad things. 1222 * If reg->umax_value + off could overflow, treat that as unbounded too. 
1223 */ 1224 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 1225 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", 1226 regno); 1227 return -EACCES; 1228 } 1229 err = __check_map_access(env, regno, reg->umax_value + off, size, 1230 zero_size_allowed); 1231 if (err) 1232 verbose(env, "R%d max value is outside of the array range\n", 1233 regno); 1234 return err; 1235 } 1236 1237 #define MAX_PACKET_OFF 0xffff 1238 1239 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 1240 const struct bpf_call_arg_meta *meta, 1241 enum bpf_access_type t) 1242 { 1243 switch (env->prog->type) { 1244 case BPF_PROG_TYPE_LWT_IN: 1245 case BPF_PROG_TYPE_LWT_OUT: 1246 /* dst_input() and dst_output() can't write for now */ 1247 if (t == BPF_WRITE) 1248 return false; 1249 /* fallthrough */ 1250 case BPF_PROG_TYPE_SCHED_CLS: 1251 case BPF_PROG_TYPE_SCHED_ACT: 1252 case BPF_PROG_TYPE_XDP: 1253 case BPF_PROG_TYPE_LWT_XMIT: 1254 case BPF_PROG_TYPE_SK_SKB: 1255 if (meta) 1256 return meta->pkt_access; 1257 1258 env->seen_direct_write = true; 1259 return true; 1260 default: 1261 return false; 1262 } 1263 } 1264 1265 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, 1266 int off, int size, bool zero_size_allowed) 1267 { 1268 struct bpf_reg_state *regs = cur_regs(env); 1269 struct bpf_reg_state *reg = ®s[regno]; 1270 1271 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 1272 (u64)off + size > reg->range) { 1273 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 1274 off, size, regno, reg->id, reg->off, reg->range); 1275 return -EACCES; 1276 } 1277 return 0; 1278 } 1279 1280 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 1281 int size, bool zero_size_allowed) 1282 { 1283 struct bpf_reg_state *regs = cur_regs(env); 1284 struct bpf_reg_state *reg = ®s[regno]; 1285 int err; 1286 1287 /* We may have added a variable offset to the packet pointer; but any 1288 * reg->range we have comes after that. We are only checking the fixed 1289 * offset. 1290 */ 1291 1292 /* We don't allow negative numbers, because we aren't tracking enough 1293 * detail to prove they're safe. 1294 */ 1295 if (reg->smin_value < 0) { 1296 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 1297 regno); 1298 return -EACCES; 1299 } 1300 err = __check_packet_access(env, regno, off, size, zero_size_allowed); 1301 if (err) { 1302 verbose(env, "R%d offset is outside of the packet\n", regno); 1303 return err; 1304 } 1305 return err; 1306 } 1307 1308 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ 1309 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, 1310 enum bpf_access_type t, enum bpf_reg_type *reg_type) 1311 { 1312 struct bpf_insn_access_aux info = { 1313 .reg_type = *reg_type, 1314 }; 1315 1316 if (env->ops->is_valid_access && 1317 env->ops->is_valid_access(off, size, t, &info)) { 1318 /* A non zero info.ctx_field_size indicates that this field is a 1319 * candidate for later verifier transformation to load the whole 1320 * field and then apply a mask when accessed with a narrower 1321 * access than actual ctx access size. A zero info.ctx_field_size 1322 * will only allow for whole field access and rejects any other 1323 * type of narrower access. 
1324 */ 1325 *reg_type = info.reg_type; 1326 1327 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 1328 /* remember the offset of last byte accessed in ctx */ 1329 if (env->prog->aux->max_ctx_offset < off + size) 1330 env->prog->aux->max_ctx_offset = off + size; 1331 return 0; 1332 } 1333 1334 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 1335 return -EACCES; 1336 } 1337 1338 static bool __is_pointer_value(bool allow_ptr_leaks, 1339 const struct bpf_reg_state *reg) 1340 { 1341 if (allow_ptr_leaks) 1342 return false; 1343 1344 return reg->type != SCALAR_VALUE; 1345 } 1346 1347 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 1348 { 1349 return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); 1350 } 1351 1352 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 1353 { 1354 const struct bpf_reg_state *reg = cur_regs(env) + regno; 1355 1356 return reg->type == PTR_TO_CTX; 1357 } 1358 1359 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 1360 const struct bpf_reg_state *reg, 1361 int off, int size, bool strict) 1362 { 1363 struct tnum reg_off; 1364 int ip_align; 1365 1366 /* Byte size accesses are always allowed. */ 1367 if (!strict || size == 1) 1368 return 0; 1369 1370 /* For platforms that do not have a Kconfig enabling 1371 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 1372 * NET_IP_ALIGN is universally set to '2'. And on platforms 1373 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 1374 * to this code only in strict mode where we want to emulate 1375 * the NET_IP_ALIGN==2 checking. Therefore use an 1376 * unconditional IP align value of '2'. 1377 */ 1378 ip_align = 2; 1379 1380 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 1381 if (!tnum_is_aligned(reg_off, size)) { 1382 char tn_buf[48]; 1383 1384 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1385 verbose(env, 1386 "misaligned packet access off %d+%s+%d+%d size %d\n", 1387 ip_align, tn_buf, reg->off, off, size); 1388 return -EACCES; 1389 } 1390 1391 return 0; 1392 } 1393 1394 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 1395 const struct bpf_reg_state *reg, 1396 const char *pointer_desc, 1397 int off, int size, bool strict) 1398 { 1399 struct tnum reg_off; 1400 1401 /* Byte size accesses are always allowed. */ 1402 if (!strict || size == 1) 1403 return 0; 1404 1405 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 1406 if (!tnum_is_aligned(reg_off, size)) { 1407 char tn_buf[48]; 1408 1409 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1410 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 1411 pointer_desc, tn_buf, reg->off, off, size); 1412 return -EACCES; 1413 } 1414 1415 return 0; 1416 } 1417 1418 static int check_ptr_alignment(struct bpf_verifier_env *env, 1419 const struct bpf_reg_state *reg, 1420 int off, int size) 1421 { 1422 bool strict = env->strict_alignment; 1423 const char *pointer_desc = ""; 1424 1425 switch (reg->type) { 1426 case PTR_TO_PACKET: 1427 case PTR_TO_PACKET_META: 1428 /* Special case, because of NET_IP_ALIGN. Given metadata sits 1429 * right in front, treat it the very same way. 
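		 * (E.g. with NET_IP_ALIGN == 2, a 4-byte load at pkt + 14,
		 * right past the Ethernet header, checks 2 + 14 = 16, which is
		 * 4-byte aligned and therefore accepted in strict mode.)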
1430 */ 1431 return check_pkt_ptr_alignment(env, reg, off, size, strict); 1432 case PTR_TO_MAP_VALUE: 1433 pointer_desc = "value "; 1434 break; 1435 case PTR_TO_CTX: 1436 pointer_desc = "context "; 1437 break; 1438 case PTR_TO_STACK: 1439 pointer_desc = "stack "; 1440 /* The stack spill tracking logic in check_stack_write() 1441 * and check_stack_read() relies on stack accesses being 1442 * aligned. 1443 */ 1444 strict = true; 1445 break; 1446 default: 1447 break; 1448 } 1449 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 1450 strict); 1451 } 1452 1453 static int update_stack_depth(struct bpf_verifier_env *env, 1454 const struct bpf_func_state *func, 1455 int off) 1456 { 1457 u16 stack = env->subprog_stack_depth[func->subprogno]; 1458 1459 if (stack >= -off) 1460 return 0; 1461 1462 /* update known max for given subprogram */ 1463 env->subprog_stack_depth[func->subprogno] = -off; 1464 return 0; 1465 } 1466 1467 /* starting from main bpf function walk all instructions of the function 1468 * and recursively walk all callees that given function can call. 1469 * Ignore jump and exit insns. 1470 * Since recursion is prevented by check_cfg() this algorithm 1471 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 1472 */ 1473 static int check_max_stack_depth(struct bpf_verifier_env *env) 1474 { 1475 int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end; 1476 struct bpf_insn *insn = env->prog->insnsi; 1477 int insn_cnt = env->prog->len; 1478 int ret_insn[MAX_CALL_FRAMES]; 1479 int ret_prog[MAX_CALL_FRAMES]; 1480 1481 process_func: 1482 /* round up to 32-bytes, since this is granularity 1483 * of interpreter stack size 1484 */ 1485 depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32); 1486 if (depth > MAX_BPF_STACK) { 1487 verbose(env, "combined stack size of %d calls is %d. Too large\n", 1488 frame + 1, depth); 1489 return -EACCES; 1490 } 1491 continue_func: 1492 if (env->subprog_cnt == subprog) 1493 subprog_end = insn_cnt; 1494 else 1495 subprog_end = env->subprog_starts[subprog]; 1496 for (; i < subprog_end; i++) { 1497 if (insn[i].code != (BPF_JMP | BPF_CALL)) 1498 continue; 1499 if (insn[i].src_reg != BPF_PSEUDO_CALL) 1500 continue; 1501 /* remember insn and function to return to */ 1502 ret_insn[frame] = i + 1; 1503 ret_prog[frame] = subprog; 1504 1505 /* find the callee */ 1506 i = i + insn[i].imm + 1; 1507 subprog = find_subprog(env, i); 1508 if (subprog < 0) { 1509 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 1510 i); 1511 return -EFAULT; 1512 } 1513 subprog++; 1514 frame++; 1515 if (frame >= MAX_CALL_FRAMES) { 1516 WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); 1517 return -EFAULT; 1518 } 1519 goto process_func; 1520 } 1521 /* end of for() loop means the last insn of the 'subprog' 1522 * was reached. Doesn't matter whether it was JA or EXIT 1523 */ 1524 if (frame == 0) 1525 return 0; 1526 depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32); 1527 frame--; 1528 i = ret_insn[frame]; 1529 subprog = ret_prog[frame]; 1530 goto continue_func; 1531 } 1532 1533 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1534 static int get_callee_stack_depth(struct bpf_verifier_env *env, 1535 const struct bpf_insn *insn, int idx) 1536 { 1537 int start = idx + insn->imm + 1, subprog; 1538 1539 subprog = find_subprog(env, start); 1540 if (subprog < 0) { 1541 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n",
			  start);
		return -EFAULT;
	}
	subprog++;
	return env->subprog_stack_depth[subprog];
}
#endif

/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE
 */
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
	u64 mask;

	/* clear high bits in bit representation */
	reg->var_off = tnum_cast(reg->var_off, size);

	/* fix arithmetic bounds */
	mask = ((u64)1 << (size * 8)) - 1;
	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
		reg->umin_value &= mask;
		reg->umax_value &= mask;
	} else {
		reg->umin_value = 0;
		reg->umax_value = mask;
	}
	reg->smin_value = reg->umin_value;
	reg->smax_value = reg->umax_value;
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register which value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = regs + regno;
	struct bpf_func_state *state;
	int size, err = 0;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	/* alignment checks will add in reg->off themselves */
	err = check_ptr_alignment(env, reg, off, size);
	if (err)
		return err;

	/* for access checks, reg->off is just part of off */
	off += reg->off;

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}

		err = check_map_access(env, regno, off, size, false);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);

	} else if (reg->type == PTR_TO_CTX) {
		enum bpf_reg_type reg_type = SCALAR_VALUE;

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		/* ctx accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 */
		if (reg->off) {
			verbose(env,
				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
				regno, reg->off, off - reg->off);
			return -EACCES;
		}
		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env,
				"variable ctx access var_off=%s off=%d size=%d",
				tn_buf, off, size);
			return -EACCES;
		}
		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
		if (!err && t == BPF_READ && value_regno >= 0) {
			/* ctx access returns either a scalar, or a
			 * PTR_TO_PACKET[_META,_END]. In the latter
			 * case, we know the offset is zero.
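			 * (E.g. a read of skb->data through the ctx yields a
			 * PTR_TO_PACKET with off=0; its usable range is only
			 * established later by a comparison against pkt_end.)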
			 */
			if (reg_type == SCALAR_VALUE)
				mark_reg_unknown(env, regs, value_regno);
			else
				mark_reg_known_zero(env, regs,
						    value_regno);
			regs[value_regno].id = 0;
			regs[value_regno].off = 0;
			regs[value_regno].range = 0;
			regs[value_regno].type = reg_type;
		}

	} else if (reg->type == PTR_TO_STACK) {
		/* stack accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 * See check_stack_read().
		 */
		if (!tnum_is_const(reg->var_off)) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env, "variable stack access var_off=%s off=%d size=%d",
				tn_buf, off, size);
			return -EACCES;
		}
		off += reg->var_off.value;
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose(env, "invalid stack off=%d size=%d\n", off,
				size);
			return -EACCES;
		}

		state = func(env, reg);
		err = update_stack_depth(env, state, off);
		if (err)
			return err;

		if (t == BPF_WRITE)
			err = check_stack_write(env, state, off, size,
						value_regno);
		else
			err = check_stack_read(env, state, off, size,
					       value_regno);
	} else if (reg_is_pkt_pointer(reg)) {
		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
			verbose(env, "cannot write into packet\n");
			return -EACCES;
		}
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into packet\n",
				value_regno);
			return -EACCES;
		}
		err = check_packet_access(env, regno, off, size, false);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);
	} else {
		verbose(env, "R%d invalid mem access '%s'\n", regno,
			reg_type_str[reg->type]);
		return -EACCES;
	}

	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
	    regs[value_regno].type == SCALAR_VALUE) {
		/* b/h/w load zero-extends, mark upper bits as known 0 */
		coerce_reg_to_size(&regs[value_regno], size);
	}
	return err;
}

static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose(env, "BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(env, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	if (is_pointer_value(env, insn->src_reg)) {
		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
		return -EACCES;
	}

	if (is_ctx_reg(env, insn->dst_reg)) {
		verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
			insn->dst_reg);
		return -EACCES;
	}

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}

/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
within stack boundary 1757 * and all elements of stack are initialized. 1758 * Unlike most pointer bounds-checking functions, this one doesn't take an 1759 * 'off' argument, so it has to add in reg->off itself. 1760 */ 1761 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 1762 int access_size, bool zero_size_allowed, 1763 struct bpf_call_arg_meta *meta) 1764 { 1765 struct bpf_reg_state *reg = cur_regs(env) + regno; 1766 struct bpf_func_state *state = func(env, reg); 1767 int off, i, slot, spi; 1768 1769 if (reg->type != PTR_TO_STACK) { 1770 /* Allow zero-byte read from NULL, regardless of pointer type */ 1771 if (zero_size_allowed && access_size == 0 && 1772 register_is_null(reg)) 1773 return 0; 1774 1775 verbose(env, "R%d type=%s expected=%s\n", regno, 1776 reg_type_str[reg->type], 1777 reg_type_str[PTR_TO_STACK]); 1778 return -EACCES; 1779 } 1780 1781 /* Only allow fixed-offset stack reads */ 1782 if (!tnum_is_const(reg->var_off)) { 1783 char tn_buf[48]; 1784 1785 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1786 verbose(env, "invalid variable stack read R%d var_off=%s\n", 1787 regno, tn_buf); 1788 return -EACCES; 1789 } 1790 off = reg->off + reg->var_off.value; 1791 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 1792 access_size < 0 || (access_size == 0 && !zero_size_allowed)) { 1793 verbose(env, "invalid stack type R%d off=%d access_size=%d\n", 1794 regno, off, access_size); 1795 return -EACCES; 1796 } 1797 1798 if (meta && meta->raw_mode) { 1799 meta->access_size = access_size; 1800 meta->regno = regno; 1801 return 0; 1802 } 1803 1804 for (i = 0; i < access_size; i++) { 1805 u8 *stype; 1806 1807 slot = -(off + i) - 1; 1808 spi = slot / BPF_REG_SIZE; 1809 if (state->allocated_stack <= slot) 1810 goto err; 1811 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 1812 if (*stype == STACK_MISC) 1813 goto mark; 1814 if (*stype == STACK_ZERO) { 1815 /* helper can write anything into the stack */ 1816 *stype = STACK_MISC; 1817 goto mark; 1818 } 1819 err: 1820 verbose(env, "invalid indirect read from stack off %d+%d size %d\n", 1821 off, i, access_size); 1822 return -EACCES; 1823 mark: 1824 /* reading any byte out of 8-byte 'spill_slot' will cause 1825 * the whole slot to be marked as 'read' 1826 */ 1827 mark_stack_slot_read(env, env->cur_state, env->cur_state->parent, 1828 spi, state->frameno); 1829 } 1830 return update_stack_depth(env, state, off); 1831 } 1832 1833 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 1834 int access_size, bool zero_size_allowed, 1835 struct bpf_call_arg_meta *meta) 1836 { 1837 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 1838 1839 switch (reg->type) { 1840 case PTR_TO_PACKET: 1841 case PTR_TO_PACKET_META: 1842 return check_packet_access(env, regno, reg->off, access_size, 1843 zero_size_allowed); 1844 case PTR_TO_MAP_VALUE: 1845 return check_map_access(env, regno, reg->off, access_size, 1846 zero_size_allowed); 1847 default: /* scalar_value|ptr_to_stack or invalid ptr */ 1848 return check_stack_boundary(env, regno, access_size, 1849 zero_size_allowed, meta); 1850 } 1851 } 1852 1853 static bool arg_type_is_mem_ptr(enum bpf_arg_type type) 1854 { 1855 return type == ARG_PTR_TO_MEM || 1856 type == ARG_PTR_TO_MEM_OR_NULL || 1857 type == ARG_PTR_TO_UNINIT_MEM; 1858 } 1859 1860 static bool arg_type_is_mem_size(enum bpf_arg_type type) 1861 { 1862 return type == ARG_CONST_SIZE || 1863 type == ARG_CONST_SIZE_OR_ZERO; 1864 } 1865 1866 static int check_func_arg(struct 
bpf_verifier_env *env, u32 regno,
1867 			  enum bpf_arg_type arg_type,
1868 			  struct bpf_call_arg_meta *meta)
1869 {
1870 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1871 	enum bpf_reg_type expected_type, type = reg->type;
1872 	int err = 0;
1873 
1874 	if (arg_type == ARG_DONTCARE)
1875 		return 0;
1876 
1877 	err = check_reg_arg(env, regno, SRC_OP);
1878 	if (err)
1879 		return err;
1880 
1881 	if (arg_type == ARG_ANYTHING) {
1882 		if (is_pointer_value(env, regno)) {
1883 			verbose(env, "R%d leaks addr into helper function\n",
1884 				regno);
1885 			return -EACCES;
1886 		}
1887 		return 0;
1888 	}
1889 
1890 	if (type_is_pkt_pointer(type) &&
1891 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1892 		verbose(env, "helper access to the packet is not allowed\n");
1893 		return -EACCES;
1894 	}
1895 
1896 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
1897 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
1898 		expected_type = PTR_TO_STACK;
1899 		if (!type_is_pkt_pointer(type) &&
1900 		    type != expected_type)
1901 			goto err_type;
1902 	} else if (arg_type == ARG_CONST_SIZE ||
1903 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1904 		expected_type = SCALAR_VALUE;
1905 		if (type != expected_type)
1906 			goto err_type;
1907 	} else if (arg_type == ARG_CONST_MAP_PTR) {
1908 		expected_type = CONST_PTR_TO_MAP;
1909 		if (type != expected_type)
1910 			goto err_type;
1911 	} else if (arg_type == ARG_PTR_TO_CTX) {
1912 		expected_type = PTR_TO_CTX;
1913 		if (type != expected_type)
1914 			goto err_type;
1915 	} else if (arg_type_is_mem_ptr(arg_type)) {
1916 		expected_type = PTR_TO_STACK;
1917 		/* One exception here. In case function allows for NULL to be
1918 		 * passed in as argument, it's a SCALAR_VALUE type. Final test
1919 		 * happens during stack boundary checking.
1920 		 */
1921 		if (register_is_null(reg) &&
1922 		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
1923 			/* final test in check_stack_boundary() */;
1924 		else if (!type_is_pkt_pointer(type) &&
1925 			 type != PTR_TO_MAP_VALUE &&
1926 			 type != expected_type)
1927 			goto err_type;
1928 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
1929 	} else {
1930 		verbose(env, "unsupported arg_type %d\n", arg_type);
1931 		return -EFAULT;
1932 	}
1933 
1934 	if (arg_type == ARG_CONST_MAP_PTR) {
1935 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
1936 		meta->map_ptr = reg->map_ptr;
1937 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
1938 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
1939 		 * check that [key, key + map->key_size) are within
1940 		 * stack limits and initialized
1941 		 */
1942 		if (!meta->map_ptr) {
1943 			/* in function declaration map_ptr must come before
1944 			 * map_key, so that it's verified and known before
1945 			 * we have to check map_key here.
Otherwise it means 1946 * that kernel subsystem misconfigured verifier 1947 */ 1948 verbose(env, "invalid map_ptr to access map->key\n"); 1949 return -EACCES; 1950 } 1951 if (type_is_pkt_pointer(type)) 1952 err = check_packet_access(env, regno, reg->off, 1953 meta->map_ptr->key_size, 1954 false); 1955 else 1956 err = check_stack_boundary(env, regno, 1957 meta->map_ptr->key_size, 1958 false, NULL); 1959 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { 1960 /* bpf_map_xxx(..., map_ptr, ..., value) call: 1961 * check [value, value + map->value_size) validity 1962 */ 1963 if (!meta->map_ptr) { 1964 /* kernel subsystem misconfigured verifier */ 1965 verbose(env, "invalid map_ptr to access map->value\n"); 1966 return -EACCES; 1967 } 1968 if (type_is_pkt_pointer(type)) 1969 err = check_packet_access(env, regno, reg->off, 1970 meta->map_ptr->value_size, 1971 false); 1972 else 1973 err = check_stack_boundary(env, regno, 1974 meta->map_ptr->value_size, 1975 false, NULL); 1976 } else if (arg_type_is_mem_size(arg_type)) { 1977 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 1978 1979 /* The register is SCALAR_VALUE; the access check 1980 * happens using its boundaries. 1981 */ 1982 if (!tnum_is_const(reg->var_off)) 1983 /* For unprivileged variable accesses, disable raw 1984 * mode so that the program is required to 1985 * initialize all the memory that the helper could 1986 * just partially fill up. 1987 */ 1988 meta = NULL; 1989 1990 if (reg->smin_value < 0) { 1991 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 1992 regno); 1993 return -EACCES; 1994 } 1995 1996 if (reg->umin_value == 0) { 1997 err = check_helper_mem_access(env, regno - 1, 0, 1998 zero_size_allowed, 1999 meta); 2000 if (err) 2001 return err; 2002 } 2003 2004 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 2005 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 2006 regno); 2007 return -EACCES; 2008 } 2009 err = check_helper_mem_access(env, regno - 1, 2010 reg->umax_value, 2011 zero_size_allowed, meta); 2012 } 2013 2014 return err; 2015 err_type: 2016 verbose(env, "R%d type=%s expected=%s\n", regno, 2017 reg_type_str[type], reg_type_str[expected_type]); 2018 return -EACCES; 2019 } 2020 2021 static int check_map_func_compatibility(struct bpf_verifier_env *env, 2022 struct bpf_map *map, int func_id) 2023 { 2024 if (!map) 2025 return 0; 2026 2027 /* We need a two way check, first is from map perspective ... */ 2028 switch (map->map_type) { 2029 case BPF_MAP_TYPE_PROG_ARRAY: 2030 if (func_id != BPF_FUNC_tail_call) 2031 goto error; 2032 break; 2033 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 2034 if (func_id != BPF_FUNC_perf_event_read && 2035 func_id != BPF_FUNC_perf_event_output && 2036 func_id != BPF_FUNC_perf_event_read_value) 2037 goto error; 2038 break; 2039 case BPF_MAP_TYPE_STACK_TRACE: 2040 if (func_id != BPF_FUNC_get_stackid) 2041 goto error; 2042 break; 2043 case BPF_MAP_TYPE_CGROUP_ARRAY: 2044 if (func_id != BPF_FUNC_skb_under_cgroup && 2045 func_id != BPF_FUNC_current_task_under_cgroup) 2046 goto error; 2047 break; 2048 /* devmap returns a pointer to a live net_device ifindex that we cannot 2049 * allow to be modified from bpf side. So do not allow lookup elements 2050 * for now. 
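	 * (Illustrative example, with a hypothetical devmap called tx_ports
	 *  attached to an XDP program: "return bpf_redirect_map(&tx_ports,
	 *  port_idx, 0);" is the one supported pattern here, while a
	 *  bpf_map_lookup_elem() on the same map is rejected by the check
	 *  below.)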
2051 */ 2052 case BPF_MAP_TYPE_DEVMAP: 2053 if (func_id != BPF_FUNC_redirect_map) 2054 goto error; 2055 break; 2056 /* Restrict bpf side of cpumap, open when use-cases appear */ 2057 case BPF_MAP_TYPE_CPUMAP: 2058 if (func_id != BPF_FUNC_redirect_map) 2059 goto error; 2060 break; 2061 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 2062 case BPF_MAP_TYPE_HASH_OF_MAPS: 2063 if (func_id != BPF_FUNC_map_lookup_elem) 2064 goto error; 2065 break; 2066 case BPF_MAP_TYPE_SOCKMAP: 2067 if (func_id != BPF_FUNC_sk_redirect_map && 2068 func_id != BPF_FUNC_sock_map_update && 2069 func_id != BPF_FUNC_map_delete_elem) 2070 goto error; 2071 break; 2072 default: 2073 break; 2074 } 2075 2076 /* ... and second from the function itself. */ 2077 switch (func_id) { 2078 case BPF_FUNC_tail_call: 2079 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 2080 goto error; 2081 if (env->subprog_cnt) { 2082 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); 2083 return -EINVAL; 2084 } 2085 break; 2086 case BPF_FUNC_perf_event_read: 2087 case BPF_FUNC_perf_event_output: 2088 case BPF_FUNC_perf_event_read_value: 2089 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 2090 goto error; 2091 break; 2092 case BPF_FUNC_get_stackid: 2093 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 2094 goto error; 2095 break; 2096 case BPF_FUNC_current_task_under_cgroup: 2097 case BPF_FUNC_skb_under_cgroup: 2098 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 2099 goto error; 2100 break; 2101 case BPF_FUNC_redirect_map: 2102 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 2103 map->map_type != BPF_MAP_TYPE_CPUMAP) 2104 goto error; 2105 break; 2106 case BPF_FUNC_sk_redirect_map: 2107 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 2108 goto error; 2109 break; 2110 case BPF_FUNC_sock_map_update: 2111 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 2112 goto error; 2113 break; 2114 default: 2115 break; 2116 } 2117 2118 return 0; 2119 error: 2120 verbose(env, "cannot pass map_type %d into func %s#%d\n", 2121 map->map_type, func_id_name(func_id), func_id); 2122 return -EINVAL; 2123 } 2124 2125 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 2126 { 2127 int count = 0; 2128 2129 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 2130 count++; 2131 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 2132 count++; 2133 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 2134 count++; 2135 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 2136 count++; 2137 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 2138 count++; 2139 2140 /* We only support one arg being in raw mode at the moment, 2141 * which is sufficient for the helper functions we have 2142 * right now. 2143 */ 2144 return count <= 1; 2145 } 2146 2147 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, 2148 enum bpf_arg_type arg_next) 2149 { 2150 return (arg_type_is_mem_ptr(arg_curr) && 2151 !arg_type_is_mem_size(arg_next)) || 2152 (!arg_type_is_mem_ptr(arg_curr) && 2153 arg_type_is_mem_size(arg_next)); 2154 } 2155 2156 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 2157 { 2158 /* bpf_xxx(..., buf, len) call will access 'len' 2159 * bytes from memory 'buf'. Both arg types need 2160 * to be paired, so make sure there's no buggy 2161 * helper function specification. 
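 * E.g. a proto shaped like bpf_probe_read()'s, where the memory pointer
 * argument (ARG_PTR_TO_UNINIT_MEM) is immediately followed by its size
 * argument (ARG_CONST_SIZE or ARG_CONST_SIZE_OR_ZERO), passes this check;
 * a size argument with no memory pointer right before it would not.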
2162 */ 2163 if (arg_type_is_mem_size(fn->arg1_type) || 2164 arg_type_is_mem_ptr(fn->arg5_type) || 2165 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || 2166 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || 2167 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || 2168 check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) 2169 return false; 2170 2171 return true; 2172 } 2173 2174 static int check_func_proto(const struct bpf_func_proto *fn) 2175 { 2176 return check_raw_mode_ok(fn) && 2177 check_arg_pair_ok(fn) ? 0 : -EINVAL; 2178 } 2179 2180 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 2181 * are now invalid, so turn them into unknown SCALAR_VALUE. 2182 */ 2183 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, 2184 struct bpf_func_state *state) 2185 { 2186 struct bpf_reg_state *regs = state->regs, *reg; 2187 int i; 2188 2189 for (i = 0; i < MAX_BPF_REG; i++) 2190 if (reg_is_pkt_pointer_any(®s[i])) 2191 mark_reg_unknown(env, regs, i); 2192 2193 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 2194 if (state->stack[i].slot_type[0] != STACK_SPILL) 2195 continue; 2196 reg = &state->stack[i].spilled_ptr; 2197 if (reg_is_pkt_pointer_any(reg)) 2198 __mark_reg_unknown(reg); 2199 } 2200 } 2201 2202 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 2203 { 2204 struct bpf_verifier_state *vstate = env->cur_state; 2205 int i; 2206 2207 for (i = 0; i <= vstate->curframe; i++) 2208 __clear_all_pkt_pointers(env, vstate->frame[i]); 2209 } 2210 2211 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 2212 int *insn_idx) 2213 { 2214 struct bpf_verifier_state *state = env->cur_state; 2215 struct bpf_func_state *caller, *callee; 2216 int i, subprog, target_insn; 2217 2218 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 2219 verbose(env, "the call stack of %d frames is too deep\n", 2220 state->curframe + 2); 2221 return -E2BIG; 2222 } 2223 2224 target_insn = *insn_idx + insn->imm; 2225 subprog = find_subprog(env, target_insn + 1); 2226 if (subprog < 0) { 2227 verbose(env, "verifier bug. No program starts at insn %d\n", 2228 target_insn + 1); 2229 return -EFAULT; 2230 } 2231 2232 caller = state->frame[state->curframe]; 2233 if (state->frame[state->curframe + 1]) { 2234 verbose(env, "verifier bug. Frame %d already allocated\n", 2235 state->curframe + 1); 2236 return -EFAULT; 2237 } 2238 2239 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 2240 if (!callee) 2241 return -ENOMEM; 2242 state->frame[state->curframe + 1] = callee; 2243 2244 /* callee cannot access r0, r6 - r9 for reading and has to write 2245 * into its own stack before reading from it. 
2246 * callee can read/write into caller's stack 2247 */ 2248 init_func_state(env, callee, 2249 /* remember the callsite, it will be used by bpf_exit */ 2250 *insn_idx /* callsite */, 2251 state->curframe + 1 /* frameno within this callchain */, 2252 subprog + 1 /* subprog number within this prog */); 2253 2254 /* copy r1 - r5 args that callee can access */ 2255 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 2256 callee->regs[i] = caller->regs[i]; 2257 2258 /* after the call regsiters r0 - r5 were scratched */ 2259 for (i = 0; i < CALLER_SAVED_REGS; i++) { 2260 mark_reg_not_init(env, caller->regs, caller_saved[i]); 2261 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 2262 } 2263 2264 /* only increment it after check_reg_arg() finished */ 2265 state->curframe++; 2266 2267 /* and go analyze first insn of the callee */ 2268 *insn_idx = target_insn; 2269 2270 if (env->log.level) { 2271 verbose(env, "caller:\n"); 2272 print_verifier_state(env, caller); 2273 verbose(env, "callee:\n"); 2274 print_verifier_state(env, callee); 2275 } 2276 return 0; 2277 } 2278 2279 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 2280 { 2281 struct bpf_verifier_state *state = env->cur_state; 2282 struct bpf_func_state *caller, *callee; 2283 struct bpf_reg_state *r0; 2284 2285 callee = state->frame[state->curframe]; 2286 r0 = &callee->regs[BPF_REG_0]; 2287 if (r0->type == PTR_TO_STACK) { 2288 /* technically it's ok to return caller's stack pointer 2289 * (or caller's caller's pointer) back to the caller, 2290 * since these pointers are valid. Only current stack 2291 * pointer will be invalid as soon as function exits, 2292 * but let's be conservative 2293 */ 2294 verbose(env, "cannot return stack pointer to the caller\n"); 2295 return -EINVAL; 2296 } 2297 2298 state->curframe--; 2299 caller = state->frame[state->curframe]; 2300 /* return to the caller whatever r0 had in the callee */ 2301 caller->regs[BPF_REG_0] = *r0; 2302 2303 *insn_idx = callee->callsite + 1; 2304 if (env->log.level) { 2305 verbose(env, "returning from callee:\n"); 2306 print_verifier_state(env, callee); 2307 verbose(env, "to caller at %d:\n", *insn_idx); 2308 print_verifier_state(env, caller); 2309 } 2310 /* clear everything in the callee */ 2311 free_func_state(callee); 2312 state->frame[state->curframe + 1] = NULL; 2313 return 0; 2314 } 2315 2316 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 2317 { 2318 const struct bpf_func_proto *fn = NULL; 2319 struct bpf_reg_state *regs; 2320 struct bpf_call_arg_meta meta; 2321 bool changes_data; 2322 int i, err; 2323 2324 /* find function prototype */ 2325 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 2326 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 2327 func_id); 2328 return -EINVAL; 2329 } 2330 2331 if (env->ops->get_func_proto) 2332 fn = env->ops->get_func_proto(func_id); 2333 if (!fn) { 2334 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 2335 func_id); 2336 return -EINVAL; 2337 } 2338 2339 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2340 if (!env->prog->gpl_compatible && fn->gpl_only) { 2341 verbose(env, "cannot call GPL only function from proprietary program\n"); 2342 return -EINVAL; 2343 } 2344 2345 /* With LD_ABS/IND some JITs save/restore skb from r1. 
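	 * Helpers for which bpf_helper_changes_pkt_data() returns true (e.g.
	 * bpf_skb_pull_data() or bpf_skb_store_bytes()) may reallocate the
	 * packet, so packet pointers derived before such a call are
	 * invalidated afterwards via clear_all_pkt_pointers(). Illustrative
	 * sketch:
	 *   r6 = packet pointer derived from ctx
	 *   call bpf_skb_pull_data
	 *   r0 = *(u8 *)(r6 + 0)   -> rejected, r6 must be re-derived first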
*/ 2346 changes_data = bpf_helper_changes_pkt_data(fn->func); 2347 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 2348 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 2349 func_id_name(func_id), func_id); 2350 return -EINVAL; 2351 } 2352 2353 memset(&meta, 0, sizeof(meta)); 2354 meta.pkt_access = fn->pkt_access; 2355 2356 err = check_func_proto(fn); 2357 if (err) { 2358 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 2359 func_id_name(func_id), func_id); 2360 return err; 2361 } 2362 2363 /* check args */ 2364 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); 2365 if (err) 2366 return err; 2367 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 2368 if (err) 2369 return err; 2370 if (func_id == BPF_FUNC_tail_call) { 2371 if (meta.map_ptr == NULL) { 2372 verbose(env, "verifier bug\n"); 2373 return -EINVAL; 2374 } 2375 env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr; 2376 } 2377 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 2378 if (err) 2379 return err; 2380 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); 2381 if (err) 2382 return err; 2383 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); 2384 if (err) 2385 return err; 2386 2387 /* Mark slots with STACK_MISC in case of raw mode, stack offset 2388 * is inferred from register state. 2389 */ 2390 for (i = 0; i < meta.access_size; i++) { 2391 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); 2392 if (err) 2393 return err; 2394 } 2395 2396 regs = cur_regs(env); 2397 /* reset caller saved regs */ 2398 for (i = 0; i < CALLER_SAVED_REGS; i++) { 2399 mark_reg_not_init(env, regs, caller_saved[i]); 2400 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 2401 } 2402 2403 /* update return register (already marked as written above) */ 2404 if (fn->ret_type == RET_INTEGER) { 2405 /* sets type to SCALAR_VALUE */ 2406 mark_reg_unknown(env, regs, BPF_REG_0); 2407 } else if (fn->ret_type == RET_VOID) { 2408 regs[BPF_REG_0].type = NOT_INIT; 2409 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 2410 struct bpf_insn_aux_data *insn_aux; 2411 2412 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 2413 /* There is no offset yet applied, variable or fixed */ 2414 mark_reg_known_zero(env, regs, BPF_REG_0); 2415 regs[BPF_REG_0].off = 0; 2416 /* remember map_ptr, so that check_map_access() 2417 * can check 'value_size' boundary of memory access 2418 * to map element returned from bpf_map_lookup_elem() 2419 */ 2420 if (meta.map_ptr == NULL) { 2421 verbose(env, 2422 "kernel subsystem misconfigured verifier\n"); 2423 return -EINVAL; 2424 } 2425 regs[BPF_REG_0].map_ptr = meta.map_ptr; 2426 regs[BPF_REG_0].id = ++env->id_gen; 2427 insn_aux = &env->insn_aux_data[insn_idx]; 2428 if (!insn_aux->map_ptr) 2429 insn_aux->map_ptr = meta.map_ptr; 2430 else if (insn_aux->map_ptr != meta.map_ptr) 2431 insn_aux->map_ptr = BPF_MAP_PTR_POISON; 2432 } else { 2433 verbose(env, "unknown return type %d of func %s#%d\n", 2434 fn->ret_type, func_id_name(func_id), func_id); 2435 return -EINVAL; 2436 } 2437 2438 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 2439 if (err) 2440 return err; 2441 2442 if (changes_data) 2443 clear_all_pkt_pointers(env); 2444 return 0; 2445 } 2446 2447 static bool signed_add_overflows(s64 a, s64 b) 2448 { 2449 /* Do the add in u64, where overflow is well-defined */ 2450 s64 res = (s64)((u64)a + (u64)b); 2451 2452 if (b < 0) 2453 return res > a; 2454 return res < a; 2455 } 2456 2457 static bool 
signed_sub_overflows(s64 a, s64 b) 2458 { 2459 /* Do the sub in u64, where overflow is well-defined */ 2460 s64 res = (s64)((u64)a - (u64)b); 2461 2462 if (b < 0) 2463 return res < a; 2464 return res > a; 2465 } 2466 2467 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 2468 const struct bpf_reg_state *reg, 2469 enum bpf_reg_type type) 2470 { 2471 bool known = tnum_is_const(reg->var_off); 2472 s64 val = reg->var_off.value; 2473 s64 smin = reg->smin_value; 2474 2475 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 2476 verbose(env, "math between %s pointer and %lld is not allowed\n", 2477 reg_type_str[type], val); 2478 return false; 2479 } 2480 2481 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 2482 verbose(env, "%s pointer offset %d is not allowed\n", 2483 reg_type_str[type], reg->off); 2484 return false; 2485 } 2486 2487 if (smin == S64_MIN) { 2488 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 2489 reg_type_str[type]); 2490 return false; 2491 } 2492 2493 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 2494 verbose(env, "value %lld makes %s pointer be out of bounds\n", 2495 smin, reg_type_str[type]); 2496 return false; 2497 } 2498 2499 return true; 2500 } 2501 2502 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 2503 * Caller should also handle BPF_MOV case separately. 2504 * If we return -EACCES, caller may want to try again treating pointer as a 2505 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 2506 */ 2507 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 2508 struct bpf_insn *insn, 2509 const struct bpf_reg_state *ptr_reg, 2510 const struct bpf_reg_state *off_reg) 2511 { 2512 struct bpf_verifier_state *vstate = env->cur_state; 2513 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2514 struct bpf_reg_state *regs = state->regs, *dst_reg; 2515 bool known = tnum_is_const(off_reg->var_off); 2516 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 2517 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 2518 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 2519 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 2520 u8 opcode = BPF_OP(insn->code); 2521 u32 dst = insn->dst_reg; 2522 2523 dst_reg = ®s[dst]; 2524 2525 if ((known && (smin_val != smax_val || umin_val != umax_val)) || 2526 smin_val > smax_val || umin_val > umax_val) { 2527 /* Taint dst register if offset had invalid bounds derived from 2528 * e.g. dead branches. 2529 */ 2530 __mark_reg_unknown(dst_reg); 2531 return 0; 2532 } 2533 2534 if (BPF_CLASS(insn->code) != BPF_ALU64) { 2535 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 2536 verbose(env, 2537 "R%d 32-bit pointer arithmetic prohibited\n", 2538 dst); 2539 return -EACCES; 2540 } 2541 2542 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 2543 verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", 2544 dst); 2545 return -EACCES; 2546 } 2547 if (ptr_reg->type == CONST_PTR_TO_MAP) { 2548 verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", 2549 dst); 2550 return -EACCES; 2551 } 2552 if (ptr_reg->type == PTR_TO_PACKET_END) { 2553 verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", 2554 dst); 2555 return -EACCES; 2556 } 2557 2558 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 
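	 * (E.g. for an illustrative "r3 = 4; r3 += r2" where r2 is
	 *  PTR_TO_PACKET, r3 becomes PTR_TO_PACKET carrying r2's id, with
	 *  r2's fixed offset increased by 4.)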
2559 * The id may be overwritten later if we create a new variable offset. 2560 */ 2561 dst_reg->type = ptr_reg->type; 2562 dst_reg->id = ptr_reg->id; 2563 2564 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 2565 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 2566 return -EINVAL; 2567 2568 switch (opcode) { 2569 case BPF_ADD: 2570 /* We can take a fixed offset as long as it doesn't overflow 2571 * the s32 'off' field 2572 */ 2573 if (known && (ptr_reg->off + smin_val == 2574 (s64)(s32)(ptr_reg->off + smin_val))) { 2575 /* pointer += K. Accumulate it into fixed offset */ 2576 dst_reg->smin_value = smin_ptr; 2577 dst_reg->smax_value = smax_ptr; 2578 dst_reg->umin_value = umin_ptr; 2579 dst_reg->umax_value = umax_ptr; 2580 dst_reg->var_off = ptr_reg->var_off; 2581 dst_reg->off = ptr_reg->off + smin_val; 2582 dst_reg->range = ptr_reg->range; 2583 break; 2584 } 2585 /* A new variable offset is created. Note that off_reg->off 2586 * == 0, since it's a scalar. 2587 * dst_reg gets the pointer type and since some positive 2588 * integer value was added to the pointer, give it a new 'id' 2589 * if it's a PTR_TO_PACKET. 2590 * this creates a new 'base' pointer, off_reg (variable) gets 2591 * added into the variable offset, and we copy the fixed offset 2592 * from ptr_reg. 2593 */ 2594 if (signed_add_overflows(smin_ptr, smin_val) || 2595 signed_add_overflows(smax_ptr, smax_val)) { 2596 dst_reg->smin_value = S64_MIN; 2597 dst_reg->smax_value = S64_MAX; 2598 } else { 2599 dst_reg->smin_value = smin_ptr + smin_val; 2600 dst_reg->smax_value = smax_ptr + smax_val; 2601 } 2602 if (umin_ptr + umin_val < umin_ptr || 2603 umax_ptr + umax_val < umax_ptr) { 2604 dst_reg->umin_value = 0; 2605 dst_reg->umax_value = U64_MAX; 2606 } else { 2607 dst_reg->umin_value = umin_ptr + umin_val; 2608 dst_reg->umax_value = umax_ptr + umax_val; 2609 } 2610 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 2611 dst_reg->off = ptr_reg->off; 2612 if (reg_is_pkt_pointer(ptr_reg)) { 2613 dst_reg->id = ++env->id_gen; 2614 /* something was added to pkt_ptr, set range to zero */ 2615 dst_reg->range = 0; 2616 } 2617 break; 2618 case BPF_SUB: 2619 if (dst_reg == off_reg) { 2620 /* scalar -= pointer. Creates an unknown scalar */ 2621 verbose(env, "R%d tried to subtract pointer from scalar\n", 2622 dst); 2623 return -EACCES; 2624 } 2625 /* We don't allow subtraction from FP, because (according to 2626 * test_verifier.c test "invalid fp arithmetic", JITs might not 2627 * be able to deal with it. 2628 */ 2629 if (ptr_reg->type == PTR_TO_STACK) { 2630 verbose(env, "R%d subtraction from stack pointer prohibited\n", 2631 dst); 2632 return -EACCES; 2633 } 2634 if (known && (ptr_reg->off - smin_val == 2635 (s64)(s32)(ptr_reg->off - smin_val))) { 2636 /* pointer -= K. Subtract it from fixed offset */ 2637 dst_reg->smin_value = smin_ptr; 2638 dst_reg->smax_value = smax_ptr; 2639 dst_reg->umin_value = umin_ptr; 2640 dst_reg->umax_value = umax_ptr; 2641 dst_reg->var_off = ptr_reg->var_off; 2642 dst_reg->id = ptr_reg->id; 2643 dst_reg->off = ptr_reg->off - smin_val; 2644 dst_reg->range = ptr_reg->range; 2645 break; 2646 } 2647 /* A new variable offset is created. If the subtrahend is known 2648 * nonnegative, then any reg->range we had before is still good. 
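		 * E.g. (hypothetical bounds): subtracting a scalar known to be
		 * in [0, 16] from a map value pointer with smin == smax == 0
		 * gives, in the code below, smin = 0 - 16 = -16 and
		 * smax = 0 - 0 = 0.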
2649 */ 2650 if (signed_sub_overflows(smin_ptr, smax_val) || 2651 signed_sub_overflows(smax_ptr, smin_val)) { 2652 /* Overflow possible, we know nothing */ 2653 dst_reg->smin_value = S64_MIN; 2654 dst_reg->smax_value = S64_MAX; 2655 } else { 2656 dst_reg->smin_value = smin_ptr - smax_val; 2657 dst_reg->smax_value = smax_ptr - smin_val; 2658 } 2659 if (umin_ptr < umax_val) { 2660 /* Overflow possible, we know nothing */ 2661 dst_reg->umin_value = 0; 2662 dst_reg->umax_value = U64_MAX; 2663 } else { 2664 /* Cannot overflow (as long as bounds are consistent) */ 2665 dst_reg->umin_value = umin_ptr - umax_val; 2666 dst_reg->umax_value = umax_ptr - umin_val; 2667 } 2668 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 2669 dst_reg->off = ptr_reg->off; 2670 if (reg_is_pkt_pointer(ptr_reg)) { 2671 dst_reg->id = ++env->id_gen; 2672 /* something was added to pkt_ptr, set range to zero */ 2673 if (smin_val < 0) 2674 dst_reg->range = 0; 2675 } 2676 break; 2677 case BPF_AND: 2678 case BPF_OR: 2679 case BPF_XOR: 2680 /* bitwise ops on pointers are troublesome, prohibit. */ 2681 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 2682 dst, bpf_alu_string[opcode >> 4]); 2683 return -EACCES; 2684 default: 2685 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 2686 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 2687 dst, bpf_alu_string[opcode >> 4]); 2688 return -EACCES; 2689 } 2690 2691 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 2692 return -EINVAL; 2693 2694 __update_reg_bounds(dst_reg); 2695 __reg_deduce_bounds(dst_reg); 2696 __reg_bound_offset(dst_reg); 2697 return 0; 2698 } 2699 2700 /* WARNING: This function does calculations on 64-bit values, but the actual 2701 * execution may occur on 32-bit values. Therefore, things like bitshifts 2702 * need extra checks in the 32-bit case. 2703 */ 2704 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 2705 struct bpf_insn *insn, 2706 struct bpf_reg_state *dst_reg, 2707 struct bpf_reg_state src_reg) 2708 { 2709 struct bpf_reg_state *regs = cur_regs(env); 2710 u8 opcode = BPF_OP(insn->code); 2711 bool src_known, dst_known; 2712 s64 smin_val, smax_val; 2713 u64 umin_val, umax_val; 2714 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 2715 2716 smin_val = src_reg.smin_value; 2717 smax_val = src_reg.smax_value; 2718 umin_val = src_reg.umin_value; 2719 umax_val = src_reg.umax_value; 2720 src_known = tnum_is_const(src_reg.var_off); 2721 dst_known = tnum_is_const(dst_reg->var_off); 2722 2723 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || 2724 smin_val > smax_val || umin_val > umax_val) { 2725 /* Taint dst register if offset had invalid bounds derived from 2726 * e.g. dead branches. 
2727 */ 2728 __mark_reg_unknown(dst_reg); 2729 return 0; 2730 } 2731 2732 if (!src_known && 2733 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 2734 __mark_reg_unknown(dst_reg); 2735 return 0; 2736 } 2737 2738 switch (opcode) { 2739 case BPF_ADD: 2740 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 2741 signed_add_overflows(dst_reg->smax_value, smax_val)) { 2742 dst_reg->smin_value = S64_MIN; 2743 dst_reg->smax_value = S64_MAX; 2744 } else { 2745 dst_reg->smin_value += smin_val; 2746 dst_reg->smax_value += smax_val; 2747 } 2748 if (dst_reg->umin_value + umin_val < umin_val || 2749 dst_reg->umax_value + umax_val < umax_val) { 2750 dst_reg->umin_value = 0; 2751 dst_reg->umax_value = U64_MAX; 2752 } else { 2753 dst_reg->umin_value += umin_val; 2754 dst_reg->umax_value += umax_val; 2755 } 2756 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 2757 break; 2758 case BPF_SUB: 2759 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 2760 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 2761 /* Overflow possible, we know nothing */ 2762 dst_reg->smin_value = S64_MIN; 2763 dst_reg->smax_value = S64_MAX; 2764 } else { 2765 dst_reg->smin_value -= smax_val; 2766 dst_reg->smax_value -= smin_val; 2767 } 2768 if (dst_reg->umin_value < umax_val) { 2769 /* Overflow possible, we know nothing */ 2770 dst_reg->umin_value = 0; 2771 dst_reg->umax_value = U64_MAX; 2772 } else { 2773 /* Cannot overflow (as long as bounds are consistent) */ 2774 dst_reg->umin_value -= umax_val; 2775 dst_reg->umax_value -= umin_val; 2776 } 2777 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 2778 break; 2779 case BPF_MUL: 2780 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 2781 if (smin_val < 0 || dst_reg->smin_value < 0) { 2782 /* Ain't nobody got time to multiply that sign */ 2783 __mark_reg_unbounded(dst_reg); 2784 __update_reg_bounds(dst_reg); 2785 break; 2786 } 2787 /* Both values are positive, so we can work with unsigned and 2788 * copy the result to signed (unless it exceeds S64_MAX). 2789 */ 2790 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 2791 /* Potential overflow, we know nothing */ 2792 __mark_reg_unbounded(dst_reg); 2793 /* (except what we can learn from the var_off) */ 2794 __update_reg_bounds(dst_reg); 2795 break; 2796 } 2797 dst_reg->umin_value *= umin_val; 2798 dst_reg->umax_value *= umax_val; 2799 if (dst_reg->umax_value > S64_MAX) { 2800 /* Overflow possible, we know nothing */ 2801 dst_reg->smin_value = S64_MIN; 2802 dst_reg->smax_value = S64_MAX; 2803 } else { 2804 dst_reg->smin_value = dst_reg->umin_value; 2805 dst_reg->smax_value = dst_reg->umax_value; 2806 } 2807 break; 2808 case BPF_AND: 2809 if (src_known && dst_known) { 2810 __mark_reg_known(dst_reg, dst_reg->var_off.value & 2811 src_reg.var_off.value); 2812 break; 2813 } 2814 /* We get our minimum from the var_off, since that's inherently 2815 * bitwise. Our maximum is the minimum of the operands' maxima. 2816 */ 2817 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 2818 dst_reg->umin_value = dst_reg->var_off.value; 2819 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 2820 if (dst_reg->smin_value < 0 || smin_val < 0) { 2821 /* Lose signed bounds when ANDing negative numbers, 2822 * ain't nobody got time for that. 2823 */ 2824 dst_reg->smin_value = S64_MIN; 2825 dst_reg->smax_value = S64_MAX; 2826 } else { 2827 /* ANDing two positives gives a positive, so safe to 2828 * cast result into s64. 
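			 * E.g. (hypothetical values): dst in [0, 255] with all
			 * bits unknown, src known to be 0x40: tnum_and() gives
			 * var_off (0x0; 0x40), so umin = 0 and
			 * umax = min(255, 0x40) = 0x40; since neither operand
			 * can be negative, smin/smax become [0, 0x40].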
2829 */ 2830 dst_reg->smin_value = dst_reg->umin_value; 2831 dst_reg->smax_value = dst_reg->umax_value; 2832 } 2833 /* We may learn something more from the var_off */ 2834 __update_reg_bounds(dst_reg); 2835 break; 2836 case BPF_OR: 2837 if (src_known && dst_known) { 2838 __mark_reg_known(dst_reg, dst_reg->var_off.value | 2839 src_reg.var_off.value); 2840 break; 2841 } 2842 /* We get our maximum from the var_off, and our minimum is the 2843 * maximum of the operands' minima 2844 */ 2845 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 2846 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 2847 dst_reg->umax_value = dst_reg->var_off.value | 2848 dst_reg->var_off.mask; 2849 if (dst_reg->smin_value < 0 || smin_val < 0) { 2850 /* Lose signed bounds when ORing negative numbers, 2851 * ain't nobody got time for that. 2852 */ 2853 dst_reg->smin_value = S64_MIN; 2854 dst_reg->smax_value = S64_MAX; 2855 } else { 2856 /* ORing two positives gives a positive, so safe to 2857 * cast result into s64. 2858 */ 2859 dst_reg->smin_value = dst_reg->umin_value; 2860 dst_reg->smax_value = dst_reg->umax_value; 2861 } 2862 /* We may learn something more from the var_off */ 2863 __update_reg_bounds(dst_reg); 2864 break; 2865 case BPF_LSH: 2866 if (umax_val >= insn_bitness) { 2867 /* Shifts greater than 31 or 63 are undefined. 2868 * This includes shifts by a negative number. 2869 */ 2870 mark_reg_unknown(env, regs, insn->dst_reg); 2871 break; 2872 } 2873 /* We lose all sign bit information (except what we can pick 2874 * up from var_off) 2875 */ 2876 dst_reg->smin_value = S64_MIN; 2877 dst_reg->smax_value = S64_MAX; 2878 /* If we might shift our top bit out, then we know nothing */ 2879 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 2880 dst_reg->umin_value = 0; 2881 dst_reg->umax_value = U64_MAX; 2882 } else { 2883 dst_reg->umin_value <<= umin_val; 2884 dst_reg->umax_value <<= umax_val; 2885 } 2886 if (src_known) 2887 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 2888 else 2889 dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val); 2890 /* We may learn something more from the var_off */ 2891 __update_reg_bounds(dst_reg); 2892 break; 2893 case BPF_RSH: 2894 if (umax_val >= insn_bitness) { 2895 /* Shifts greater than 31 or 63 are undefined. 2896 * This includes shifts by a negative number. 2897 */ 2898 mark_reg_unknown(env, regs, insn->dst_reg); 2899 break; 2900 } 2901 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 2902 * be negative, then either: 2903 * 1) src_reg might be zero, so the sign bit of the result is 2904 * unknown, so we lose our signed bounds 2905 * 2) it's known negative, thus the unsigned bounds capture the 2906 * signed bounds 2907 * 3) the signed bounds cross zero, so they tell us nothing 2908 * about the result 2909 * If the value in dst_reg is known nonnegative, then again the 2910 * unsigned bounts capture the signed bounds. 2911 * Thus, in all cases it suffices to blow away our signed bounds 2912 * and rely on inferring new ones from the unsigned bounds and 2913 * var_off of the result. 
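		 * E.g. (hypothetical bounds): dst in [0x100, 0xffff] shifted
		 * right by a src known to be 4 gives umin = 0x100 >> 4 = 0x10
		 * and umax = 0xffff >> 4 = 0xfff; the signed bounds are reset
		 * here and re-derived later from these unsigned bounds and the
		 * var_off.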
2914 	 */
2915 		dst_reg->smin_value = S64_MIN;
2916 		dst_reg->smax_value = S64_MAX;
2917 		if (src_known)
2918 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2919 						       umin_val);
2920 		else
2921 			dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
2922 		dst_reg->umin_value >>= umax_val;
2923 		dst_reg->umax_value >>= umin_val;
2924 		/* We may learn something more from the var_off */
2925 		__update_reg_bounds(dst_reg);
2926 		break;
2927 	default:
2928 		mark_reg_unknown(env, regs, insn->dst_reg);
2929 		break;
2930 	}
2931 
2932 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
2933 		/* 32-bit ALU ops are (32,32)->32 */
2934 		coerce_reg_to_size(dst_reg, 4);
2935 		coerce_reg_to_size(&src_reg, 4);
2936 	}
2937 
2938 	__reg_deduce_bounds(dst_reg);
2939 	__reg_bound_offset(dst_reg);
2940 	return 0;
2941 }
2942 
2943 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
2944  * and var_off.
2945  */
2946 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2947 				   struct bpf_insn *insn)
2948 {
2949 	struct bpf_verifier_state *vstate = env->cur_state;
2950 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2951 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
2952 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2953 	u8 opcode = BPF_OP(insn->code);
2954 
2955 	dst_reg = &regs[insn->dst_reg];
2956 	src_reg = NULL;
2957 	if (dst_reg->type != SCALAR_VALUE)
2958 		ptr_reg = dst_reg;
2959 	if (BPF_SRC(insn->code) == BPF_X) {
2960 		src_reg = &regs[insn->src_reg];
2961 		if (src_reg->type != SCALAR_VALUE) {
2962 			if (dst_reg->type != SCALAR_VALUE) {
2963 				/* Combining two pointers by any ALU op yields
2964 				 * an arbitrary scalar. Disallow all math except
2965 				 * pointer subtraction
2966 				 */
2967 				if (opcode == BPF_SUB) {
2968 					mark_reg_unknown(env, regs, insn->dst_reg);
2969 					return 0;
2970 				}
2971 				verbose(env, "R%d pointer %s pointer prohibited\n",
2972 					insn->dst_reg,
2973 					bpf_alu_string[opcode >> 4]);
2974 				return -EACCES;
2975 			} else {
2976 				/* scalar += pointer
2977 				 * This is legal, but we have to reverse our
2978 				 * src/dest handling in computing the range
2979 				 */
2980 				return adjust_ptr_min_max_vals(env, insn,
2981 							       src_reg, dst_reg);
2982 			}
2983 		} else if (ptr_reg) {
2984 			/* pointer += scalar */
2985 			return adjust_ptr_min_max_vals(env, insn,
2986 						       dst_reg, src_reg);
2987 		}
2988 	} else {
2989 		/* Pretend the src is a reg with a known value, since we only
2990 		 * need to be able to read from this state.
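		 * E.g. for an illustrative "r2 += 7" with r2 a pointer, off_reg
		 * is set up below as a known SCALAR_VALUE of 7 and handed to
		 * adjust_ptr_min_max_vals() just like a register source would
		 * be.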
2991 */ 2992 off_reg.type = SCALAR_VALUE; 2993 __mark_reg_known(&off_reg, insn->imm); 2994 src_reg = &off_reg; 2995 if (ptr_reg) /* pointer += K */ 2996 return adjust_ptr_min_max_vals(env, insn, 2997 ptr_reg, src_reg); 2998 } 2999 3000 /* Got here implies adding two SCALAR_VALUEs */ 3001 if (WARN_ON_ONCE(ptr_reg)) { 3002 print_verifier_state(env, state); 3003 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 3004 return -EINVAL; 3005 } 3006 if (WARN_ON(!src_reg)) { 3007 print_verifier_state(env, state); 3008 verbose(env, "verifier internal error: no src_reg\n"); 3009 return -EINVAL; 3010 } 3011 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 3012 } 3013 3014 /* check validity of 32-bit and 64-bit arithmetic operations */ 3015 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 3016 { 3017 struct bpf_reg_state *regs = cur_regs(env); 3018 u8 opcode = BPF_OP(insn->code); 3019 int err; 3020 3021 if (opcode == BPF_END || opcode == BPF_NEG) { 3022 if (opcode == BPF_NEG) { 3023 if (BPF_SRC(insn->code) != 0 || 3024 insn->src_reg != BPF_REG_0 || 3025 insn->off != 0 || insn->imm != 0) { 3026 verbose(env, "BPF_NEG uses reserved fields\n"); 3027 return -EINVAL; 3028 } 3029 } else { 3030 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 3031 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 3032 BPF_CLASS(insn->code) == BPF_ALU64) { 3033 verbose(env, "BPF_END uses reserved fields\n"); 3034 return -EINVAL; 3035 } 3036 } 3037 3038 /* check src operand */ 3039 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3040 if (err) 3041 return err; 3042 3043 if (is_pointer_value(env, insn->dst_reg)) { 3044 verbose(env, "R%d pointer arithmetic prohibited\n", 3045 insn->dst_reg); 3046 return -EACCES; 3047 } 3048 3049 /* check dest operand */ 3050 err = check_reg_arg(env, insn->dst_reg, DST_OP); 3051 if (err) 3052 return err; 3053 3054 } else if (opcode == BPF_MOV) { 3055 3056 if (BPF_SRC(insn->code) == BPF_X) { 3057 if (insn->imm != 0 || insn->off != 0) { 3058 verbose(env, "BPF_MOV uses reserved fields\n"); 3059 return -EINVAL; 3060 } 3061 3062 /* check src operand */ 3063 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3064 if (err) 3065 return err; 3066 } else { 3067 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 3068 verbose(env, "BPF_MOV uses reserved fields\n"); 3069 return -EINVAL; 3070 } 3071 } 3072 3073 /* check dest operand */ 3074 err = check_reg_arg(env, insn->dst_reg, DST_OP); 3075 if (err) 3076 return err; 3077 3078 if (BPF_SRC(insn->code) == BPF_X) { 3079 if (BPF_CLASS(insn->code) == BPF_ALU64) { 3080 /* case: R1 = R2 3081 * copy register state to dest reg 3082 */ 3083 regs[insn->dst_reg] = regs[insn->src_reg]; 3084 regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; 3085 } else { 3086 /* R1 = (u32) R2 */ 3087 if (is_pointer_value(env, insn->src_reg)) { 3088 verbose(env, 3089 "R%d partial copy of pointer\n", 3090 insn->src_reg); 3091 return -EACCES; 3092 } 3093 mark_reg_unknown(env, regs, insn->dst_reg); 3094 coerce_reg_to_size(®s[insn->dst_reg], 4); 3095 } 3096 } else { 3097 /* case: R = imm 3098 * remember the value we stored into this reg 3099 */ 3100 regs[insn->dst_reg].type = SCALAR_VALUE; 3101 if (BPF_CLASS(insn->code) == BPF_ALU64) { 3102 __mark_reg_known(regs + insn->dst_reg, 3103 insn->imm); 3104 } else { 3105 __mark_reg_known(regs + insn->dst_reg, 3106 (u32)insn->imm); 3107 } 3108 } 3109 3110 } else if (opcode > BPF_END) { 3111 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 3112 return -EINVAL; 3113 3114 } else { /* all 
other ALU ops: and, sub, xor, add, ... */ 3115 3116 if (BPF_SRC(insn->code) == BPF_X) { 3117 if (insn->imm != 0 || insn->off != 0) { 3118 verbose(env, "BPF_ALU uses reserved fields\n"); 3119 return -EINVAL; 3120 } 3121 /* check src1 operand */ 3122 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3123 if (err) 3124 return err; 3125 } else { 3126 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 3127 verbose(env, "BPF_ALU uses reserved fields\n"); 3128 return -EINVAL; 3129 } 3130 } 3131 3132 /* check src2 operand */ 3133 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3134 if (err) 3135 return err; 3136 3137 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 3138 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 3139 verbose(env, "div by zero\n"); 3140 return -EINVAL; 3141 } 3142 3143 if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) { 3144 verbose(env, "BPF_ARSH not supported for 32 bit ALU\n"); 3145 return -EINVAL; 3146 } 3147 3148 if ((opcode == BPF_LSH || opcode == BPF_RSH || 3149 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 3150 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 3151 3152 if (insn->imm < 0 || insn->imm >= size) { 3153 verbose(env, "invalid shift %d\n", insn->imm); 3154 return -EINVAL; 3155 } 3156 } 3157 3158 /* check dest operand */ 3159 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 3160 if (err) 3161 return err; 3162 3163 return adjust_reg_min_max_vals(env, insn); 3164 } 3165 3166 return 0; 3167 } 3168 3169 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 3170 struct bpf_reg_state *dst_reg, 3171 enum bpf_reg_type type, 3172 bool range_right_open) 3173 { 3174 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3175 struct bpf_reg_state *regs = state->regs, *reg; 3176 u16 new_range; 3177 int i, j; 3178 3179 if (dst_reg->off < 0 || 3180 (dst_reg->off == 0 && range_right_open)) 3181 /* This doesn't give us any range */ 3182 return; 3183 3184 if (dst_reg->umax_value > MAX_PACKET_OFF || 3185 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 3186 /* Risk of overflow. For instance, ptr + (1<<63) may be less 3187 * than pkt_end, but that's because it's also less than pkt. 3188 */ 3189 return; 3190 3191 new_range = dst_reg->off; 3192 if (range_right_open) 3193 new_range--; 3194 3195 /* Examples for register markings: 3196 * 3197 * pkt_data in dst register: 3198 * 3199 * r2 = r3; 3200 * r2 += 8; 3201 * if (r2 > pkt_end) goto <handle exception> 3202 * <access okay> 3203 * 3204 * r2 = r3; 3205 * r2 += 8; 3206 * if (r2 < pkt_end) goto <access okay> 3207 * <handle exception> 3208 * 3209 * Where: 3210 * r2 == dst_reg, pkt_end == src_reg 3211 * r2=pkt(id=n,off=8,r=0) 3212 * r3=pkt(id=n,off=0,r=0) 3213 * 3214 * pkt_data in src register: 3215 * 3216 * r2 = r3; 3217 * r2 += 8; 3218 * if (pkt_end >= r2) goto <access okay> 3219 * <handle exception> 3220 * 3221 * r2 = r3; 3222 * r2 += 8; 3223 * if (pkt_end <= r2) goto <handle exception> 3224 * <access okay> 3225 * 3226 * Where: 3227 * pkt_end == dst_reg, r2 == src_reg 3228 * r2=pkt(id=n,off=8,r=0) 3229 * r3=pkt(id=n,off=0,r=0) 3230 * 3231 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 3232 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 3233 * and [r3, r3 + 8-1) respectively is safe to access depending on 3234 * the check. 3235 */ 3236 3237 /* If our ids match, then we must have the same max_value. And we 3238 * don't care about the other reg's fixed offset, since if it's too big 3239 * the range won't allow anything. 
3240 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 3241 */ 3242 for (i = 0; i < MAX_BPF_REG; i++) 3243 if (regs[i].type == type && regs[i].id == dst_reg->id) 3244 /* keep the maximum range already checked */ 3245 regs[i].range = max(regs[i].range, new_range); 3246 3247 for (j = 0; j <= vstate->curframe; j++) { 3248 state = vstate->frame[j]; 3249 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 3250 if (state->stack[i].slot_type[0] != STACK_SPILL) 3251 continue; 3252 reg = &state->stack[i].spilled_ptr; 3253 if (reg->type == type && reg->id == dst_reg->id) 3254 reg->range = max(reg->range, new_range); 3255 } 3256 } 3257 } 3258 3259 /* Adjusts the register min/max values in the case that the dst_reg is the 3260 * variable register that we are working on, and src_reg is a constant or we're 3261 * simply doing a BPF_K check. 3262 * In JEQ/JNE cases we also adjust the var_off values. 3263 */ 3264 static void reg_set_min_max(struct bpf_reg_state *true_reg, 3265 struct bpf_reg_state *false_reg, u64 val, 3266 u8 opcode) 3267 { 3268 /* If the dst_reg is a pointer, we can't learn anything about its 3269 * variable offset from the compare (unless src_reg were a pointer into 3270 * the same object, but we don't bother with that. 3271 * Since false_reg and true_reg have the same type by construction, we 3272 * only need to check one of them for pointerness. 3273 */ 3274 if (__is_pointer_value(false, false_reg)) 3275 return; 3276 3277 switch (opcode) { 3278 case BPF_JEQ: 3279 /* If this is false then we know nothing Jon Snow, but if it is 3280 * true then we know for sure. 3281 */ 3282 __mark_reg_known(true_reg, val); 3283 break; 3284 case BPF_JNE: 3285 /* If this is true we know nothing Jon Snow, but if it is false 3286 * we know the value for sure; 3287 */ 3288 __mark_reg_known(false_reg, val); 3289 break; 3290 case BPF_JGT: 3291 false_reg->umax_value = min(false_reg->umax_value, val); 3292 true_reg->umin_value = max(true_reg->umin_value, val + 1); 3293 break; 3294 case BPF_JSGT: 3295 false_reg->smax_value = min_t(s64, false_reg->smax_value, val); 3296 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); 3297 break; 3298 case BPF_JLT: 3299 false_reg->umin_value = max(false_reg->umin_value, val); 3300 true_reg->umax_value = min(true_reg->umax_value, val - 1); 3301 break; 3302 case BPF_JSLT: 3303 false_reg->smin_value = max_t(s64, false_reg->smin_value, val); 3304 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); 3305 break; 3306 case BPF_JGE: 3307 false_reg->umax_value = min(false_reg->umax_value, val - 1); 3308 true_reg->umin_value = max(true_reg->umin_value, val); 3309 break; 3310 case BPF_JSGE: 3311 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); 3312 true_reg->smin_value = max_t(s64, true_reg->smin_value, val); 3313 break; 3314 case BPF_JLE: 3315 false_reg->umin_value = max(false_reg->umin_value, val + 1); 3316 true_reg->umax_value = min(true_reg->umax_value, val); 3317 break; 3318 case BPF_JSLE: 3319 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); 3320 true_reg->smax_value = min_t(s64, true_reg->smax_value, val); 3321 break; 3322 default: 3323 break; 3324 } 3325 3326 __reg_deduce_bounds(false_reg); 3327 __reg_deduce_bounds(true_reg); 3328 /* We might have learned some bits from the bounds. */ 3329 __reg_bound_offset(false_reg); 3330 __reg_bound_offset(true_reg); 3331 /* Intersecting with the old var_off might have improved our bounds 3332 * slightly. e.g. 
if umax was 0x7f...f and var_off was (0; 0xf...fc), 3333 * then new var_off is (0; 0x7f...fc) which improves our umax. 3334 */ 3335 __update_reg_bounds(false_reg); 3336 __update_reg_bounds(true_reg); 3337 } 3338 3339 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 3340 * the variable reg. 3341 */ 3342 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 3343 struct bpf_reg_state *false_reg, u64 val, 3344 u8 opcode) 3345 { 3346 if (__is_pointer_value(false, false_reg)) 3347 return; 3348 3349 switch (opcode) { 3350 case BPF_JEQ: 3351 /* If this is false then we know nothing Jon Snow, but if it is 3352 * true then we know for sure. 3353 */ 3354 __mark_reg_known(true_reg, val); 3355 break; 3356 case BPF_JNE: 3357 /* If this is true we know nothing Jon Snow, but if it is false 3358 * we know the value for sure; 3359 */ 3360 __mark_reg_known(false_reg, val); 3361 break; 3362 case BPF_JGT: 3363 true_reg->umax_value = min(true_reg->umax_value, val - 1); 3364 false_reg->umin_value = max(false_reg->umin_value, val); 3365 break; 3366 case BPF_JSGT: 3367 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); 3368 false_reg->smin_value = max_t(s64, false_reg->smin_value, val); 3369 break; 3370 case BPF_JLT: 3371 true_reg->umin_value = max(true_reg->umin_value, val + 1); 3372 false_reg->umax_value = min(false_reg->umax_value, val); 3373 break; 3374 case BPF_JSLT: 3375 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); 3376 false_reg->smax_value = min_t(s64, false_reg->smax_value, val); 3377 break; 3378 case BPF_JGE: 3379 true_reg->umax_value = min(true_reg->umax_value, val); 3380 false_reg->umin_value = max(false_reg->umin_value, val + 1); 3381 break; 3382 case BPF_JSGE: 3383 true_reg->smax_value = min_t(s64, true_reg->smax_value, val); 3384 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); 3385 break; 3386 case BPF_JLE: 3387 true_reg->umin_value = max(true_reg->umin_value, val); 3388 false_reg->umax_value = min(false_reg->umax_value, val - 1); 3389 break; 3390 case BPF_JSLE: 3391 true_reg->smin_value = max_t(s64, true_reg->smin_value, val); 3392 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); 3393 break; 3394 default: 3395 break; 3396 } 3397 3398 __reg_deduce_bounds(false_reg); 3399 __reg_deduce_bounds(true_reg); 3400 /* We might have learned some bits from the bounds. */ 3401 __reg_bound_offset(false_reg); 3402 __reg_bound_offset(true_reg); 3403 /* Intersecting with the old var_off might have improved our bounds 3404 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 3405 * then new var_off is (0; 0x7f...fc) which improves our umax. 3406 */ 3407 __update_reg_bounds(false_reg); 3408 __update_reg_bounds(true_reg); 3409 } 3410 3411 /* Regs are known to be equal, so intersect their min/max/var_off */ 3412 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 3413 struct bpf_reg_state *dst_reg) 3414 { 3415 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 3416 dst_reg->umin_value); 3417 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 3418 dst_reg->umax_value); 3419 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 3420 dst_reg->smin_value); 3421 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 3422 dst_reg->smax_value); 3423 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 3424 dst_reg->var_off); 3425 /* We might have learned new bounds from the var_off. 
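	 * E.g. (hypothetical bounds): after the min/max and var_off
	 * intersections above, two regs previously known to be in [0, 100]
	 * and [50, 200] that a JEQ proves equal both end up bounded by
	 * [50, 100].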
 */
3426 	__update_reg_bounds(src_reg);
3427 	__update_reg_bounds(dst_reg);
3428 	/* We might have learned something about the sign bit. */
3429 	__reg_deduce_bounds(src_reg);
3430 	__reg_deduce_bounds(dst_reg);
3431 	/* We might have learned some bits from the bounds. */
3432 	__reg_bound_offset(src_reg);
3433 	__reg_bound_offset(dst_reg);
3434 	/* Intersecting with the old var_off might have improved our bounds
3435 	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3436 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
3437 	 */
3438 	__update_reg_bounds(src_reg);
3439 	__update_reg_bounds(dst_reg);
3440 }
3441 
3442 static void reg_combine_min_max(struct bpf_reg_state *true_src,
3443 				struct bpf_reg_state *true_dst,
3444 				struct bpf_reg_state *false_src,
3445 				struct bpf_reg_state *false_dst,
3446 				u8 opcode)
3447 {
3448 	switch (opcode) {
3449 	case BPF_JEQ:
3450 		__reg_combine_min_max(true_src, true_dst);
3451 		break;
3452 	case BPF_JNE:
3453 		__reg_combine_min_max(false_src, false_dst);
3454 		break;
3455 	}
3456 }
3457 
3458 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
3459 			 bool is_null)
3460 {
3461 	struct bpf_reg_state *reg = &regs[regno];
3462 
3463 	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
3464 		/* Old offset (both fixed and variable parts) should
3465 		 * have been known-zero, because we don't allow pointer
3466 		 * arithmetic on pointers that might be NULL.
3467 		 */
3468 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
3469 				 !tnum_equals_const(reg->var_off, 0) ||
3470 				 reg->off)) {
3471 			__mark_reg_known_zero(reg);
3472 			reg->off = 0;
3473 		}
3474 		if (is_null) {
3475 			reg->type = SCALAR_VALUE;
3476 		} else if (reg->map_ptr->inner_map_meta) {
3477 			reg->type = CONST_PTR_TO_MAP;
3478 			reg->map_ptr = reg->map_ptr->inner_map_meta;
3479 		} else {
3480 			reg->type = PTR_TO_MAP_VALUE;
3481 		}
3482 		/* We don't need id from this point onwards anymore, thus we
3483 		 * should better reset it, so that state pruning has chances
3484 		 * to take effect.
3485 		 */
3486 		reg->id = 0;
3487 	}
3488 }
3489 
3490 /* The logic is similar to find_good_pkt_pointers(), both could eventually
3491  * be folded together at some point.
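 * Illustrative sketch of what the propagation achieves:
 *   call bpf_map_lookup_elem   -> r0 = map_value_or_null with some id N
 *   r6 = r0                    -> r6 shares id N
 *   if r0 == 0 goto <miss>
 *   ...                        -> on this path both r0 and r6 are now
 *                                 PTR_TO_MAP_VALUE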
3492 */ 3493 static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, 3494 bool is_null) 3495 { 3496 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3497 struct bpf_reg_state *regs = state->regs; 3498 u32 id = regs[regno].id; 3499 int i, j; 3500 3501 for (i = 0; i < MAX_BPF_REG; i++) 3502 mark_map_reg(regs, i, id, is_null); 3503 3504 for (j = 0; j <= vstate->curframe; j++) { 3505 state = vstate->frame[j]; 3506 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 3507 if (state->stack[i].slot_type[0] != STACK_SPILL) 3508 continue; 3509 mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); 3510 } 3511 } 3512 } 3513 3514 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 3515 struct bpf_reg_state *dst_reg, 3516 struct bpf_reg_state *src_reg, 3517 struct bpf_verifier_state *this_branch, 3518 struct bpf_verifier_state *other_branch) 3519 { 3520 if (BPF_SRC(insn->code) != BPF_X) 3521 return false; 3522 3523 switch (BPF_OP(insn->code)) { 3524 case BPF_JGT: 3525 if ((dst_reg->type == PTR_TO_PACKET && 3526 src_reg->type == PTR_TO_PACKET_END) || 3527 (dst_reg->type == PTR_TO_PACKET_META && 3528 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 3529 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 3530 find_good_pkt_pointers(this_branch, dst_reg, 3531 dst_reg->type, false); 3532 } else if ((dst_reg->type == PTR_TO_PACKET_END && 3533 src_reg->type == PTR_TO_PACKET) || 3534 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 3535 src_reg->type == PTR_TO_PACKET_META)) { 3536 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 3537 find_good_pkt_pointers(other_branch, src_reg, 3538 src_reg->type, true); 3539 } else { 3540 return false; 3541 } 3542 break; 3543 case BPF_JLT: 3544 if ((dst_reg->type == PTR_TO_PACKET && 3545 src_reg->type == PTR_TO_PACKET_END) || 3546 (dst_reg->type == PTR_TO_PACKET_META && 3547 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 3548 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 3549 find_good_pkt_pointers(other_branch, dst_reg, 3550 dst_reg->type, true); 3551 } else if ((dst_reg->type == PTR_TO_PACKET_END && 3552 src_reg->type == PTR_TO_PACKET) || 3553 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 3554 src_reg->type == PTR_TO_PACKET_META)) { 3555 /* pkt_end < pkt_data', pkt_data > pkt_meta' */ 3556 find_good_pkt_pointers(this_branch, src_reg, 3557 src_reg->type, false); 3558 } else { 3559 return false; 3560 } 3561 break; 3562 case BPF_JGE: 3563 if ((dst_reg->type == PTR_TO_PACKET && 3564 src_reg->type == PTR_TO_PACKET_END) || 3565 (dst_reg->type == PTR_TO_PACKET_META && 3566 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 3567 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 3568 find_good_pkt_pointers(this_branch, dst_reg, 3569 dst_reg->type, true); 3570 } else if ((dst_reg->type == PTR_TO_PACKET_END && 3571 src_reg->type == PTR_TO_PACKET) || 3572 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 3573 src_reg->type == PTR_TO_PACKET_META)) { 3574 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 3575 find_good_pkt_pointers(other_branch, src_reg, 3576 src_reg->type, false); 3577 } else { 3578 return false; 3579 } 3580 break; 3581 case BPF_JLE: 3582 if ((dst_reg->type == PTR_TO_PACKET && 3583 src_reg->type == PTR_TO_PACKET_END) || 3584 (dst_reg->type == PTR_TO_PACKET_META && 3585 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 3586 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 3587 find_good_pkt_pointers(other_branch, dst_reg, 3588 dst_reg->type, false); 3589 } else if ((dst_reg->type == 
PTR_TO_PACKET_END && 3590 src_reg->type == PTR_TO_PACKET) || 3591 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 3592 src_reg->type == PTR_TO_PACKET_META)) { 3593 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 3594 find_good_pkt_pointers(this_branch, src_reg, 3595 src_reg->type, true); 3596 } else { 3597 return false; 3598 } 3599 break; 3600 default: 3601 return false; 3602 } 3603 3604 return true; 3605 } 3606 3607 static int check_cond_jmp_op(struct bpf_verifier_env *env, 3608 struct bpf_insn *insn, int *insn_idx) 3609 { 3610 struct bpf_verifier_state *this_branch = env->cur_state; 3611 struct bpf_verifier_state *other_branch; 3612 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 3613 struct bpf_reg_state *dst_reg, *other_branch_regs; 3614 u8 opcode = BPF_OP(insn->code); 3615 int err; 3616 3617 if (opcode > BPF_JSLE) { 3618 verbose(env, "invalid BPF_JMP opcode %x\n", opcode); 3619 return -EINVAL; 3620 } 3621 3622 if (BPF_SRC(insn->code) == BPF_X) { 3623 if (insn->imm != 0) { 3624 verbose(env, "BPF_JMP uses reserved fields\n"); 3625 return -EINVAL; 3626 } 3627 3628 /* check src1 operand */ 3629 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3630 if (err) 3631 return err; 3632 3633 if (is_pointer_value(env, insn->src_reg)) { 3634 verbose(env, "R%d pointer comparison prohibited\n", 3635 insn->src_reg); 3636 return -EACCES; 3637 } 3638 } else { 3639 if (insn->src_reg != BPF_REG_0) { 3640 verbose(env, "BPF_JMP uses reserved fields\n"); 3641 return -EINVAL; 3642 } 3643 } 3644 3645 /* check src2 operand */ 3646 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3647 if (err) 3648 return err; 3649 3650 dst_reg = &regs[insn->dst_reg]; 3651 3652 /* detect if R == 0 where R was initialized to zero earlier */ 3653 if (BPF_SRC(insn->code) == BPF_K && 3654 (opcode == BPF_JEQ || opcode == BPF_JNE) && 3655 dst_reg->type == SCALAR_VALUE && 3656 tnum_is_const(dst_reg->var_off)) { 3657 if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) || 3658 (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) { 3659 /* if (imm == imm) goto pc+off; 3660 * only follow the goto, ignore fall-through 3661 */ 3662 *insn_idx += insn->off; 3663 return 0; 3664 } else { 3665 /* if (imm != imm) goto pc+off; 3666 * only follow fall-through branch, since 3667 * that's where the program will go 3668 */ 3669 return 0; 3670 } 3671 } 3672 3673 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); 3674 if (!other_branch) 3675 return -EFAULT; 3676 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 3677 3678 /* detect if we are comparing against a constant value so we can adjust 3679 * our min/max values for our dst register. 3680 * this is only legit if both are scalars (or pointers to the same 3681 * object, I suppose, but we don't support that right now), because 3682 * otherwise the different base pointers mean the offsets aren't 3683 * comparable.
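* e.g. (illustrative): for a scalar r1 and "if r1 > 7 goto ...", the taken branch can tighten r1->umin_value to 8 while the fall-through branch can tighten r1->umax_value to 7.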
3684 */ 3685 if (BPF_SRC(insn->code) == BPF_X) { 3686 if (dst_reg->type == SCALAR_VALUE && 3687 regs[insn->src_reg].type == SCALAR_VALUE) { 3688 if (tnum_is_const(regs[insn->src_reg].var_off)) 3689 reg_set_min_max(&other_branch_regs[insn->dst_reg], 3690 dst_reg, regs[insn->src_reg].var_off.value, 3691 opcode); 3692 else if (tnum_is_const(dst_reg->var_off)) 3693 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], 3694 &regs[insn->src_reg], 3695 dst_reg->var_off.value, opcode); 3696 else if (opcode == BPF_JEQ || opcode == BPF_JNE) 3697 /* Comparing for equality, we can combine knowledge */ 3698 reg_combine_min_max(&other_branch_regs[insn->src_reg], 3699 &other_branch_regs[insn->dst_reg], 3700 &regs[insn->src_reg], 3701 &regs[insn->dst_reg], opcode); 3702 } 3703 } else if (dst_reg->type == SCALAR_VALUE) { 3704 reg_set_min_max(&other_branch_regs[insn->dst_reg], 3705 dst_reg, insn->imm, opcode); 3706 } 3707 3708 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ 3709 if (BPF_SRC(insn->code) == BPF_K && 3710 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 3711 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 3712 /* Mark all identical map registers in each branch as either 3713 * safe or unknown, depending on whether the R == 0 or R != 0 condition holds. 3714 */ 3715 mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); 3716 mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); 3717 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], 3718 this_branch, other_branch) && 3719 is_pointer_value(env, insn->dst_reg)) { 3720 verbose(env, "R%d pointer comparison prohibited\n", 3721 insn->dst_reg); 3722 return -EACCES; 3723 } 3724 if (env->log.level) 3725 print_verifier_state(env, this_branch->frame[this_branch->curframe]); 3726 return 0; 3727 } 3728 3729 /* return the map pointer stored inside BPF_LD_IMM64 instruction */ 3730 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) 3731 { 3732 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; 3733 3734 return (struct bpf_map *) (unsigned long) imm64; 3735 } 3736 3737 /* verify BPF_LD_IMM64 instruction */ 3738 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 3739 { 3740 struct bpf_reg_state *regs = cur_regs(env); 3741 int err; 3742 3743 if (BPF_SIZE(insn->code) != BPF_DW) { 3744 verbose(env, "invalid BPF_LD_IMM insn\n"); 3745 return -EINVAL; 3746 } 3747 if (insn->off != 0) { 3748 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 3749 return -EINVAL; 3750 } 3751 3752 err = check_reg_arg(env, insn->dst_reg, DST_OP); 3753 if (err) 3754 return err; 3755 3756 if (insn->src_reg == 0) { 3757 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 3758 3759 regs[insn->dst_reg].type = SCALAR_VALUE; 3760 __mark_reg_known(&regs[insn->dst_reg], imm); 3761 return 0; 3762 } 3763 3764 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ 3765 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); 3766 3767 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 3768 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); 3769 return 0; 3770 } 3771 3772 static bool may_access_skb(enum bpf_prog_type type) 3773 { 3774 switch (type) { 3775 case BPF_PROG_TYPE_SOCKET_FILTER: 3776 case BPF_PROG_TYPE_SCHED_CLS: 3777 case BPF_PROG_TYPE_SCHED_ACT: 3778 return true; 3779 default: 3780 return false; 3781 } 3782 } 3783 3784 /* verify safety of LD_ABS|LD_IND instructions: 3785 * - they can only appear in the programs where ctx == skb 3786 * - since they are wrappers of function calls, they
scratch R1-R5 registers, 3787 * preserve R6-R9, and store return value into R0 3788 * 3789 * Implicit input: 3790 * ctx == skb == R6 == CTX 3791 * 3792 * Explicit input: 3793 * SRC == any register 3794 * IMM == 32-bit immediate 3795 * 3796 * Output: 3797 * R0 - 8/16/32-bit skb data converted to cpu endianness 3798 */ 3799 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 3800 { 3801 struct bpf_reg_state *regs = cur_regs(env); 3802 u8 mode = BPF_MODE(insn->code); 3803 int i, err; 3804 3805 if (!may_access_skb(env->prog->type)) { 3806 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 3807 return -EINVAL; 3808 } 3809 3810 if (env->subprog_cnt) { 3811 /* when program has LD_ABS insn JITs and interpreter assume 3812 * that r1 == ctx == skb which is not the case for callees 3813 * that can have arbitrary arguments. It's problematic 3814 * for main prog as well since JITs would need to analyze 3815 * all functions in order to make proper register save/restore 3816 * decisions in the main prog. Hence disallow LD_ABS with calls 3817 */ 3818 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); 3819 return -EINVAL; 3820 } 3821 3822 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 3823 BPF_SIZE(insn->code) == BPF_DW || 3824 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 3825 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 3826 return -EINVAL; 3827 } 3828 3829 /* check whether implicit source operand (register R6) is readable */ 3830 err = check_reg_arg(env, BPF_REG_6, SRC_OP); 3831 if (err) 3832 return err; 3833 3834 if (regs[BPF_REG_6].type != PTR_TO_CTX) { 3835 verbose(env, 3836 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 3837 return -EINVAL; 3838 } 3839 3840 if (mode == BPF_IND) { 3841 /* check explicit source operand */ 3842 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3843 if (err) 3844 return err; 3845 } 3846 3847 /* reset caller saved regs to unreadable */ 3848 for (i = 0; i < CALLER_SAVED_REGS; i++) { 3849 mark_reg_not_init(env, regs, caller_saved[i]); 3850 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 3851 } 3852 3853 /* mark destination R0 register as readable, since it contains 3854 * the value fetched from the packet. 3855 * Already marked as written above. 
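* e.g. (illustrative): BPF_LD_ABS(BPF_H, 12) fetches the 16-bit value at skb byte offset 12 (the EtherType of an Ethernet frame) into R0, already converted to CPU endianness.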
3856 */ 3857 mark_reg_unknown(env, regs, BPF_REG_0); 3858 return 0; 3859 } 3860 3861 static int check_return_code(struct bpf_verifier_env *env) 3862 { 3863 struct bpf_reg_state *reg; 3864 struct tnum range = tnum_range(0, 1); 3865 3866 switch (env->prog->type) { 3867 case BPF_PROG_TYPE_CGROUP_SKB: 3868 case BPF_PROG_TYPE_CGROUP_SOCK: 3869 case BPF_PROG_TYPE_SOCK_OPS: 3870 case BPF_PROG_TYPE_CGROUP_DEVICE: 3871 break; 3872 default: 3873 return 0; 3874 } 3875 3876 reg = cur_regs(env) + BPF_REG_0; 3877 if (reg->type != SCALAR_VALUE) { 3878 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 3879 reg_type_str[reg->type]); 3880 return -EINVAL; 3881 } 3882 3883 if (!tnum_in(range, reg->var_off)) { 3884 verbose(env, "At program exit the register R0 "); 3885 if (!tnum_is_unknown(reg->var_off)) { 3886 char tn_buf[48]; 3887 3888 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3889 verbose(env, "has value %s", tn_buf); 3890 } else { 3891 verbose(env, "has unknown scalar value"); 3892 } 3893 verbose(env, " should have been 0 or 1\n"); 3894 return -EINVAL; 3895 } 3896 return 0; 3897 } 3898 3899 /* non-recursive DFS pseudo code 3900 * 1 procedure DFS-iterative(G,v): 3901 * 2 label v as discovered 3902 * 3 let S be a stack 3903 * 4 S.push(v) 3904 * 5 while S is not empty 3905 * 6 t <- S.pop() 3906 * 7 if t is what we're looking for: 3907 * 8 return t 3908 * 9 for all edges e in G.adjacentEdges(t) do 3909 * 10 if edge e is already labelled 3910 * 11 continue with the next edge 3911 * 12 w <- G.adjacentVertex(t,e) 3912 * 13 if vertex w is not discovered and not explored 3913 * 14 label e as tree-edge 3914 * 15 label w as discovered 3915 * 16 S.push(w) 3916 * 17 continue at 5 3917 * 18 else if vertex w is discovered 3918 * 19 label e as back-edge 3919 * 20 else 3920 * 21 // vertex w is explored 3921 * 22 label e as forward- or cross-edge 3922 * 23 label t as explored 3923 * 24 S.pop() 3924 * 3925 * convention: 3926 * 0x10 - discovered 3927 * 0x11 - discovered and fall-through edge labelled 3928 * 0x12 - discovered and fall-through and branch edges labelled 3929 * 0x20 - explored 3930 */ 3931 3932 enum { 3933 DISCOVERED = 0x10, 3934 EXPLORED = 0x20, 3935 FALLTHROUGH = 1, 3936 BRANCH = 2, 3937 }; 3938 3939 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) 3940 3941 static int *insn_stack; /* stack of insns to process */ 3942 static int cur_stack; /* current stack index */ 3943 static int *insn_state; 3944 3945 /* t, w, e - match pseudo-code above: 3946 * t - index of current instruction 3947 * w - next instruction 3948 * e - edge 3949 */ 3950 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) 3951 { 3952 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 3953 return 0; 3954 3955 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 3956 return 0; 3957 3958 if (w < 0 || w >= env->prog->len) { 3959 verbose(env, "jump out of range from insn %d to %d\n", t, w); 3960 return -EINVAL; 3961 } 3962 3963 if (e == BRANCH) 3964 /* mark branch target for state pruning */ 3965 env->explored_states[w] = STATE_LIST_MARK; 3966 3967 if (insn_state[w] == 0) { 3968 /* tree-edge */ 3969 insn_state[t] = DISCOVERED | e; 3970 insn_state[w] = DISCOVERED; 3971 if (cur_stack >= env->prog->len) 3972 return -E2BIG; 3973 insn_stack[cur_stack++] = w; 3974 return 1; 3975 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 3976 verbose(env, "back-edge from insn %d to %d\n", t, w); 3977 return -EINVAL; 3978 } else if (insn_state[w] == EXPLORED) { 3979 /* 
forward- or cross-edge */ 3980 insn_state[t] = DISCOVERED | e; 3981 } else { 3982 verbose(env, "insn state internal bug\n"); 3983 return -EFAULT; 3984 } 3985 return 0; 3986 } 3987 3988 /* non-recursive depth-first-search to detect loops in BPF program 3989 * loop == back-edge in directed graph 3990 */ 3991 static int check_cfg(struct bpf_verifier_env *env) 3992 { 3993 struct bpf_insn *insns = env->prog->insnsi; 3994 int insn_cnt = env->prog->len; 3995 int ret = 0; 3996 int i, t; 3997 3998 ret = check_subprogs(env); 3999 if (ret < 0) 4000 return ret; 4001 4002 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 4003 if (!insn_state) 4004 return -ENOMEM; 4005 4006 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 4007 if (!insn_stack) { 4008 kfree(insn_state); 4009 return -ENOMEM; 4010 } 4011 4012 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 4013 insn_stack[0] = 0; /* 0 is the first instruction */ 4014 cur_stack = 1; 4015 4016 peek_stack: 4017 if (cur_stack == 0) 4018 goto check_state; 4019 t = insn_stack[cur_stack - 1]; 4020 4021 if (BPF_CLASS(insns[t].code) == BPF_JMP) { 4022 u8 opcode = BPF_OP(insns[t].code); 4023 4024 if (opcode == BPF_EXIT) { 4025 goto mark_explored; 4026 } else if (opcode == BPF_CALL) { 4027 ret = push_insn(t, t + 1, FALLTHROUGH, env); 4028 if (ret == 1) 4029 goto peek_stack; 4030 else if (ret < 0) 4031 goto err_free; 4032 if (t + 1 < insn_cnt) 4033 env->explored_states[t + 1] = STATE_LIST_MARK; 4034 if (insns[t].src_reg == BPF_PSEUDO_CALL) { 4035 env->explored_states[t] = STATE_LIST_MARK; 4036 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); 4037 if (ret == 1) 4038 goto peek_stack; 4039 else if (ret < 0) 4040 goto err_free; 4041 } 4042 } else if (opcode == BPF_JA) { 4043 if (BPF_SRC(insns[t].code) != BPF_K) { 4044 ret = -EINVAL; 4045 goto err_free; 4046 } 4047 /* unconditional jump with single edge */ 4048 ret = push_insn(t, t + insns[t].off + 1, 4049 FALLTHROUGH, env); 4050 if (ret == 1) 4051 goto peek_stack; 4052 else if (ret < 0) 4053 goto err_free; 4054 /* tell verifier to check for equivalent states 4055 * after every call and jump 4056 */ 4057 if (t + 1 < insn_cnt) 4058 env->explored_states[t + 1] = STATE_LIST_MARK; 4059 } else { 4060 /* conditional jump with two edges */ 4061 env->explored_states[t] = STATE_LIST_MARK; 4062 ret = push_insn(t, t + 1, FALLTHROUGH, env); 4063 if (ret == 1) 4064 goto peek_stack; 4065 else if (ret < 0) 4066 goto err_free; 4067 4068 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); 4069 if (ret == 1) 4070 goto peek_stack; 4071 else if (ret < 0) 4072 goto err_free; 4073 } 4074 } else { 4075 /* all other non-branch instructions with single 4076 * fall-through edge 4077 */ 4078 ret = push_insn(t, t + 1, FALLTHROUGH, env); 4079 if (ret == 1) 4080 goto peek_stack; 4081 else if (ret < 0) 4082 goto err_free; 4083 } 4084 4085 mark_explored: 4086 insn_state[t] = EXPLORED; 4087 if (cur_stack-- <= 0) { 4088 verbose(env, "pop stack internal bug\n"); 4089 ret = -EFAULT; 4090 goto err_free; 4091 } 4092 goto peek_stack; 4093 4094 check_state: 4095 for (i = 0; i < insn_cnt; i++) { 4096 if (insn_state[i] != EXPLORED) { 4097 verbose(env, "unreachable insn %d\n", i); 4098 ret = -EINVAL; 4099 goto err_free; 4100 } 4101 } 4102 ret = 0; /* cfg looks good */ 4103 4104 err_free: 4105 kfree(insn_state); 4106 kfree(insn_stack); 4107 return ret; 4108 } 4109 4110 /* check %cur's range satisfies %old's */ 4111 static bool range_within(struct bpf_reg_state *old, 4112 struct bpf_reg_state *cur) 4113 { 4114 return 
old->umin_value <= cur->umin_value && 4115 old->umax_value >= cur->umax_value && 4116 old->smin_value <= cur->smin_value && 4117 old->smax_value >= cur->smax_value; 4118 } 4119 4120 /* Maximum number of register states that can exist at once */ 4121 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 4122 struct idpair { 4123 u32 old; 4124 u32 cur; 4125 }; 4126 4127 /* If in the old state two registers had the same id, then they need to have 4128 * the same id in the new state as well. But that id could be different from 4129 * the old state, so we need to track the mapping from old to new ids. 4130 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 4131 * regs with old id 5 must also have new id 9 for the new state to be safe. But 4132 * regs with a different old id could still have new id 9, we don't care about 4133 * that. 4134 * So we look through our idmap to see if this old id has been seen before. If 4135 * so, we require the new id to match; otherwise, we add the id pair to the map. 4136 */ 4137 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) 4138 { 4139 unsigned int i; 4140 4141 for (i = 0; i < ID_MAP_SIZE; i++) { 4142 if (!idmap[i].old) { 4143 /* Reached an empty slot; haven't seen this id before */ 4144 idmap[i].old = old_id; 4145 idmap[i].cur = cur_id; 4146 return true; 4147 } 4148 if (idmap[i].old == old_id) 4149 return idmap[i].cur == cur_id; 4150 } 4151 /* We ran out of idmap slots, which should be impossible */ 4152 WARN_ON_ONCE(1); 4153 return false; 4154 } 4155 4156 /* Returns true if (rold safe implies rcur safe) */ 4157 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 4158 struct idpair *idmap) 4159 { 4160 bool equal; 4161 4162 if (!(rold->live & REG_LIVE_READ)) 4163 /* explored state didn't use this */ 4164 return true; 4165 4166 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0; 4167 4168 if (rold->type == PTR_TO_STACK) 4169 /* two stack pointers are equal only if they're pointing to 4170 * the same stack frame, since fp-8 in foo != fp-8 in bar 4171 */ 4172 return equal && rold->frameno == rcur->frameno; 4173 4174 if (equal) 4175 return true; 4176 4177 if (rold->type == NOT_INIT) 4178 /* explored state can't have used this */ 4179 return true; 4180 if (rcur->type == NOT_INIT) 4181 return false; 4182 switch (rold->type) { 4183 case SCALAR_VALUE: 4184 if (rcur->type == SCALAR_VALUE) { 4185 /* new val must satisfy old val knowledge */ 4186 return range_within(rold, rcur) && 4187 tnum_in(rold->var_off, rcur->var_off); 4188 } else { 4189 /* We're trying to use a pointer in place of a scalar. 4190 * Even if the scalar was unbounded, this could lead to 4191 * pointer leaks because scalars are allowed to leak 4192 * while pointers are not. We could make this safe in 4193 * special cases if root is calling us, but it's 4194 * probably not worth the hassle. 4195 */ 4196 return false; 4197 } 4198 case PTR_TO_MAP_VALUE: 4199 /* If the new min/max/var_off satisfy the old ones and 4200 * everything else matches, we are OK. 4201 * We don't care about the 'id' value, because nothing 4202 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) 4203 */ 4204 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 4205 range_within(rold, rcur) && 4206 tnum_in(rold->var_off, rcur->var_off); 4207 case PTR_TO_MAP_VALUE_OR_NULL: 4208 /* a PTR_TO_MAP_VALUE could be safe to use as a 4209 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 
4210 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 4211 * checked, doing so could have affected others with the same 4212 * id, and we can't check for that because we lost the id when 4213 * we converted to a PTR_TO_MAP_VALUE. 4214 */ 4215 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 4216 return false; 4217 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 4218 return false; 4219 /* Check our ids match any regs they're supposed to */ 4220 return check_ids(rold->id, rcur->id, idmap); 4221 case PTR_TO_PACKET_META: 4222 case PTR_TO_PACKET: 4223 if (rcur->type != rold->type) 4224 return false; 4225 /* We must have at least as much range as the old ptr 4226 * did, so that any accesses which were safe before are 4227 * still safe. This is true even if old range < old off, 4228 * since someone could have accessed through (ptr - k), or 4229 * even done ptr -= k in a register, to get a safe access. 4230 */ 4231 if (rold->range > rcur->range) 4232 return false; 4233 /* If the offsets don't match, we can't trust our alignment; 4234 * nor can we be sure that we won't fall out of range. 4235 */ 4236 if (rold->off != rcur->off) 4237 return false; 4238 /* id relations must be preserved */ 4239 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 4240 return false; 4241 /* new val must satisfy old val knowledge */ 4242 return range_within(rold, rcur) && 4243 tnum_in(rold->var_off, rcur->var_off); 4244 case PTR_TO_CTX: 4245 case CONST_PTR_TO_MAP: 4246 case PTR_TO_PACKET_END: 4247 /* Only valid matches are exact, which memcmp() above 4248 * would have accepted 4249 */ 4250 default: 4251 /* Don't know what's going on, just say it's not safe */ 4252 return false; 4253 } 4254 4255 /* Shouldn't get here; if we do, say it's not safe */ 4256 WARN_ON_ONCE(1); 4257 return false; 4258 } 4259 4260 static bool stacksafe(struct bpf_func_state *old, 4261 struct bpf_func_state *cur, 4262 struct idpair *idmap) 4263 { 4264 int i, spi; 4265 4266 /* if explored stack has more populated slots than current stack 4267 * such stacks are not equivalent 4268 */ 4269 if (old->allocated_stack > cur->allocated_stack) 4270 return false; 4271 4272 /* walk slots of the explored stack and ignore any additional 4273 * slots in the current stack, since explored(safe) state 4274 * didn't use them 4275 */ 4276 for (i = 0; i < old->allocated_stack; i++) { 4277 spi = i / BPF_REG_SIZE; 4278 4279 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) 4280 /* explored state didn't use this */ 4281 continue; 4282 4283 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 4284 continue; 4285 /* if old state was safe with misc data in the stack 4286 * it will be safe with zero-initialized stack. 
* The opposite is not true 4288 */ 4289 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 4290 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 4291 continue; 4292 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 4293 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 4294 /* Ex: old explored (safe) state has STACK_SPILL in 4295 * this stack slot, but current has STACK_MISC -> 4296 * these verifier states are not equivalent, 4297 * return false to continue verification of this path 4298 */ 4299 return false; 4300 if (i % BPF_REG_SIZE) 4301 continue; 4302 if (old->stack[spi].slot_type[0] != STACK_SPILL) 4303 continue; 4304 if (!regsafe(&old->stack[spi].spilled_ptr, 4305 &cur->stack[spi].spilled_ptr, 4306 idmap)) 4307 /* when explored and current stack slot are both storing 4308 * spilled registers, check that stored pointer types 4309 * are the same as well. 4310 * Ex: explored safe path could have stored 4311 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 4312 * but current path has stored: 4313 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 4314 * such verifier states are not equivalent. 4315 * return false to continue verification of this path 4316 */ 4317 return false; 4318 } 4319 return true; 4320 } 4321 4322 /* compare two verifier states 4323 * 4324 * all states stored in state_list are known to be valid, since 4325 * verifier reached 'bpf_exit' instruction through them 4326 * 4327 * this function is called when the verifier explores a branch of 4328 * execution popped from the state stack. If it sees an old state that has 4329 * more strict register state and more strict stack state then this execution 4330 * branch doesn't need to be explored further, since verifier already 4331 * concluded that more strict state leads to valid finish. 4332 * 4333 * Therefore two states are equivalent if register state is more conservative 4334 * and explored stack state is more conservative than the current one. 4335 * Example: 4336 * explored current 4337 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 4338 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 4339 * 4340 * In other words if current stack state (one being explored) has more 4341 * valid slots than old one that already passed validation, it means 4342 * the verifier can stop exploring and conclude that current state is valid too 4343 * 4344 * Similarly with registers.
If explored state has register type as invalid 4345 * whereas register type in current state is meaningful, it means that 4346 * the current state will reach 'bpf_exit' instruction safely 4347 */ 4348 static bool func_states_equal(struct bpf_func_state *old, 4349 struct bpf_func_state *cur) 4350 { 4351 struct idpair *idmap; 4352 bool ret = false; 4353 int i; 4354 4355 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); 4356 /* If we failed to allocate the idmap, just say it's not safe */ 4357 if (!idmap) 4358 return false; 4359 4360 for (i = 0; i < MAX_BPF_REG; i++) { 4361 if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) 4362 goto out_free; 4363 } 4364 4365 if (!stacksafe(old, cur, idmap)) 4366 goto out_free; 4367 ret = true; 4368 out_free: 4369 kfree(idmap); 4370 return ret; 4371 } 4372 4373 static bool states_equal(struct bpf_verifier_env *env, 4374 struct bpf_verifier_state *old, 4375 struct bpf_verifier_state *cur) 4376 { 4377 int i; 4378 4379 if (old->curframe != cur->curframe) 4380 return false; 4381 4382 /* for states to be equal callsites have to be the same 4383 * and all frame states need to be equivalent 4384 */ 4385 for (i = 0; i <= old->curframe; i++) { 4386 if (old->frame[i]->callsite != cur->frame[i]->callsite) 4387 return false; 4388 if (!func_states_equal(old->frame[i], cur->frame[i])) 4389 return false; 4390 } 4391 return true; 4392 } 4393 4394 /* A write screens off any subsequent reads; but write marks come from the 4395 * straight-line code between a state and its parent. When we arrive at an 4396 * equivalent state (jump target or such) we didn't arrive by the straight-line 4397 * code, so read marks in the state must propagate to the parent regardless 4398 * of the state's write marks. That's what 'parent == state->parent' comparison 4399 * in mark_reg_read() and mark_stack_slot_read() is for. 4400 */ 4401 static int propagate_liveness(struct bpf_verifier_env *env, 4402 const struct bpf_verifier_state *vstate, 4403 struct bpf_verifier_state *vparent) 4404 { 4405 int i, frame, err = 0; 4406 struct bpf_func_state *state, *parent; 4407 4408 if (vparent->curframe != vstate->curframe) { 4409 WARN(1, "propagate_live: parent frame %d current frame %d\n", 4410 vparent->curframe, vstate->curframe); 4411 return -EFAULT; 4412 } 4413 /* Propagate read liveness of registers... */ 4414 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 4415 /* We don't need to worry about FP liveness because it's read-only */ 4416 for (i = 0; i < BPF_REG_FP; i++) { 4417 if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) 4418 continue; 4419 if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { 4420 err = mark_reg_read(env, vstate, vparent, i); 4421 if (err) 4422 return err; 4423 } 4424 } 4425 4426 /* ... 
and stack slots */ 4427 for (frame = 0; frame <= vstate->curframe; frame++) { 4428 state = vstate->frame[frame]; 4429 parent = vparent->frame[frame]; 4430 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 4431 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 4432 if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) 4433 continue; 4434 if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) 4435 mark_stack_slot_read(env, vstate, vparent, i, frame); 4436 } 4437 } 4438 return err; 4439 } 4440 4441 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 4442 { 4443 struct bpf_verifier_state_list *new_sl; 4444 struct bpf_verifier_state_list *sl; 4445 struct bpf_verifier_state *cur = env->cur_state; 4446 int i, j, err; 4447 4448 sl = env->explored_states[insn_idx]; 4449 if (!sl) 4450 /* this 'insn_idx' instruction wasn't marked, so we will not 4451 * be doing state search here 4452 */ 4453 return 0; 4454 4455 while (sl != STATE_LIST_MARK) { 4456 if (states_equal(env, &sl->state, cur)) { 4457 /* reached equivalent register/stack state, 4458 * prune the search. 4459 * Registers read by the continuation are read by us. 4460 * If we have any write marks in env->cur_state, they 4461 * will prevent corresponding reads in the continuation 4462 * from reaching our parent (an explored_state). Our 4463 * own state will get the read marks recorded, but 4464 * they'll be immediately forgotten as we're pruning 4465 * this state and will pop a new one. 4466 */ 4467 err = propagate_liveness(env, &sl->state, cur); 4468 if (err) 4469 return err; 4470 return 1; 4471 } 4472 sl = sl->next; 4473 } 4474 4475 /* there were no equivalent states, remember current one. 4476 * technically the current state is not proven to be safe yet, 4477 * but it will either reach outer most bpf_exit (which means it's safe) 4478 * or it will be rejected. Since there are no loops, we won't be 4479 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 4480 * again on the way to bpf_exit 4481 */ 4482 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 4483 if (!new_sl) 4484 return -ENOMEM; 4485 4486 /* add new state to the head of linked list */ 4487 err = copy_verifier_state(&new_sl->state, cur); 4488 if (err) { 4489 free_verifier_state(&new_sl->state, false); 4490 kfree(new_sl); 4491 return err; 4492 } 4493 new_sl->next = env->explored_states[insn_idx]; 4494 env->explored_states[insn_idx] = new_sl; 4495 /* connect new state to parentage chain */ 4496 cur->parent = &new_sl->state; 4497 /* clear write marks in current state: the writes we did are not writes 4498 * our child did, so they don't screen off its reads from us. 4499 * (There are no read marks in current state, because reads always mark 4500 * their parent and current state never has children yet. Only 4501 * explored_states can get read marks.) 
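* e.g. (illustrative): if r6 was written on the path leading here, leaving its stale write mark in place would stop a later read of r6 from propagating REG_LIVE_READ into the state just recorded, and pruning against that state could then wrongly treat r6 as unused.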
4502 */ 4503 for (i = 0; i < BPF_REG_FP; i++) 4504 cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE; 4505 4506 /* all stack frames are accessible from callee, clear them all */ 4507 for (j = 0; j <= cur->curframe; j++) { 4508 struct bpf_func_state *frame = cur->frame[j]; 4509 4510 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) 4511 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 4512 } 4513 return 0; 4514 } 4515 4516 static int do_check(struct bpf_verifier_env *env) 4517 { 4518 struct bpf_verifier_state *state; 4519 struct bpf_insn *insns = env->prog->insnsi; 4520 struct bpf_reg_state *regs; 4521 int insn_cnt = env->prog->len, i; 4522 int insn_idx, prev_insn_idx = 0; 4523 int insn_processed = 0; 4524 bool do_print_state = false; 4525 4526 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 4527 if (!state) 4528 return -ENOMEM; 4529 state->curframe = 0; 4530 state->parent = NULL; 4531 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 4532 if (!state->frame[0]) { 4533 kfree(state); 4534 return -ENOMEM; 4535 } 4536 env->cur_state = state; 4537 init_func_state(env, state->frame[0], 4538 BPF_MAIN_FUNC /* callsite */, 4539 0 /* frameno */, 4540 0 /* subprogno, zero == main subprog */); 4541 insn_idx = 0; 4542 for (;;) { 4543 struct bpf_insn *insn; 4544 u8 class; 4545 int err; 4546 4547 if (insn_idx >= insn_cnt) { 4548 verbose(env, "invalid insn idx %d insn_cnt %d\n", 4549 insn_idx, insn_cnt); 4550 return -EFAULT; 4551 } 4552 4553 insn = &insns[insn_idx]; 4554 class = BPF_CLASS(insn->code); 4555 4556 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 4557 verbose(env, 4558 "BPF program is too large. Processed %d insn\n", 4559 insn_processed); 4560 return -E2BIG; 4561 } 4562 4563 err = is_state_visited(env, insn_idx); 4564 if (err < 0) 4565 return err; 4566 if (err == 1) { 4567 /* found equivalent state, can prune the search */ 4568 if (env->log.level) { 4569 if (do_print_state) 4570 verbose(env, "\nfrom %d to %d: safe\n", 4571 prev_insn_idx, insn_idx); 4572 else 4573 verbose(env, "%d: safe\n", insn_idx); 4574 } 4575 goto process_bpf_exit; 4576 } 4577 4578 if (need_resched()) 4579 cond_resched(); 4580 4581 if (env->log.level > 1 || (env->log.level && do_print_state)) { 4582 if (env->log.level > 1) 4583 verbose(env, "%d:", insn_idx); 4584 else 4585 verbose(env, "\nfrom %d to %d:", 4586 prev_insn_idx, insn_idx); 4587 print_verifier_state(env, state->frame[state->curframe]); 4588 do_print_state = false; 4589 } 4590 4591 if (env->log.level) { 4592 const struct bpf_insn_cbs cbs = { 4593 .cb_print = verbose, 4594 }; 4595 4596 verbose(env, "%d: ", insn_idx); 4597 print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks); 4598 } 4599 4600 if (bpf_prog_is_dev_bound(env->prog->aux)) { 4601 err = bpf_prog_offload_verify_insn(env, insn_idx, 4602 prev_insn_idx); 4603 if (err) 4604 return err; 4605 } 4606 4607 regs = cur_regs(env); 4608 env->insn_aux_data[insn_idx].seen = true; 4609 if (class == BPF_ALU || class == BPF_ALU64) { 4610 err = check_alu_op(env, insn); 4611 if (err) 4612 return err; 4613 4614 } else if (class == BPF_LDX) { 4615 enum bpf_reg_type *prev_src_type, src_reg_type; 4616 4617 /* check for reserved fields is already done */ 4618 4619 /* check src operand */ 4620 err = check_reg_arg(env, insn->src_reg, SRC_OP); 4621 if (err) 4622 return err; 4623 4624 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 4625 if (err) 4626 return err; 4627 4628 src_reg_type = regs[insn->src_reg].type; 4629 4630 /* check that memory (src_reg + off) is 
readable, 4631 * the state of dst_reg will be updated by this func 4632 */ 4633 err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, 4634 BPF_SIZE(insn->code), BPF_READ, 4635 insn->dst_reg); 4636 if (err) 4637 return err; 4638 4639 prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; 4640 4641 if (*prev_src_type == NOT_INIT) { 4642 /* saw a valid insn 4643 * dst_reg = *(u32 *)(src_reg + off) 4644 * save type to validate intersecting paths 4645 */ 4646 *prev_src_type = src_reg_type; 4647 4648 } else if (src_reg_type != *prev_src_type && 4649 (src_reg_type == PTR_TO_CTX || 4650 *prev_src_type == PTR_TO_CTX)) { 4651 /* ABuser program is trying to use the same insn 4652 * dst_reg = *(u32*) (src_reg + off) 4653 * with different pointer types: 4654 * src_reg == ctx in one branch and 4655 * src_reg == stack|map in some other branch. 4656 * Reject it. 4657 */ 4658 verbose(env, "same insn cannot be used with different pointers\n"); 4659 return -EINVAL; 4660 } 4661 4662 } else if (class == BPF_STX) { 4663 enum bpf_reg_type *prev_dst_type, dst_reg_type; 4664 4665 if (BPF_MODE(insn->code) == BPF_XADD) { 4666 err = check_xadd(env, insn_idx, insn); 4667 if (err) 4668 return err; 4669 insn_idx++; 4670 continue; 4671 } 4672 4673 /* check src1 operand */ 4674 err = check_reg_arg(env, insn->src_reg, SRC_OP); 4675 if (err) 4676 return err; 4677 /* check src2 operand */ 4678 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 4679 if (err) 4680 return err; 4681 4682 dst_reg_type = regs[insn->dst_reg].type; 4683 4684 /* check that memory (dst_reg + off) is writeable */ 4685 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4686 BPF_SIZE(insn->code), BPF_WRITE, 4687 insn->src_reg); 4688 if (err) 4689 return err; 4690 4691 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; 4692 4693 if (*prev_dst_type == NOT_INIT) { 4694 *prev_dst_type = dst_reg_type; 4695 } else if (dst_reg_type != *prev_dst_type && 4696 (dst_reg_type == PTR_TO_CTX || 4697 *prev_dst_type == PTR_TO_CTX)) { 4698 verbose(env, "same insn cannot be used with different pointers\n"); 4699 return -EINVAL; 4700 } 4701 4702 } else if (class == BPF_ST) { 4703 if (BPF_MODE(insn->code) != BPF_MEM || 4704 insn->src_reg != BPF_REG_0) { 4705 verbose(env, "BPF_ST uses reserved fields\n"); 4706 return -EINVAL; 4707 } 4708 /* check src operand */ 4709 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 4710 if (err) 4711 return err; 4712 4713 if (is_ctx_reg(env, insn->dst_reg)) { 4714 verbose(env, "BPF_ST stores into R%d context is not allowed\n", 4715 insn->dst_reg); 4716 return -EACCES; 4717 } 4718 4719 /* check that memory (dst_reg + off) is writeable */ 4720 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4721 BPF_SIZE(insn->code), BPF_WRITE, 4722 -1); 4723 if (err) 4724 return err; 4725 4726 } else if (class == BPF_JMP) { 4727 u8 opcode = BPF_OP(insn->code); 4728 4729 if (opcode == BPF_CALL) { 4730 if (BPF_SRC(insn->code) != BPF_K || 4731 insn->off != 0 || 4732 (insn->src_reg != BPF_REG_0 && 4733 insn->src_reg != BPF_PSEUDO_CALL) || 4734 insn->dst_reg != BPF_REG_0) { 4735 verbose(env, "BPF_CALL uses reserved fields\n"); 4736 return -EINVAL; 4737 } 4738 4739 if (insn->src_reg == BPF_PSEUDO_CALL) 4740 err = check_func_call(env, insn, &insn_idx); 4741 else 4742 err = check_helper_call(env, insn->imm, insn_idx); 4743 if (err) 4744 return err; 4745 4746 } else if (opcode == BPF_JA) { 4747 if (BPF_SRC(insn->code) != BPF_K || 4748 insn->imm != 0 || 4749 insn->src_reg != BPF_REG_0 || 4750 insn->dst_reg != BPF_REG_0) { 4751 
verbose(env, "BPF_JA uses reserved fields\n"); 4752 return -EINVAL; 4753 } 4754 4755 insn_idx += insn->off + 1; 4756 continue; 4757 4758 } else if (opcode == BPF_EXIT) { 4759 if (BPF_SRC(insn->code) != BPF_K || 4760 insn->imm != 0 || 4761 insn->src_reg != BPF_REG_0 || 4762 insn->dst_reg != BPF_REG_0) { 4763 verbose(env, "BPF_EXIT uses reserved fields\n"); 4764 return -EINVAL; 4765 } 4766 4767 if (state->curframe) { 4768 /* exit from nested function */ 4769 prev_insn_idx = insn_idx; 4770 err = prepare_func_exit(env, &insn_idx); 4771 if (err) 4772 return err; 4773 do_print_state = true; 4774 continue; 4775 } 4776 4777 /* eBPF calling convetion is such that R0 is used 4778 * to return the value from eBPF program. 4779 * Make sure that it's readable at this time 4780 * of bpf_exit, which means that program wrote 4781 * something into it earlier 4782 */ 4783 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 4784 if (err) 4785 return err; 4786 4787 if (is_pointer_value(env, BPF_REG_0)) { 4788 verbose(env, "R0 leaks addr as return value\n"); 4789 return -EACCES; 4790 } 4791 4792 err = check_return_code(env); 4793 if (err) 4794 return err; 4795 process_bpf_exit: 4796 err = pop_stack(env, &prev_insn_idx, &insn_idx); 4797 if (err < 0) { 4798 if (err != -ENOENT) 4799 return err; 4800 break; 4801 } else { 4802 do_print_state = true; 4803 continue; 4804 } 4805 } else { 4806 err = check_cond_jmp_op(env, insn, &insn_idx); 4807 if (err) 4808 return err; 4809 } 4810 } else if (class == BPF_LD) { 4811 u8 mode = BPF_MODE(insn->code); 4812 4813 if (mode == BPF_ABS || mode == BPF_IND) { 4814 err = check_ld_abs(env, insn); 4815 if (err) 4816 return err; 4817 4818 } else if (mode == BPF_IMM) { 4819 err = check_ld_imm(env, insn); 4820 if (err) 4821 return err; 4822 4823 insn_idx++; 4824 env->insn_aux_data[insn_idx].seen = true; 4825 } else { 4826 verbose(env, "invalid BPF_LD mode\n"); 4827 return -EINVAL; 4828 } 4829 } else { 4830 verbose(env, "unknown insn class %d\n", class); 4831 return -EINVAL; 4832 } 4833 4834 insn_idx++; 4835 } 4836 4837 verbose(env, "processed %d insns (limit %d), stack depth ", 4838 insn_processed, BPF_COMPLEXITY_LIMIT_INSNS); 4839 for (i = 0; i < env->subprog_cnt + 1; i++) { 4840 u32 depth = env->subprog_stack_depth[i]; 4841 4842 verbose(env, "%d", depth); 4843 if (i + 1 < env->subprog_cnt + 1) 4844 verbose(env, "+"); 4845 } 4846 verbose(env, "\n"); 4847 env->prog->aux->stack_depth = env->subprog_stack_depth[0]; 4848 return 0; 4849 } 4850 4851 static int check_map_prealloc(struct bpf_map *map) 4852 { 4853 return (map->map_type != BPF_MAP_TYPE_HASH && 4854 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 4855 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 4856 !(map->map_flags & BPF_F_NO_PREALLOC); 4857 } 4858 4859 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 4860 struct bpf_map *map, 4861 struct bpf_prog *prog) 4862 4863 { 4864 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use 4865 * preallocated hash maps, since doing memory allocation 4866 * in overflow_handler can crash depending on where nmi got 4867 * triggered. 
4868 */ 4869 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 4870 if (!check_map_prealloc(map)) { 4871 verbose(env, "perf_event programs can only use preallocated hash map\n"); 4872 return -EINVAL; 4873 } 4874 if (map->inner_map_meta && 4875 !check_map_prealloc(map->inner_map_meta)) { 4876 verbose(env, "perf_event programs can only use preallocated inner hash map\n"); 4877 return -EINVAL; 4878 } 4879 } 4880 4881 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 4882 !bpf_offload_dev_match(prog, map)) { 4883 verbose(env, "offload device mismatch between prog and map\n"); 4884 return -EINVAL; 4885 } 4886 4887 return 0; 4888 } 4889 4890 /* look for pseudo eBPF instructions that access map FDs and 4891 * replace them with actual map pointers 4892 */ 4893 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 4894 { 4895 struct bpf_insn *insn = env->prog->insnsi; 4896 int insn_cnt = env->prog->len; 4897 int i, j, err; 4898 4899 err = bpf_prog_calc_tag(env->prog); 4900 if (err) 4901 return err; 4902 4903 for (i = 0; i < insn_cnt; i++, insn++) { 4904 if (BPF_CLASS(insn->code) == BPF_LDX && 4905 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 4906 verbose(env, "BPF_LDX uses reserved fields\n"); 4907 return -EINVAL; 4908 } 4909 4910 if (BPF_CLASS(insn->code) == BPF_STX && 4911 ((BPF_MODE(insn->code) != BPF_MEM && 4912 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 4913 verbose(env, "BPF_STX uses reserved fields\n"); 4914 return -EINVAL; 4915 } 4916 4917 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 4918 struct bpf_map *map; 4919 struct fd f; 4920 4921 if (i == insn_cnt - 1 || insn[1].code != 0 || 4922 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 4923 insn[1].off != 0) { 4924 verbose(env, "invalid bpf_ld_imm64 insn\n"); 4925 return -EINVAL; 4926 } 4927 4928 if (insn->src_reg == 0) 4929 /* valid generic load 64-bit imm */ 4930 goto next_insn; 4931 4932 if (insn->src_reg != BPF_PSEUDO_MAP_FD) { 4933 verbose(env, 4934 "unrecognized bpf_ld_imm64 insn\n"); 4935 return -EINVAL; 4936 } 4937 4938 f = fdget(insn->imm); 4939 map = __bpf_map_get(f); 4940 if (IS_ERR(map)) { 4941 verbose(env, "fd %d is not pointing to valid bpf_map\n", 4942 insn->imm); 4943 return PTR_ERR(map); 4944 } 4945 4946 err = check_map_prog_compatibility(env, map, env->prog); 4947 if (err) { 4948 fdput(f); 4949 return err; 4950 } 4951 4952 /* store map pointer inside BPF_LD_IMM64 instruction */ 4953 insn[0].imm = (u32) (unsigned long) map; 4954 insn[1].imm = ((u64) (unsigned long) map) >> 32; 4955 4956 /* check whether we recorded this map already */ 4957 for (j = 0; j < env->used_map_cnt; j++) 4958 if (env->used_maps[j] == map) { 4959 fdput(f); 4960 goto next_insn; 4961 } 4962 4963 if (env->used_map_cnt >= MAX_USED_MAPS) { 4964 fdput(f); 4965 return -E2BIG; 4966 } 4967 4968 /* hold the map. If the program is rejected by verifier, 4969 * the map will be released by release_maps() or it 4970 * will be used by the valid program until it's unloaded 4971 * and all maps are released in free_bpf_prog_info() 4972 */ 4973 map = bpf_map_inc(map, false); 4974 if (IS_ERR(map)) { 4975 fdput(f); 4976 return PTR_ERR(map); 4977 } 4978 env->used_maps[env->used_map_cnt++] = map; 4979 4980 fdput(f); 4981 next_insn: 4982 insn++; 4983 i++; 4984 continue; 4985 } 4986 4987 /* Basic sanity check before we invest more work here. 
*/ 4988 if (!bpf_opcode_in_insntable(insn->code)) { 4989 verbose(env, "unknown opcode %02x\n", insn->code); 4990 return -EINVAL; 4991 } 4992 } 4993 4994 /* now all pseudo BPF_LD_IMM64 instructions load valid 4995 * 'struct bpf_map *' into a register instead of user map_fd. 4996 * These pointers will be used later by verifier to validate map access. 4997 */ 4998 return 0; 4999 } 5000 5001 /* drop refcnt of maps used by the rejected program */ 5002 static void release_maps(struct bpf_verifier_env *env) 5003 { 5004 int i; 5005 5006 for (i = 0; i < env->used_map_cnt; i++) 5007 bpf_map_put(env->used_maps[i]); 5008 } 5009 5010 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 5011 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 5012 { 5013 struct bpf_insn *insn = env->prog->insnsi; 5014 int insn_cnt = env->prog->len; 5015 int i; 5016 5017 for (i = 0; i < insn_cnt; i++, insn++) 5018 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 5019 insn->src_reg = 0; 5020 } 5021 5022 /* single env->prog->insni[off] instruction was replaced with the range 5023 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 5024 * [0, off) and [off, end) to new locations, so the patched range stays zero 5025 */ 5026 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, 5027 u32 off, u32 cnt) 5028 { 5029 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 5030 int i; 5031 5032 if (cnt == 1) 5033 return 0; 5034 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); 5035 if (!new_data) 5036 return -ENOMEM; 5037 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 5038 memcpy(new_data + off + cnt - 1, old_data + off, 5039 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 5040 for (i = off; i < off + cnt - 1; i++) 5041 new_data[i].seen = true; 5042 env->insn_aux_data = new_data; 5043 vfree(old_data); 5044 return 0; 5045 } 5046 5047 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 5048 { 5049 int i; 5050 5051 if (len == 1) 5052 return; 5053 for (i = 0; i < env->subprog_cnt; i++) { 5054 if (env->subprog_starts[i] < off) 5055 continue; 5056 env->subprog_starts[i] += len - 1; 5057 } 5058 } 5059 5060 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 5061 const struct bpf_insn *patch, u32 len) 5062 { 5063 struct bpf_prog *new_prog; 5064 5065 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 5066 if (!new_prog) 5067 return NULL; 5068 if (adjust_insn_aux_data(env, new_prog->len, off, len)) 5069 return NULL; 5070 adjust_subprog_starts(env, off, len); 5071 return new_prog; 5072 } 5073 5074 /* The verifier does more data flow analysis than llvm and will not 5075 * explore branches that are dead at run time. Malicious programs can 5076 * have dead code too. Therefore replace all dead at-run-time code 5077 * with 'ja -1'. 5078 * 5079 * Just nops are not optimal, e.g. if they would sit at the end of the 5080 * program and through another bug we would manage to jump there, then 5081 * we'd execute beyond program memory otherwise. Returning exception 5082 * code also wouldn't work since we can have subprogs where the dead 5083 * code could be located. 
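* e.g. (illustrative): insns that do_check() never marked ->seen (say, the fall-through of a branch proven always taken) are each overwritten below with 'ja -1', a jump that traps execution in place instead of letting it run past the patched region.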
5084 */ 5085 static void sanitize_dead_code(struct bpf_verifier_env *env) 5086 { 5087 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 5088 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 5089 struct bpf_insn *insn = env->prog->insnsi; 5090 const int insn_cnt = env->prog->len; 5091 int i; 5092 5093 for (i = 0; i < insn_cnt; i++) { 5094 if (aux_data[i].seen) 5095 continue; 5096 memcpy(insn + i, &trap, sizeof(trap)); 5097 } 5098 } 5099 5100 /* convert load instructions that access fields of 'struct __sk_buff' 5101 * into a sequence of instructions that access fields of 'struct sk_buff' 5102 */ 5103 static int convert_ctx_accesses(struct bpf_verifier_env *env) 5104 { 5105 const struct bpf_verifier_ops *ops = env->ops; 5106 int i, cnt, size, ctx_field_size, delta = 0; 5107 const int insn_cnt = env->prog->len; 5108 struct bpf_insn insn_buf[16], *insn; 5109 struct bpf_prog *new_prog; 5110 enum bpf_access_type type; 5111 bool is_narrower_load; 5112 u32 target_size; 5113 5114 if (ops->gen_prologue) { 5115 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 5116 env->prog); 5117 if (cnt >= ARRAY_SIZE(insn_buf)) { 5118 verbose(env, "bpf verifier is misconfigured\n"); 5119 return -EINVAL; 5120 } else if (cnt) { 5121 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 5122 if (!new_prog) 5123 return -ENOMEM; 5124 5125 env->prog = new_prog; 5126 delta += cnt - 1; 5127 } 5128 } 5129 5130 if (!ops->convert_ctx_access) 5131 return 0; 5132 5133 insn = env->prog->insnsi + delta; 5134 5135 for (i = 0; i < insn_cnt; i++, insn++) { 5136 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 5137 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 5138 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 5139 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 5140 type = BPF_READ; 5141 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 5142 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 5143 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 5144 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 5145 type = BPF_WRITE; 5146 else 5147 continue; 5148 5149 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) 5150 continue; 5151 5152 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 5153 size = BPF_LDST_BYTES(insn); 5154 5155 /* If the read access is a narrower load of the field, 5156 * convert to a 4/8-byte load, to minimize program type specific 5157 * convert_ctx_access changes. If conversion is successful, 5158 * we will apply the proper mask to the result.
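* e.g. (illustrative): a 1-byte read of a 4-byte ctx field becomes a BPF_W load from the field's aligned offset, followed below by BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff) to keep only the requested byte.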
5159 */ 5160 is_narrower_load = size < ctx_field_size; 5161 if (is_narrower_load) { 5162 u32 off = insn->off; 5163 u8 size_code; 5164 5165 if (type == BPF_WRITE) { 5166 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 5167 return -EINVAL; 5168 } 5169 5170 size_code = BPF_H; 5171 if (ctx_field_size == 4) 5172 size_code = BPF_W; 5173 else if (ctx_field_size == 8) 5174 size_code = BPF_DW; 5175 5176 insn->off = off & ~(ctx_field_size - 1); 5177 insn->code = BPF_LDX | BPF_MEM | size_code; 5178 } 5179 5180 target_size = 0; 5181 cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, 5182 &target_size); 5183 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 5184 (ctx_field_size && !target_size)) { 5185 verbose(env, "bpf verifier is misconfigured\n"); 5186 return -EINVAL; 5187 } 5188 5189 if (is_narrower_load && size < target_size) { 5190 if (ctx_field_size <= 4) 5191 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 5192 (1 << size * 8) - 1); 5193 else 5194 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 5195 (1 << size * 8) - 1); 5196 } 5197 5198 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 5199 if (!new_prog) 5200 return -ENOMEM; 5201 5202 delta += cnt - 1; 5203 5204 /* keep walking new program and skip insns we just inserted */ 5205 env->prog = new_prog; 5206 insn = new_prog->insnsi + i + delta; 5207 } 5208 5209 return 0; 5210 } 5211 5212 static int jit_subprogs(struct bpf_verifier_env *env) 5213 { 5214 struct bpf_prog *prog = env->prog, **func, *tmp; 5215 int i, j, subprog_start, subprog_end = 0, len, subprog; 5216 struct bpf_insn *insn; 5217 void *old_bpf_func; 5218 int err = -ENOMEM; 5219 5220 if (env->subprog_cnt == 0) 5221 return 0; 5222 5223 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 5224 if (insn->code != (BPF_JMP | BPF_CALL) || 5225 insn->src_reg != BPF_PSEUDO_CALL) 5226 continue; 5227 subprog = find_subprog(env, i + insn->imm + 1); 5228 if (subprog < 0) { 5229 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 5230 i + insn->imm + 1); 5231 return -EFAULT; 5232 } 5233 /* temporarily remember subprog id inside insn instead of 5234 * aux_data, since next loop will split up all insns into funcs 5235 */ 5236 insn->off = subprog + 1; 5237 /* remember original imm in case JIT fails and fallback 5238 * to interpreter will be needed 5239 */ 5240 env->insn_aux_data[i].call_imm = insn->imm; 5241 /* point imm to __bpf_call_base+1 from JITs point of view */ 5242 insn->imm = 1; 5243 } 5244 5245 func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL); 5246 if (!func) 5247 return -ENOMEM; 5248 5249 for (i = 0; i <= env->subprog_cnt; i++) { 5250 subprog_start = subprog_end; 5251 if (env->subprog_cnt == i) 5252 subprog_end = prog->len; 5253 else 5254 subprog_end = env->subprog_starts[i]; 5255 5256 len = subprog_end - subprog_start; 5257 func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER); 5258 if (!func[i]) 5259 goto out_free; 5260 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], 5261 len * sizeof(struct bpf_insn)); 5262 func[i]->type = prog->type; 5263 func[i]->len = len; 5264 if (bpf_prog_calc_tag(func[i])) 5265 goto out_free; 5266 func[i]->is_func = 1; 5267 /* Use bpf_prog_F_tag to indicate functions in stack traces. 
* Long term would need debug info to populate names 5269 */ 5270 func[i]->aux->name[0] = 'F'; 5271 func[i]->aux->stack_depth = env->subprog_stack_depth[i]; 5272 func[i]->jit_requested = 1; 5273 func[i] = bpf_int_jit_compile(func[i]); 5274 if (!func[i]->jited) { 5275 err = -ENOTSUPP; 5276 goto out_free; 5277 } 5278 cond_resched(); 5279 } 5280 /* at this point all bpf functions were successfully JITed 5281 * now populate all bpf_calls with correct addresses and 5282 * run last pass of JIT 5283 */ 5284 for (i = 0; i <= env->subprog_cnt; i++) { 5285 insn = func[i]->insnsi; 5286 for (j = 0; j < func[i]->len; j++, insn++) { 5287 if (insn->code != (BPF_JMP | BPF_CALL) || 5288 insn->src_reg != BPF_PSEUDO_CALL) 5289 continue; 5290 subprog = insn->off; 5291 insn->off = 0; 5292 insn->imm = (u64 (*)(u64, u64, u64, u64, u64)) 5293 func[subprog]->bpf_func - 5294 __bpf_call_base; 5295 } 5296 } 5297 for (i = 0; i <= env->subprog_cnt; i++) { 5298 old_bpf_func = func[i]->bpf_func; 5299 tmp = bpf_int_jit_compile(func[i]); 5300 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 5301 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 5302 err = -EFAULT; 5303 goto out_free; 5304 } 5305 cond_resched(); 5306 } 5307 5308 /* finally lock prog and jit images for all functions and 5309 * populate kallsyms 5310 */ 5311 for (i = 0; i <= env->subprog_cnt; i++) { 5312 bpf_prog_lock_ro(func[i]); 5313 bpf_prog_kallsyms_add(func[i]); 5314 } 5315 5316 /* Last step: make now unused interpreter insns from main 5317 * prog consistent for later dump requests, so they can 5318 * later look the same as if they were interpreted only. 5319 */ 5320 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 5321 unsigned long addr; 5322 5323 if (insn->code != (BPF_JMP | BPF_CALL) || 5324 insn->src_reg != BPF_PSEUDO_CALL) 5325 continue; 5326 insn->off = env->insn_aux_data[i].call_imm; 5327 subprog = find_subprog(env, i + insn->off + 1); 5328 addr = (unsigned long)func[subprog + 1]->bpf_func; 5329 addr &= PAGE_MASK; 5330 insn->imm = (u64 (*)(u64, u64, u64, u64, u64)) 5331 addr - __bpf_call_base; 5332 } 5333 5334 prog->jited = 1; 5335 prog->bpf_func = func[0]->bpf_func; 5336 prog->aux->func = func; 5337 prog->aux->func_cnt = env->subprog_cnt + 1; 5338 return 0; 5339 out_free: 5340 for (i = 0; i <= env->subprog_cnt; i++) 5341 if (func[i]) 5342 bpf_jit_free(func[i]); 5343 kfree(func); 5344 /* cleanup main prog to be interpreted */ 5345 prog->jit_requested = 0; 5346 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 5347 if (insn->code != (BPF_JMP | BPF_CALL) || 5348 insn->src_reg != BPF_PSEUDO_CALL) 5349 continue; 5350 insn->off = 0; 5351 insn->imm = env->insn_aux_data[i].call_imm; 5352 } 5353 return err; 5354 } 5355 5356 static int fixup_call_args(struct bpf_verifier_env *env) 5357 { 5358 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 5359 struct bpf_prog *prog = env->prog; 5360 struct bpf_insn *insn = prog->insnsi; 5361 int i, depth; 5362 #endif 5363 int err; 5364 5365 err = 0; 5366 if (env->prog->jit_requested) { 5367 err = jit_subprogs(env); 5368 if (err == 0) 5369 return 0; 5370 } 5371 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 5372 for (i = 0; i < prog->len; i++, insn++) { 5373 if (insn->code != (BPF_JMP | BPF_CALL) || 5374 insn->src_reg != BPF_PSEUDO_CALL) 5375 continue; 5376 depth = get_callee_stack_depth(env, insn, i); 5377 if (depth < 0) 5378 return depth; 5379 bpf_patch_call_args(insn, depth); 5380 } 5381 err = 0; 5382 #endif 5383 return err; 5384 } 5385 5386 /* fixup insn->imm field of bpf_call instructions 5387 * and
/* fixup insn->imm field of bpf_call instructions
 * and inline eligible helpers as an explicit sequence of BPF instructions
 *
 * this function is called after the eBPF program has passed verification
 */
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, cnt, delta = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			struct bpf_insn mask_and_div[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx div 0 -> 0 */
				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
				*insn,
			};
			struct bpf_insn mask_and_mod[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx mod 0 -> Rx */
				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
				*insn,
			};
			struct bpf_insn *patchlet;

			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
				patchlet = mask_and_div + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
			} else {
				patchlet = mask_and_mod + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
			}

			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			continue;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
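		/* Editorial illustration: after the divide/modulo rewrite at
		 * the top of this loop, a 32-bit BPF_DIV with a register
		 * divisor behaves roughly like the pseudo-sequence below
		 * (register names are illustrative only):
		 *
		 *	w_src = (u32)w_src;		// truncate divisor
		 *	if (w_src != 0)
		 *		w_dst = w_dst / w_src;
		 *	else
		 *		w_dst = 0;		// div by zero yields 0
		 *
		 * BPF_MOD with a zero divisor instead leaves the destination
		 * register unchanged, and the 64-bit variants skip the
		 * truncating move.
		 */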
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			env->prog->aux->stack_depth = MAX_BPF_STACK;

			/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			map_ptr = env->insn_aux_data[i + delta].map_ptr;
			if (map_ptr == BPF_MAP_PTR_POISON) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}
			if (!map_ptr->unpriv_array)
				continue;
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * handlers are currently limited to 64 bit only.
		 */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_map_lookup_elem) {
			map_ptr = env->insn_aux_data[i + delta].map_ptr;
			if (map_ptr == BPF_MAP_PTR_POISON ||
			    !map_ptr->ops->map_gen_lookup)
				goto patch_call_imm;

			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
						       cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;

			/* keep walking new program and skip insns we just inserted */
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}
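		/* Editorial note: map_gen_lookup() lets a map implementation
		 * replace the bpf_map_lookup_elem() helper call above with an
		 * inline instruction sequence.  For an array map the generated
		 * code is roughly, in C terms:
		 *
		 *	index = *(u32 *)key;
		 *	if (index >= max_entries)
		 *		return NULL;
		 *	return array->value + round_up(value_size, 8) * index;
		 *
		 * which avoids the helper-call overhead on the lookup fast
		 * path; see the map_gen_lookup implementations (e.g. in
		 * arraymap.c) for the exact sequences.
		 */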
		if (insn->imm == BPF_FUNC_redirect_map) {
			/* Note, we cannot use prog directly as imm as subsequent
			 * rewrites would still change the prog pointer. The only
			 * stable address we can use is aux, which also works with
			 * prog clones during blinding.
			 */
			u64 addr = (unsigned long)prog->aux;
			struct bpf_insn r4_ld[] = {
				BPF_LD_IMM64(BPF_REG_4, addr),
				*insn,
			};
			cnt = ARRAY_SIZE(r4_ld);

			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
		}
patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm);
		/* all functions that have a prototype and that the verifier
		 * allowed programs to call must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	return 0;
}

static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				free_verifier_state(&sl->state, false);
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}
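/* Editorial illustration (not used anywhere in this file): the r4_ld patch in
 * fixup_bpf_calls() above and the map-fd rewriting done from bpf_check() below
 * both rely on BPF_LD_IMM64() expanding to a pair of instructions, with the
 * 64-bit immediate split across the two 32-bit imm fields.  A minimal sketch
 * of reading such an immediate back; the helper name is made up.
 */
static __maybe_unused u64 demo_ld_imm64_value(const struct bpf_insn *insn)
{
	/* insn[0].imm carries the low 32 bits, insn[1].imm the high 32 bits */
	return (u64)(u32)insn[0].imm | ((u64)(u32)insn[1].imm << 32);
}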
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
	struct bpf_verifier_env *env;
	struct bpf_verifer_log *log;
	int ret = -EINVAL;

	/* no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
				     (*prog)->len);
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];

	/* grab the mutex to protect few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf)
			goto err_unlock;
	}

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env);
		if (ret)
			goto err_unlock;
	}

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct bpf_verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check(env);
	if (env->cur_state) {
		free_verifier_state(env->cur_state, true);
		env->cur_state = NULL;
	}

skip_full_check:
	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	if (ret == 0)
		sanitize_dead_code(env);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = fixup_bpf_calls(env);

	if (ret == 0)
		ret = fixup_call_args(env);

	if (log->level && bpf_verifier_log_full(log))
		ret = -ENOSPC;
	if (log->level && !log->ubuf) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_bpf_prog_info() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
err_unlock:
	mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}
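/* Editorial illustration (not used anywhere in the kernel): bpf_check() above
 * rejects verifier log buffers smaller than 128 bytes or larger than
 * UINT_MAX >> 8, and requires a non-zero log_level whenever a buffer is
 * supplied.  A minimal sketch of clamping a requested size into the accepted
 * range; the helper name is made up.
 */
static __maybe_unused u32 demo_clamp_verifier_log_size(u32 requested)
{
	return clamp_t(u32, requested, 128U, UINT_MAX >> 8);
}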