1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 2 * Copyright (c) 2016 Facebook 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public 6 * License as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/kernel.h> 14 #include <linux/types.h> 15 #include <linux/slab.h> 16 #include <linux/bpf.h> 17 #include <linux/bpf_verifier.h> 18 #include <linux/filter.h> 19 #include <net/netlink.h> 20 #include <linux/file.h> 21 #include <linux/vmalloc.h> 22 #include <linux/stringify.h> 23 24 /* bpf_check() is a static code analyzer that walks eBPF program 25 * instruction by instruction and updates register/stack state. 26 * All paths of conditional branches are analyzed until 'bpf_exit' insn. 27 * 28 * The first pass is depth-first-search to check that the program is a DAG. 29 * It rejects the following programs: 30 * - larger than BPF_MAXINSNS insns 31 * - if loop is present (detected via back-edge) 32 * - unreachable insns exist (shouldn't be a forest. program = one function) 33 * - out of bounds or malformed jumps 34 * The second pass is all possible path descent from the 1st insn. 35 * Since it's analyzing all pathes through the program, the length of the 36 * analysis is limited to 64k insn, which may be hit even if total number of 37 * insn is less then 4K, but there are too many branches that change stack/regs. 38 * Number of 'branches to be analyzed' is limited to 1k 39 * 40 * On entry to each instruction, each register has a type, and the instruction 41 * changes the types of the registers depending on instruction semantics. 42 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is 43 * copied to R1. 44 * 45 * All registers are 64-bit. 46 * R0 - return register 47 * R1-R5 argument passing registers 48 * R6-R9 callee saved registers 49 * R10 - frame pointer read-only 50 * 51 * At the start of BPF program the register R1 contains a pointer to bpf_context 52 * and has type PTR_TO_CTX. 53 * 54 * Verifier tracks arithmetic operations on pointers in case: 55 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 56 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), 57 * 1st insn copies R10 (which has FRAME_PTR) type into R1 58 * and 2nd arithmetic instruction is pattern matched to recognize 59 * that it wants to construct a pointer to some element within stack. 60 * So after 2nd insn, the register R1 has type PTR_TO_STACK 61 * (and -20 constant is saved for further stack bounds checking). 62 * Meaning that this reg is a pointer to stack plus known immediate constant. 63 * 64 * Most of the time the registers have SCALAR_VALUE type, which 65 * means the register has some value, but it's not a valid pointer. 66 * (like pointer plus pointer becomes SCALAR_VALUE type) 67 * 68 * When verifier sees load or store instructions the type of base register 69 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer 70 * types recognized by check_mem_access() function. 71 * 72 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' 73 * and the range of [ptr, ptr + map's value_size) is accessible. 74 * 75 * registers used to pass values to function calls are checked against 76 * function argument constraints. 77 * 78 * ARG_PTR_TO_MAP_KEY is one of such argument constraints. 79 * It means that the register type passed to this function must be 80 * PTR_TO_STACK and it will be used inside the function as 81 * 'pointer to map element key' 82 * 83 * For example the argument constraints for bpf_map_lookup_elem(): 84 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 85 * .arg1_type = ARG_CONST_MAP_PTR, 86 * .arg2_type = ARG_PTR_TO_MAP_KEY, 87 * 88 * ret_type says that this function returns 'pointer to map elem value or null' 89 * function expects 1st argument to be a const pointer to 'struct bpf_map' and 90 * 2nd argument should be a pointer to stack, which will be used inside 91 * the helper function as a pointer to map element key. 92 * 93 * On the kernel side the helper function looks like: 94 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 95 * { 96 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; 97 * void *key = (void *) (unsigned long) r2; 98 * void *value; 99 * 100 * here kernel can access 'key' and 'map' pointers safely, knowing that 101 * [key, key + map->key_size) bytes are valid and were initialized on 102 * the stack of eBPF program. 103 * } 104 * 105 * Corresponding eBPF program may look like: 106 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR 107 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK 108 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP 109 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 110 * here verifier looks at prototype of map_lookup_elem() and sees: 111 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, 112 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes 113 * 114 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, 115 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits 116 * and were initialized prior to this call. 117 * If it's ok, then verifier allows this BPF_CALL insn and looks at 118 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets 119 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function 120 * returns ether pointer to map value or NULL. 121 * 122 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' 123 * insn, the register holding that pointer in the true branch changes state to 124 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false 125 * branch. See check_cond_jmp_op(). 126 * 127 * After the call R0 is set to return type of the function and registers R1-R5 128 * are set to NOT_INIT to indicate that they are no longer readable. 129 */ 130 131 /* verifier_state + insn_idx are pushed to stack when branch is encountered */ 132 struct bpf_verifier_stack_elem { 133 /* verifer state is 'st' 134 * before processing instruction 'insn_idx' 135 * and after processing instruction 'prev_insn_idx' 136 */ 137 struct bpf_verifier_state st; 138 int insn_idx; 139 int prev_insn_idx; 140 struct bpf_verifier_stack_elem *next; 141 }; 142 143 #define BPF_COMPLEXITY_LIMIT_INSNS 131072 144 #define BPF_COMPLEXITY_LIMIT_STACK 1024 145 146 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) 147 148 struct bpf_call_arg_meta { 149 struct bpf_map *map_ptr; 150 bool raw_mode; 151 bool pkt_access; 152 int regno; 153 int access_size; 154 }; 155 156 /* verbose verifier prints what it's seeing 157 * bpf_check() is called under lock, so no race to access these global vars 158 */ 159 static u32 log_level, log_size, log_len; 160 static char *log_buf; 161 162 static DEFINE_MUTEX(bpf_verifier_lock); 163 164 /* log_level controls verbosity level of eBPF verifier. 165 * verbose() is used to dump the verification trace to the log, so the user 166 * can figure out what's wrong with the program 167 */ 168 static __printf(1, 2) void verbose(const char *fmt, ...) 169 { 170 va_list args; 171 172 if (log_level == 0 || log_len >= log_size - 1) 173 return; 174 175 va_start(args, fmt); 176 log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args); 177 va_end(args); 178 } 179 180 /* string representation of 'enum bpf_reg_type' */ 181 static const char * const reg_type_str[] = { 182 [NOT_INIT] = "?", 183 [SCALAR_VALUE] = "inv", 184 [PTR_TO_CTX] = "ctx", 185 [CONST_PTR_TO_MAP] = "map_ptr", 186 [PTR_TO_MAP_VALUE] = "map_value", 187 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", 188 [PTR_TO_STACK] = "fp", 189 [PTR_TO_PACKET] = "pkt", 190 [PTR_TO_PACKET_END] = "pkt_end", 191 }; 192 193 #define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x) 194 static const char * const func_id_str[] = { 195 __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN) 196 }; 197 #undef __BPF_FUNC_STR_FN 198 199 static const char *func_id_name(int id) 200 { 201 BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID); 202 203 if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id]) 204 return func_id_str[id]; 205 else 206 return "unknown"; 207 } 208 209 static void print_verifier_state(struct bpf_verifier_state *state) 210 { 211 struct bpf_reg_state *reg; 212 enum bpf_reg_type t; 213 int i; 214 215 for (i = 0; i < MAX_BPF_REG; i++) { 216 reg = &state->regs[i]; 217 t = reg->type; 218 if (t == NOT_INIT) 219 continue; 220 verbose(" R%d=%s", i, reg_type_str[t]); 221 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && 222 tnum_is_const(reg->var_off)) { 223 /* reg->off should be 0 for SCALAR_VALUE */ 224 verbose("%lld", reg->var_off.value + reg->off); 225 } else { 226 verbose("(id=%d", reg->id); 227 if (t != SCALAR_VALUE) 228 verbose(",off=%d", reg->off); 229 if (t == PTR_TO_PACKET) 230 verbose(",r=%d", reg->range); 231 else if (t == CONST_PTR_TO_MAP || 232 t == PTR_TO_MAP_VALUE || 233 t == PTR_TO_MAP_VALUE_OR_NULL) 234 verbose(",ks=%d,vs=%d", 235 reg->map_ptr->key_size, 236 reg->map_ptr->value_size); 237 if (tnum_is_const(reg->var_off)) { 238 /* Typically an immediate SCALAR_VALUE, but 239 * could be a pointer whose offset is too big 240 * for reg->off 241 */ 242 verbose(",imm=%llx", reg->var_off.value); 243 } else { 244 if (reg->smin_value != reg->umin_value && 245 reg->smin_value != S64_MIN) 246 verbose(",smin_value=%lld", 247 (long long)reg->smin_value); 248 if (reg->smax_value != reg->umax_value && 249 reg->smax_value != S64_MAX) 250 verbose(",smax_value=%lld", 251 (long long)reg->smax_value); 252 if (reg->umin_value != 0) 253 verbose(",umin_value=%llu", 254 (unsigned long long)reg->umin_value); 255 if (reg->umax_value != U64_MAX) 256 verbose(",umax_value=%llu", 257 (unsigned long long)reg->umax_value); 258 if (!tnum_is_unknown(reg->var_off)) { 259 char tn_buf[48]; 260 261 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 262 verbose(",var_off=%s", tn_buf); 263 } 264 } 265 verbose(")"); 266 } 267 } 268 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 269 if (state->stack_slot_type[i] == STACK_SPILL) 270 verbose(" fp%d=%s", -MAX_BPF_STACK + i, 271 reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]); 272 } 273 verbose("\n"); 274 } 275 276 static const char *const bpf_class_string[] = { 277 [BPF_LD] = "ld", 278 [BPF_LDX] = "ldx", 279 [BPF_ST] = "st", 280 [BPF_STX] = "stx", 281 [BPF_ALU] = "alu", 282 [BPF_JMP] = "jmp", 283 [BPF_RET] = "BUG", 284 [BPF_ALU64] = "alu64", 285 }; 286 287 static const char *const bpf_alu_string[16] = { 288 [BPF_ADD >> 4] = "+=", 289 [BPF_SUB >> 4] = "-=", 290 [BPF_MUL >> 4] = "*=", 291 [BPF_DIV >> 4] = "/=", 292 [BPF_OR >> 4] = "|=", 293 [BPF_AND >> 4] = "&=", 294 [BPF_LSH >> 4] = "<<=", 295 [BPF_RSH >> 4] = ">>=", 296 [BPF_NEG >> 4] = "neg", 297 [BPF_MOD >> 4] = "%=", 298 [BPF_XOR >> 4] = "^=", 299 [BPF_MOV >> 4] = "=", 300 [BPF_ARSH >> 4] = "s>>=", 301 [BPF_END >> 4] = "endian", 302 }; 303 304 static const char *const bpf_ldst_string[] = { 305 [BPF_W >> 3] = "u32", 306 [BPF_H >> 3] = "u16", 307 [BPF_B >> 3] = "u8", 308 [BPF_DW >> 3] = "u64", 309 }; 310 311 static const char *const bpf_jmp_string[16] = { 312 [BPF_JA >> 4] = "jmp", 313 [BPF_JEQ >> 4] = "==", 314 [BPF_JGT >> 4] = ">", 315 [BPF_JLT >> 4] = "<", 316 [BPF_JGE >> 4] = ">=", 317 [BPF_JLE >> 4] = "<=", 318 [BPF_JSET >> 4] = "&", 319 [BPF_JNE >> 4] = "!=", 320 [BPF_JSGT >> 4] = "s>", 321 [BPF_JSLT >> 4] = "s<", 322 [BPF_JSGE >> 4] = "s>=", 323 [BPF_JSLE >> 4] = "s<=", 324 [BPF_CALL >> 4] = "call", 325 [BPF_EXIT >> 4] = "exit", 326 }; 327 328 static void print_bpf_insn(const struct bpf_verifier_env *env, 329 const struct bpf_insn *insn) 330 { 331 u8 class = BPF_CLASS(insn->code); 332 333 if (class == BPF_ALU || class == BPF_ALU64) { 334 if (BPF_SRC(insn->code) == BPF_X) 335 verbose("(%02x) %sr%d %s %sr%d\n", 336 insn->code, class == BPF_ALU ? "(u32) " : "", 337 insn->dst_reg, 338 bpf_alu_string[BPF_OP(insn->code) >> 4], 339 class == BPF_ALU ? "(u32) " : "", 340 insn->src_reg); 341 else 342 verbose("(%02x) %sr%d %s %s%d\n", 343 insn->code, class == BPF_ALU ? "(u32) " : "", 344 insn->dst_reg, 345 bpf_alu_string[BPF_OP(insn->code) >> 4], 346 class == BPF_ALU ? "(u32) " : "", 347 insn->imm); 348 } else if (class == BPF_STX) { 349 if (BPF_MODE(insn->code) == BPF_MEM) 350 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", 351 insn->code, 352 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 353 insn->dst_reg, 354 insn->off, insn->src_reg); 355 else if (BPF_MODE(insn->code) == BPF_XADD) 356 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", 357 insn->code, 358 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 359 insn->dst_reg, insn->off, 360 insn->src_reg); 361 else 362 verbose("BUG_%02x\n", insn->code); 363 } else if (class == BPF_ST) { 364 if (BPF_MODE(insn->code) != BPF_MEM) { 365 verbose("BUG_st_%02x\n", insn->code); 366 return; 367 } 368 verbose("(%02x) *(%s *)(r%d %+d) = %d\n", 369 insn->code, 370 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 371 insn->dst_reg, 372 insn->off, insn->imm); 373 } else if (class == BPF_LDX) { 374 if (BPF_MODE(insn->code) != BPF_MEM) { 375 verbose("BUG_ldx_%02x\n", insn->code); 376 return; 377 } 378 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", 379 insn->code, insn->dst_reg, 380 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 381 insn->src_reg, insn->off); 382 } else if (class == BPF_LD) { 383 if (BPF_MODE(insn->code) == BPF_ABS) { 384 verbose("(%02x) r0 = *(%s *)skb[%d]\n", 385 insn->code, 386 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 387 insn->imm); 388 } else if (BPF_MODE(insn->code) == BPF_IND) { 389 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", 390 insn->code, 391 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 392 insn->src_reg, insn->imm); 393 } else if (BPF_MODE(insn->code) == BPF_IMM && 394 BPF_SIZE(insn->code) == BPF_DW) { 395 /* At this point, we already made sure that the second 396 * part of the ldimm64 insn is accessible. 397 */ 398 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 399 bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD; 400 401 if (map_ptr && !env->allow_ptr_leaks) 402 imm = 0; 403 404 verbose("(%02x) r%d = 0x%llx\n", insn->code, 405 insn->dst_reg, (unsigned long long)imm); 406 } else { 407 verbose("BUG_ld_%02x\n", insn->code); 408 return; 409 } 410 } else if (class == BPF_JMP) { 411 u8 opcode = BPF_OP(insn->code); 412 413 if (opcode == BPF_CALL) { 414 verbose("(%02x) call %s#%d\n", insn->code, 415 func_id_name(insn->imm), insn->imm); 416 } else if (insn->code == (BPF_JMP | BPF_JA)) { 417 verbose("(%02x) goto pc%+d\n", 418 insn->code, insn->off); 419 } else if (insn->code == (BPF_JMP | BPF_EXIT)) { 420 verbose("(%02x) exit\n", insn->code); 421 } else if (BPF_SRC(insn->code) == BPF_X) { 422 verbose("(%02x) if r%d %s r%d goto pc%+d\n", 423 insn->code, insn->dst_reg, 424 bpf_jmp_string[BPF_OP(insn->code) >> 4], 425 insn->src_reg, insn->off); 426 } else { 427 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", 428 insn->code, insn->dst_reg, 429 bpf_jmp_string[BPF_OP(insn->code) >> 4], 430 insn->imm, insn->off); 431 } 432 } else { 433 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); 434 } 435 } 436 437 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx) 438 { 439 struct bpf_verifier_stack_elem *elem; 440 int insn_idx; 441 442 if (env->head == NULL) 443 return -1; 444 445 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); 446 insn_idx = env->head->insn_idx; 447 if (prev_insn_idx) 448 *prev_insn_idx = env->head->prev_insn_idx; 449 elem = env->head->next; 450 kfree(env->head); 451 env->head = elem; 452 env->stack_size--; 453 return insn_idx; 454 } 455 456 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 457 int insn_idx, int prev_insn_idx) 458 { 459 struct bpf_verifier_stack_elem *elem; 460 461 elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 462 if (!elem) 463 goto err; 464 465 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); 466 elem->insn_idx = insn_idx; 467 elem->prev_insn_idx = prev_insn_idx; 468 elem->next = env->head; 469 env->head = elem; 470 env->stack_size++; 471 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { 472 verbose("BPF program is too complex\n"); 473 goto err; 474 } 475 return &elem->st; 476 err: 477 /* pop all elements and return */ 478 while (pop_stack(env, NULL) >= 0); 479 return NULL; 480 } 481 482 #define CALLER_SAVED_REGS 6 483 static const int caller_saved[CALLER_SAVED_REGS] = { 484 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 485 }; 486 487 static void __mark_reg_not_init(struct bpf_reg_state *reg); 488 489 /* Mark the unknown part of a register (variable offset or scalar value) as 490 * known to have the value @imm. 491 */ 492 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) 493 { 494 reg->id = 0; 495 reg->var_off = tnum_const(imm); 496 reg->smin_value = (s64)imm; 497 reg->smax_value = (s64)imm; 498 reg->umin_value = imm; 499 reg->umax_value = imm; 500 } 501 502 /* Mark the 'variable offset' part of a register as zero. This should be 503 * used only on registers holding a pointer type. 504 */ 505 static void __mark_reg_known_zero(struct bpf_reg_state *reg) 506 { 507 __mark_reg_known(reg, 0); 508 } 509 510 static void mark_reg_known_zero(struct bpf_reg_state *regs, u32 regno) 511 { 512 if (WARN_ON(regno >= MAX_BPF_REG)) { 513 verbose("mark_reg_known_zero(regs, %u)\n", regno); 514 /* Something bad happened, let's kill all regs */ 515 for (regno = 0; regno < MAX_BPF_REG; regno++) 516 __mark_reg_not_init(regs + regno); 517 return; 518 } 519 __mark_reg_known_zero(regs + regno); 520 } 521 522 /* Attempts to improve min/max values based on var_off information */ 523 static void __update_reg_bounds(struct bpf_reg_state *reg) 524 { 525 /* min signed is max(sign bit) | min(other bits) */ 526 reg->smin_value = max_t(s64, reg->smin_value, 527 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 528 /* max signed is min(sign bit) | max(other bits) */ 529 reg->smax_value = min_t(s64, reg->smax_value, 530 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 531 reg->umin_value = max(reg->umin_value, reg->var_off.value); 532 reg->umax_value = min(reg->umax_value, 533 reg->var_off.value | reg->var_off.mask); 534 } 535 536 /* Uses signed min/max values to inform unsigned, and vice-versa */ 537 static void __reg_deduce_bounds(struct bpf_reg_state *reg) 538 { 539 /* Learn sign from signed bounds. 540 * If we cannot cross the sign boundary, then signed and unsigned bounds 541 * are the same, so combine. This works even in the negative case, e.g. 542 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 543 */ 544 if (reg->smin_value >= 0 || reg->smax_value < 0) { 545 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 546 reg->umin_value); 547 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 548 reg->umax_value); 549 return; 550 } 551 /* Learn sign from unsigned bounds. Signed bounds cross the sign 552 * boundary, so we must be careful. 553 */ 554 if ((s64)reg->umax_value >= 0) { 555 /* Positive. We can't learn anything from the smin, but smax 556 * is positive, hence safe. 557 */ 558 reg->smin_value = reg->umin_value; 559 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 560 reg->umax_value); 561 } else if ((s64)reg->umin_value < 0) { 562 /* Negative. We can't learn anything from the smax, but smin 563 * is negative, hence safe. 564 */ 565 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 566 reg->umin_value); 567 reg->smax_value = reg->umax_value; 568 } 569 } 570 571 /* Attempts to improve var_off based on unsigned min/max information */ 572 static void __reg_bound_offset(struct bpf_reg_state *reg) 573 { 574 reg->var_off = tnum_intersect(reg->var_off, 575 tnum_range(reg->umin_value, 576 reg->umax_value)); 577 } 578 579 /* Reset the min/max bounds of a register */ 580 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 581 { 582 reg->smin_value = S64_MIN; 583 reg->smax_value = S64_MAX; 584 reg->umin_value = 0; 585 reg->umax_value = U64_MAX; 586 } 587 588 /* Mark a register as having a completely unknown (scalar) value. */ 589 static void __mark_reg_unknown(struct bpf_reg_state *reg) 590 { 591 reg->type = SCALAR_VALUE; 592 reg->id = 0; 593 reg->off = 0; 594 reg->var_off = tnum_unknown; 595 __mark_reg_unbounded(reg); 596 } 597 598 static void mark_reg_unknown(struct bpf_reg_state *regs, u32 regno) 599 { 600 if (WARN_ON(regno >= MAX_BPF_REG)) { 601 verbose("mark_reg_unknown(regs, %u)\n", regno); 602 /* Something bad happened, let's kill all regs */ 603 for (regno = 0; regno < MAX_BPF_REG; regno++) 604 __mark_reg_not_init(regs + regno); 605 return; 606 } 607 __mark_reg_unknown(regs + regno); 608 } 609 610 static void __mark_reg_not_init(struct bpf_reg_state *reg) 611 { 612 __mark_reg_unknown(reg); 613 reg->type = NOT_INIT; 614 } 615 616 static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno) 617 { 618 if (WARN_ON(regno >= MAX_BPF_REG)) { 619 verbose("mark_reg_not_init(regs, %u)\n", regno); 620 /* Something bad happened, let's kill all regs */ 621 for (regno = 0; regno < MAX_BPF_REG; regno++) 622 __mark_reg_not_init(regs + regno); 623 return; 624 } 625 __mark_reg_not_init(regs + regno); 626 } 627 628 static void init_reg_state(struct bpf_reg_state *regs) 629 { 630 int i; 631 632 for (i = 0; i < MAX_BPF_REG; i++) { 633 mark_reg_not_init(regs, i); 634 regs[i].live = REG_LIVE_NONE; 635 } 636 637 /* frame pointer */ 638 regs[BPF_REG_FP].type = PTR_TO_STACK; 639 mark_reg_known_zero(regs, BPF_REG_FP); 640 641 /* 1st arg to a function */ 642 regs[BPF_REG_1].type = PTR_TO_CTX; 643 mark_reg_known_zero(regs, BPF_REG_1); 644 } 645 646 enum reg_arg_type { 647 SRC_OP, /* register is used as source operand */ 648 DST_OP, /* register is used as destination operand */ 649 DST_OP_NO_MARK /* same as above, check only, don't mark */ 650 }; 651 652 static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) 653 { 654 struct bpf_verifier_state *parent = state->parent; 655 656 while (parent) { 657 /* if read wasn't screened by an earlier write ... */ 658 if (state->regs[regno].live & REG_LIVE_WRITTEN) 659 break; 660 /* ... then we depend on parent's value */ 661 parent->regs[regno].live |= REG_LIVE_READ; 662 state = parent; 663 parent = state->parent; 664 } 665 } 666 667 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, 668 enum reg_arg_type t) 669 { 670 struct bpf_reg_state *regs = env->cur_state.regs; 671 672 if (regno >= MAX_BPF_REG) { 673 verbose("R%d is invalid\n", regno); 674 return -EINVAL; 675 } 676 677 if (t == SRC_OP) { 678 /* check whether register used as source operand can be read */ 679 if (regs[regno].type == NOT_INIT) { 680 verbose("R%d !read_ok\n", regno); 681 return -EACCES; 682 } 683 mark_reg_read(&env->cur_state, regno); 684 } else { 685 /* check whether register used as dest operand can be written to */ 686 if (regno == BPF_REG_FP) { 687 verbose("frame pointer is read only\n"); 688 return -EACCES; 689 } 690 regs[regno].live |= REG_LIVE_WRITTEN; 691 if (t == DST_OP) 692 mark_reg_unknown(regs, regno); 693 } 694 return 0; 695 } 696 697 static bool is_spillable_regtype(enum bpf_reg_type type) 698 { 699 switch (type) { 700 case PTR_TO_MAP_VALUE: 701 case PTR_TO_MAP_VALUE_OR_NULL: 702 case PTR_TO_STACK: 703 case PTR_TO_CTX: 704 case PTR_TO_PACKET: 705 case PTR_TO_PACKET_END: 706 case CONST_PTR_TO_MAP: 707 return true; 708 default: 709 return false; 710 } 711 } 712 713 /* check_stack_read/write functions track spill/fill of registers, 714 * stack boundary and alignment are checked in check_mem_access() 715 */ 716 static int check_stack_write(struct bpf_verifier_state *state, int off, 717 int size, int value_regno) 718 { 719 int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE; 720 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 721 * so it's aligned access and [off, off + size) are within stack limits 722 */ 723 724 if (value_regno >= 0 && 725 is_spillable_regtype(state->regs[value_regno].type)) { 726 727 /* register containing pointer is being spilled into stack */ 728 if (size != BPF_REG_SIZE) { 729 verbose("invalid size of register spill\n"); 730 return -EACCES; 731 } 732 733 /* save register state */ 734 state->spilled_regs[spi] = state->regs[value_regno]; 735 state->spilled_regs[spi].live |= REG_LIVE_WRITTEN; 736 737 for (i = 0; i < BPF_REG_SIZE; i++) 738 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; 739 } else { 740 /* regular write of data into stack */ 741 state->spilled_regs[spi] = (struct bpf_reg_state) {}; 742 743 for (i = 0; i < size; i++) 744 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; 745 } 746 return 0; 747 } 748 749 static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot) 750 { 751 struct bpf_verifier_state *parent = state->parent; 752 753 while (parent) { 754 /* if read wasn't screened by an earlier write ... */ 755 if (state->spilled_regs[slot].live & REG_LIVE_WRITTEN) 756 break; 757 /* ... then we depend on parent's value */ 758 parent->spilled_regs[slot].live |= REG_LIVE_READ; 759 state = parent; 760 parent = state->parent; 761 } 762 } 763 764 static int check_stack_read(struct bpf_verifier_state *state, int off, int size, 765 int value_regno) 766 { 767 u8 *slot_type; 768 int i, spi; 769 770 slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; 771 772 if (slot_type[0] == STACK_SPILL) { 773 if (size != BPF_REG_SIZE) { 774 verbose("invalid size of register spill\n"); 775 return -EACCES; 776 } 777 for (i = 1; i < BPF_REG_SIZE; i++) { 778 if (slot_type[i] != STACK_SPILL) { 779 verbose("corrupted spill memory\n"); 780 return -EACCES; 781 } 782 } 783 784 spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE; 785 786 if (value_regno >= 0) { 787 /* restore register state from stack */ 788 state->regs[value_regno] = state->spilled_regs[spi]; 789 mark_stack_slot_read(state, spi); 790 } 791 return 0; 792 } else { 793 for (i = 0; i < size; i++) { 794 if (slot_type[i] != STACK_MISC) { 795 verbose("invalid read from stack off %d+%d size %d\n", 796 off, i, size); 797 return -EACCES; 798 } 799 } 800 if (value_regno >= 0) 801 /* have read misc data from the stack */ 802 mark_reg_unknown(state->regs, value_regno); 803 return 0; 804 } 805 } 806 807 /* check read/write into map element returned by bpf_map_lookup_elem() */ 808 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 809 int size) 810 { 811 struct bpf_map *map = env->cur_state.regs[regno].map_ptr; 812 813 if (off < 0 || size <= 0 || off + size > map->value_size) { 814 verbose("invalid access to map value, value_size=%d off=%d size=%d\n", 815 map->value_size, off, size); 816 return -EACCES; 817 } 818 return 0; 819 } 820 821 /* check read/write into a map element with possible variable offset */ 822 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 823 int off, int size) 824 { 825 struct bpf_verifier_state *state = &env->cur_state; 826 struct bpf_reg_state *reg = &state->regs[regno]; 827 int err; 828 829 /* We may have adjusted the register to this map value, so we 830 * need to try adding each of min_value and max_value to off 831 * to make sure our theoretical access will be safe. 832 */ 833 if (log_level) 834 print_verifier_state(state); 835 /* The minimum value is only important with signed 836 * comparisons where we can't assume the floor of a 837 * value is 0. If we are using signed variables for our 838 * index'es we need to make sure that whatever we use 839 * will have a set floor within our range. 840 */ 841 if (reg->smin_value < 0) { 842 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 843 regno); 844 return -EACCES; 845 } 846 err = __check_map_access(env, regno, reg->smin_value + off, size); 847 if (err) { 848 verbose("R%d min value is outside of the array range\n", regno); 849 return err; 850 } 851 852 /* If we haven't set a max value then we need to bail since we can't be 853 * sure we won't do bad things. 854 * If reg->umax_value + off could overflow, treat that as unbounded too. 855 */ 856 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 857 verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n", 858 regno); 859 return -EACCES; 860 } 861 err = __check_map_access(env, regno, reg->umax_value + off, size); 862 if (err) 863 verbose("R%d max value is outside of the array range\n", regno); 864 return err; 865 } 866 867 #define MAX_PACKET_OFF 0xffff 868 869 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 870 const struct bpf_call_arg_meta *meta, 871 enum bpf_access_type t) 872 { 873 switch (env->prog->type) { 874 case BPF_PROG_TYPE_LWT_IN: 875 case BPF_PROG_TYPE_LWT_OUT: 876 /* dst_input() and dst_output() can't write for now */ 877 if (t == BPF_WRITE) 878 return false; 879 /* fallthrough */ 880 case BPF_PROG_TYPE_SCHED_CLS: 881 case BPF_PROG_TYPE_SCHED_ACT: 882 case BPF_PROG_TYPE_XDP: 883 case BPF_PROG_TYPE_LWT_XMIT: 884 case BPF_PROG_TYPE_SK_SKB: 885 if (meta) 886 return meta->pkt_access; 887 888 env->seen_direct_write = true; 889 return true; 890 default: 891 return false; 892 } 893 } 894 895 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, 896 int off, int size) 897 { 898 struct bpf_reg_state *regs = env->cur_state.regs; 899 struct bpf_reg_state *reg = ®s[regno]; 900 901 if (off < 0 || size <= 0 || (u64)off + size > reg->range) { 902 verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 903 off, size, regno, reg->id, reg->off, reg->range); 904 return -EACCES; 905 } 906 return 0; 907 } 908 909 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 910 int size) 911 { 912 struct bpf_reg_state *regs = env->cur_state.regs; 913 struct bpf_reg_state *reg = ®s[regno]; 914 int err; 915 916 /* We may have added a variable offset to the packet pointer; but any 917 * reg->range we have comes after that. We are only checking the fixed 918 * offset. 919 */ 920 921 /* We don't allow negative numbers, because we aren't tracking enough 922 * detail to prove they're safe. 923 */ 924 if (reg->smin_value < 0) { 925 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 926 regno); 927 return -EACCES; 928 } 929 err = __check_packet_access(env, regno, off, size); 930 if (err) { 931 verbose("R%d offset is outside of the packet\n", regno); 932 return err; 933 } 934 return err; 935 } 936 937 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ 938 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, 939 enum bpf_access_type t, enum bpf_reg_type *reg_type) 940 { 941 struct bpf_insn_access_aux info = { 942 .reg_type = *reg_type, 943 }; 944 945 /* for analyzer ctx accesses are already validated and converted */ 946 if (env->analyzer_ops) 947 return 0; 948 949 if (env->prog->aux->ops->is_valid_access && 950 env->prog->aux->ops->is_valid_access(off, size, t, &info)) { 951 /* A non zero info.ctx_field_size indicates that this field is a 952 * candidate for later verifier transformation to load the whole 953 * field and then apply a mask when accessed with a narrower 954 * access than actual ctx access size. A zero info.ctx_field_size 955 * will only allow for whole field access and rejects any other 956 * type of narrower access. 957 */ 958 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 959 *reg_type = info.reg_type; 960 961 /* remember the offset of last byte accessed in ctx */ 962 if (env->prog->aux->max_ctx_offset < off + size) 963 env->prog->aux->max_ctx_offset = off + size; 964 return 0; 965 } 966 967 verbose("invalid bpf_context access off=%d size=%d\n", off, size); 968 return -EACCES; 969 } 970 971 static bool __is_pointer_value(bool allow_ptr_leaks, 972 const struct bpf_reg_state *reg) 973 { 974 if (allow_ptr_leaks) 975 return false; 976 977 return reg->type != SCALAR_VALUE; 978 } 979 980 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 981 { 982 return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]); 983 } 984 985 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, 986 int off, int size, bool strict) 987 { 988 struct tnum reg_off; 989 int ip_align; 990 991 /* Byte size accesses are always allowed. */ 992 if (!strict || size == 1) 993 return 0; 994 995 /* For platforms that do not have a Kconfig enabling 996 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 997 * NET_IP_ALIGN is universally set to '2'. And on platforms 998 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 999 * to this code only in strict mode where we want to emulate 1000 * the NET_IP_ALIGN==2 checking. Therefore use an 1001 * unconditional IP align value of '2'. 1002 */ 1003 ip_align = 2; 1004 1005 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 1006 if (!tnum_is_aligned(reg_off, size)) { 1007 char tn_buf[48]; 1008 1009 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1010 verbose("misaligned packet access off %d+%s+%d+%d size %d\n", 1011 ip_align, tn_buf, reg->off, off, size); 1012 return -EACCES; 1013 } 1014 1015 return 0; 1016 } 1017 1018 static int check_generic_ptr_alignment(const struct bpf_reg_state *reg, 1019 const char *pointer_desc, 1020 int off, int size, bool strict) 1021 { 1022 struct tnum reg_off; 1023 1024 /* Byte size accesses are always allowed. */ 1025 if (!strict || size == 1) 1026 return 0; 1027 1028 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 1029 if (!tnum_is_aligned(reg_off, size)) { 1030 char tn_buf[48]; 1031 1032 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1033 verbose("misaligned %saccess off %s+%d+%d size %d\n", 1034 pointer_desc, tn_buf, reg->off, off, size); 1035 return -EACCES; 1036 } 1037 1038 return 0; 1039 } 1040 1041 static int check_ptr_alignment(struct bpf_verifier_env *env, 1042 const struct bpf_reg_state *reg, 1043 int off, int size) 1044 { 1045 bool strict = env->strict_alignment; 1046 const char *pointer_desc = ""; 1047 1048 switch (reg->type) { 1049 case PTR_TO_PACKET: 1050 /* special case, because of NET_IP_ALIGN */ 1051 return check_pkt_ptr_alignment(reg, off, size, strict); 1052 case PTR_TO_MAP_VALUE: 1053 pointer_desc = "value "; 1054 break; 1055 case PTR_TO_CTX: 1056 pointer_desc = "context "; 1057 break; 1058 case PTR_TO_STACK: 1059 pointer_desc = "stack "; 1060 break; 1061 default: 1062 break; 1063 } 1064 return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict); 1065 } 1066 1067 /* check whether memory at (regno + off) is accessible for t = (read | write) 1068 * if t==write, value_regno is a register which value is stored into memory 1069 * if t==read, value_regno is a register which will receive the value from memory 1070 * if t==write && value_regno==-1, some unknown value is stored into memory 1071 * if t==read && value_regno==-1, don't care what we read from memory 1072 */ 1073 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, 1074 int bpf_size, enum bpf_access_type t, 1075 int value_regno) 1076 { 1077 struct bpf_verifier_state *state = &env->cur_state; 1078 struct bpf_reg_state *reg = &state->regs[regno]; 1079 int size, err = 0; 1080 1081 size = bpf_size_to_bytes(bpf_size); 1082 if (size < 0) 1083 return size; 1084 1085 /* alignment checks will add in reg->off themselves */ 1086 err = check_ptr_alignment(env, reg, off, size); 1087 if (err) 1088 return err; 1089 1090 /* for access checks, reg->off is just part of off */ 1091 off += reg->off; 1092 1093 if (reg->type == PTR_TO_MAP_VALUE) { 1094 if (t == BPF_WRITE && value_regno >= 0 && 1095 is_pointer_value(env, value_regno)) { 1096 verbose("R%d leaks addr into map\n", value_regno); 1097 return -EACCES; 1098 } 1099 1100 err = check_map_access(env, regno, off, size); 1101 if (!err && t == BPF_READ && value_regno >= 0) 1102 mark_reg_unknown(state->regs, value_regno); 1103 1104 } else if (reg->type == PTR_TO_CTX) { 1105 enum bpf_reg_type reg_type = SCALAR_VALUE; 1106 1107 if (t == BPF_WRITE && value_regno >= 0 && 1108 is_pointer_value(env, value_regno)) { 1109 verbose("R%d leaks addr into ctx\n", value_regno); 1110 return -EACCES; 1111 } 1112 /* ctx accesses must be at a fixed offset, so that we can 1113 * determine what type of data were returned. 1114 */ 1115 if (!tnum_is_const(reg->var_off)) { 1116 char tn_buf[48]; 1117 1118 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1119 verbose("variable ctx access var_off=%s off=%d size=%d", 1120 tn_buf, off, size); 1121 return -EACCES; 1122 } 1123 off += reg->var_off.value; 1124 err = check_ctx_access(env, insn_idx, off, size, t, ®_type); 1125 if (!err && t == BPF_READ && value_regno >= 0) { 1126 /* ctx access returns either a scalar, or a 1127 * PTR_TO_PACKET[_END]. In the latter case, we know 1128 * the offset is zero. 1129 */ 1130 if (reg_type == SCALAR_VALUE) 1131 mark_reg_unknown(state->regs, value_regno); 1132 else 1133 mark_reg_known_zero(state->regs, value_regno); 1134 state->regs[value_regno].id = 0; 1135 state->regs[value_regno].off = 0; 1136 state->regs[value_regno].range = 0; 1137 state->regs[value_regno].type = reg_type; 1138 } 1139 1140 } else if (reg->type == PTR_TO_STACK) { 1141 /* stack accesses must be at a fixed offset, so that we can 1142 * determine what type of data were returned. 1143 * See check_stack_read(). 1144 */ 1145 if (!tnum_is_const(reg->var_off)) { 1146 char tn_buf[48]; 1147 1148 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1149 verbose("variable stack access var_off=%s off=%d size=%d", 1150 tn_buf, off, size); 1151 return -EACCES; 1152 } 1153 off += reg->var_off.value; 1154 if (off >= 0 || off < -MAX_BPF_STACK) { 1155 verbose("invalid stack off=%d size=%d\n", off, size); 1156 return -EACCES; 1157 } 1158 1159 if (env->prog->aux->stack_depth < -off) 1160 env->prog->aux->stack_depth = -off; 1161 1162 if (t == BPF_WRITE) { 1163 if (!env->allow_ptr_leaks && 1164 state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL && 1165 size != BPF_REG_SIZE) { 1166 verbose("attempt to corrupt spilled pointer on stack\n"); 1167 return -EACCES; 1168 } 1169 err = check_stack_write(state, off, size, value_regno); 1170 } else { 1171 err = check_stack_read(state, off, size, value_regno); 1172 } 1173 } else if (reg->type == PTR_TO_PACKET) { 1174 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 1175 verbose("cannot write into packet\n"); 1176 return -EACCES; 1177 } 1178 if (t == BPF_WRITE && value_regno >= 0 && 1179 is_pointer_value(env, value_regno)) { 1180 verbose("R%d leaks addr into packet\n", value_regno); 1181 return -EACCES; 1182 } 1183 err = check_packet_access(env, regno, off, size); 1184 if (!err && t == BPF_READ && value_regno >= 0) 1185 mark_reg_unknown(state->regs, value_regno); 1186 } else { 1187 verbose("R%d invalid mem access '%s'\n", 1188 regno, reg_type_str[reg->type]); 1189 return -EACCES; 1190 } 1191 1192 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 1193 state->regs[value_regno].type == SCALAR_VALUE) { 1194 /* b/h/w load zero-extends, mark upper bits as known 0 */ 1195 state->regs[value_regno].var_off = tnum_cast( 1196 state->regs[value_regno].var_off, size); 1197 __update_reg_bounds(&state->regs[value_regno]); 1198 } 1199 return err; 1200 } 1201 1202 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 1203 { 1204 int err; 1205 1206 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || 1207 insn->imm != 0) { 1208 verbose("BPF_XADD uses reserved fields\n"); 1209 return -EINVAL; 1210 } 1211 1212 /* check src1 operand */ 1213 err = check_reg_arg(env, insn->src_reg, SRC_OP); 1214 if (err) 1215 return err; 1216 1217 /* check src2 operand */ 1218 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 1219 if (err) 1220 return err; 1221 1222 if (is_pointer_value(env, insn->src_reg)) { 1223 verbose("R%d leaks addr into mem\n", insn->src_reg); 1224 return -EACCES; 1225 } 1226 1227 /* check whether atomic_add can read the memory */ 1228 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 1229 BPF_SIZE(insn->code), BPF_READ, -1); 1230 if (err) 1231 return err; 1232 1233 /* check whether atomic_add can write into the same memory */ 1234 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 1235 BPF_SIZE(insn->code), BPF_WRITE, -1); 1236 } 1237 1238 /* Does this register contain a constant zero? */ 1239 static bool register_is_null(struct bpf_reg_state reg) 1240 { 1241 return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0); 1242 } 1243 1244 /* when register 'regno' is passed into function that will read 'access_size' 1245 * bytes from that pointer, make sure that it's within stack boundary 1246 * and all elements of stack are initialized. 1247 * Unlike most pointer bounds-checking functions, this one doesn't take an 1248 * 'off' argument, so it has to add in reg->off itself. 1249 */ 1250 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 1251 int access_size, bool zero_size_allowed, 1252 struct bpf_call_arg_meta *meta) 1253 { 1254 struct bpf_verifier_state *state = &env->cur_state; 1255 struct bpf_reg_state *regs = state->regs; 1256 int off, i; 1257 1258 if (regs[regno].type != PTR_TO_STACK) { 1259 /* Allow zero-byte read from NULL, regardless of pointer type */ 1260 if (zero_size_allowed && access_size == 0 && 1261 register_is_null(regs[regno])) 1262 return 0; 1263 1264 verbose("R%d type=%s expected=%s\n", regno, 1265 reg_type_str[regs[regno].type], 1266 reg_type_str[PTR_TO_STACK]); 1267 return -EACCES; 1268 } 1269 1270 /* Only allow fixed-offset stack reads */ 1271 if (!tnum_is_const(regs[regno].var_off)) { 1272 char tn_buf[48]; 1273 1274 tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); 1275 verbose("invalid variable stack read R%d var_off=%s\n", 1276 regno, tn_buf); 1277 } 1278 off = regs[regno].off + regs[regno].var_off.value; 1279 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 1280 access_size <= 0) { 1281 verbose("invalid stack type R%d off=%d access_size=%d\n", 1282 regno, off, access_size); 1283 return -EACCES; 1284 } 1285 1286 if (env->prog->aux->stack_depth < -off) 1287 env->prog->aux->stack_depth = -off; 1288 1289 if (meta && meta->raw_mode) { 1290 meta->access_size = access_size; 1291 meta->regno = regno; 1292 return 0; 1293 } 1294 1295 for (i = 0; i < access_size; i++) { 1296 if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { 1297 verbose("invalid indirect read from stack off %d+%d size %d\n", 1298 off, i, access_size); 1299 return -EACCES; 1300 } 1301 } 1302 return 0; 1303 } 1304 1305 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 1306 int access_size, bool zero_size_allowed, 1307 struct bpf_call_arg_meta *meta) 1308 { 1309 struct bpf_reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; 1310 1311 switch (reg->type) { 1312 case PTR_TO_PACKET: 1313 return check_packet_access(env, regno, reg->off, access_size); 1314 case PTR_TO_MAP_VALUE: 1315 return check_map_access(env, regno, reg->off, access_size); 1316 default: /* scalar_value|ptr_to_stack or invalid ptr */ 1317 return check_stack_boundary(env, regno, access_size, 1318 zero_size_allowed, meta); 1319 } 1320 } 1321 1322 static int check_func_arg(struct bpf_verifier_env *env, u32 regno, 1323 enum bpf_arg_type arg_type, 1324 struct bpf_call_arg_meta *meta) 1325 { 1326 struct bpf_reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; 1327 enum bpf_reg_type expected_type, type = reg->type; 1328 int err = 0; 1329 1330 if (arg_type == ARG_DONTCARE) 1331 return 0; 1332 1333 err = check_reg_arg(env, regno, SRC_OP); 1334 if (err) 1335 return err; 1336 1337 if (arg_type == ARG_ANYTHING) { 1338 if (is_pointer_value(env, regno)) { 1339 verbose("R%d leaks addr into helper function\n", regno); 1340 return -EACCES; 1341 } 1342 return 0; 1343 } 1344 1345 if (type == PTR_TO_PACKET && 1346 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 1347 verbose("helper access to the packet is not allowed\n"); 1348 return -EACCES; 1349 } 1350 1351 if (arg_type == ARG_PTR_TO_MAP_KEY || 1352 arg_type == ARG_PTR_TO_MAP_VALUE) { 1353 expected_type = PTR_TO_STACK; 1354 if (type != PTR_TO_PACKET && type != expected_type) 1355 goto err_type; 1356 } else if (arg_type == ARG_CONST_SIZE || 1357 arg_type == ARG_CONST_SIZE_OR_ZERO) { 1358 expected_type = SCALAR_VALUE; 1359 if (type != expected_type) 1360 goto err_type; 1361 } else if (arg_type == ARG_CONST_MAP_PTR) { 1362 expected_type = CONST_PTR_TO_MAP; 1363 if (type != expected_type) 1364 goto err_type; 1365 } else if (arg_type == ARG_PTR_TO_CTX) { 1366 expected_type = PTR_TO_CTX; 1367 if (type != expected_type) 1368 goto err_type; 1369 } else if (arg_type == ARG_PTR_TO_MEM || 1370 arg_type == ARG_PTR_TO_UNINIT_MEM) { 1371 expected_type = PTR_TO_STACK; 1372 /* One exception here. In case function allows for NULL to be 1373 * passed in as argument, it's a SCALAR_VALUE type. Final test 1374 * happens during stack boundary checking. 1375 */ 1376 if (register_is_null(*reg)) 1377 /* final test in check_stack_boundary() */; 1378 else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE && 1379 type != expected_type) 1380 goto err_type; 1381 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; 1382 } else { 1383 verbose("unsupported arg_type %d\n", arg_type); 1384 return -EFAULT; 1385 } 1386 1387 if (arg_type == ARG_CONST_MAP_PTR) { 1388 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 1389 meta->map_ptr = reg->map_ptr; 1390 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 1391 /* bpf_map_xxx(..., map_ptr, ..., key) call: 1392 * check that [key, key + map->key_size) are within 1393 * stack limits and initialized 1394 */ 1395 if (!meta->map_ptr) { 1396 /* in function declaration map_ptr must come before 1397 * map_key, so that it's verified and known before 1398 * we have to check map_key here. Otherwise it means 1399 * that kernel subsystem misconfigured verifier 1400 */ 1401 verbose("invalid map_ptr to access map->key\n"); 1402 return -EACCES; 1403 } 1404 if (type == PTR_TO_PACKET) 1405 err = check_packet_access(env, regno, reg->off, 1406 meta->map_ptr->key_size); 1407 else 1408 err = check_stack_boundary(env, regno, 1409 meta->map_ptr->key_size, 1410 false, NULL); 1411 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { 1412 /* bpf_map_xxx(..., map_ptr, ..., value) call: 1413 * check [value, value + map->value_size) validity 1414 */ 1415 if (!meta->map_ptr) { 1416 /* kernel subsystem misconfigured verifier */ 1417 verbose("invalid map_ptr to access map->value\n"); 1418 return -EACCES; 1419 } 1420 if (type == PTR_TO_PACKET) 1421 err = check_packet_access(env, regno, reg->off, 1422 meta->map_ptr->value_size); 1423 else 1424 err = check_stack_boundary(env, regno, 1425 meta->map_ptr->value_size, 1426 false, NULL); 1427 } else if (arg_type == ARG_CONST_SIZE || 1428 arg_type == ARG_CONST_SIZE_OR_ZERO) { 1429 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 1430 1431 /* bpf_xxx(..., buf, len) call will access 'len' bytes 1432 * from stack pointer 'buf'. Check it 1433 * note: regno == len, regno - 1 == buf 1434 */ 1435 if (regno == 0) { 1436 /* kernel subsystem misconfigured verifier */ 1437 verbose("ARG_CONST_SIZE cannot be first argument\n"); 1438 return -EACCES; 1439 } 1440 1441 /* The register is SCALAR_VALUE; the access check 1442 * happens using its boundaries. 1443 */ 1444 1445 if (!tnum_is_const(reg->var_off)) 1446 /* For unprivileged variable accesses, disable raw 1447 * mode so that the program is required to 1448 * initialize all the memory that the helper could 1449 * just partially fill up. 1450 */ 1451 meta = NULL; 1452 1453 if (reg->smin_value < 0) { 1454 verbose("R%d min value is negative, either use unsigned or 'var &= const'\n", 1455 regno); 1456 return -EACCES; 1457 } 1458 1459 if (reg->umin_value == 0) { 1460 err = check_helper_mem_access(env, regno - 1, 0, 1461 zero_size_allowed, 1462 meta); 1463 if (err) 1464 return err; 1465 } 1466 1467 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 1468 verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 1469 regno); 1470 return -EACCES; 1471 } 1472 err = check_helper_mem_access(env, regno - 1, 1473 reg->umax_value, 1474 zero_size_allowed, meta); 1475 } 1476 1477 return err; 1478 err_type: 1479 verbose("R%d type=%s expected=%s\n", regno, 1480 reg_type_str[type], reg_type_str[expected_type]); 1481 return -EACCES; 1482 } 1483 1484 static int check_map_func_compatibility(struct bpf_map *map, int func_id) 1485 { 1486 if (!map) 1487 return 0; 1488 1489 /* We need a two way check, first is from map perspective ... */ 1490 switch (map->map_type) { 1491 case BPF_MAP_TYPE_PROG_ARRAY: 1492 if (func_id != BPF_FUNC_tail_call) 1493 goto error; 1494 break; 1495 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1496 if (func_id != BPF_FUNC_perf_event_read && 1497 func_id != BPF_FUNC_perf_event_output) 1498 goto error; 1499 break; 1500 case BPF_MAP_TYPE_STACK_TRACE: 1501 if (func_id != BPF_FUNC_get_stackid) 1502 goto error; 1503 break; 1504 case BPF_MAP_TYPE_CGROUP_ARRAY: 1505 if (func_id != BPF_FUNC_skb_under_cgroup && 1506 func_id != BPF_FUNC_current_task_under_cgroup) 1507 goto error; 1508 break; 1509 /* devmap returns a pointer to a live net_device ifindex that we cannot 1510 * allow to be modified from bpf side. So do not allow lookup elements 1511 * for now. 1512 */ 1513 case BPF_MAP_TYPE_DEVMAP: 1514 if (func_id != BPF_FUNC_redirect_map) 1515 goto error; 1516 break; 1517 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 1518 case BPF_MAP_TYPE_HASH_OF_MAPS: 1519 if (func_id != BPF_FUNC_map_lookup_elem) 1520 goto error; 1521 break; 1522 case BPF_MAP_TYPE_SOCKMAP: 1523 if (func_id != BPF_FUNC_sk_redirect_map && 1524 func_id != BPF_FUNC_sock_map_update && 1525 func_id != BPF_FUNC_map_delete_elem) 1526 goto error; 1527 break; 1528 default: 1529 break; 1530 } 1531 1532 /* ... and second from the function itself. */ 1533 switch (func_id) { 1534 case BPF_FUNC_tail_call: 1535 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 1536 goto error; 1537 break; 1538 case BPF_FUNC_perf_event_read: 1539 case BPF_FUNC_perf_event_output: 1540 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 1541 goto error; 1542 break; 1543 case BPF_FUNC_get_stackid: 1544 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 1545 goto error; 1546 break; 1547 case BPF_FUNC_current_task_under_cgroup: 1548 case BPF_FUNC_skb_under_cgroup: 1549 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 1550 goto error; 1551 break; 1552 case BPF_FUNC_redirect_map: 1553 if (map->map_type != BPF_MAP_TYPE_DEVMAP) 1554 goto error; 1555 break; 1556 case BPF_FUNC_sk_redirect_map: 1557 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 1558 goto error; 1559 break; 1560 case BPF_FUNC_sock_map_update: 1561 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 1562 goto error; 1563 break; 1564 default: 1565 break; 1566 } 1567 1568 return 0; 1569 error: 1570 verbose("cannot pass map_type %d into func %s#%d\n", 1571 map->map_type, func_id_name(func_id), func_id); 1572 return -EINVAL; 1573 } 1574 1575 static int check_raw_mode(const struct bpf_func_proto *fn) 1576 { 1577 int count = 0; 1578 1579 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 1580 count++; 1581 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 1582 count++; 1583 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 1584 count++; 1585 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 1586 count++; 1587 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 1588 count++; 1589 1590 return count > 1 ? -EINVAL : 0; 1591 } 1592 1593 /* Packet data might have moved, any old PTR_TO_PACKET[_END] are now invalid, 1594 * so turn them into unknown SCALAR_VALUE. 1595 */ 1596 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 1597 { 1598 struct bpf_verifier_state *state = &env->cur_state; 1599 struct bpf_reg_state *regs = state->regs, *reg; 1600 int i; 1601 1602 for (i = 0; i < MAX_BPF_REG; i++) 1603 if (regs[i].type == PTR_TO_PACKET || 1604 regs[i].type == PTR_TO_PACKET_END) 1605 mark_reg_unknown(regs, i); 1606 1607 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1608 if (state->stack_slot_type[i] != STACK_SPILL) 1609 continue; 1610 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 1611 if (reg->type != PTR_TO_PACKET && 1612 reg->type != PTR_TO_PACKET_END) 1613 continue; 1614 __mark_reg_unknown(reg); 1615 } 1616 } 1617 1618 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 1619 { 1620 struct bpf_verifier_state *state = &env->cur_state; 1621 const struct bpf_func_proto *fn = NULL; 1622 struct bpf_reg_state *regs = state->regs; 1623 struct bpf_call_arg_meta meta; 1624 bool changes_data; 1625 int i, err; 1626 1627 /* find function prototype */ 1628 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 1629 verbose("invalid func %s#%d\n", func_id_name(func_id), func_id); 1630 return -EINVAL; 1631 } 1632 1633 if (env->prog->aux->ops->get_func_proto) 1634 fn = env->prog->aux->ops->get_func_proto(func_id); 1635 1636 if (!fn) { 1637 verbose("unknown func %s#%d\n", func_id_name(func_id), func_id); 1638 return -EINVAL; 1639 } 1640 1641 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 1642 if (!env->prog->gpl_compatible && fn->gpl_only) { 1643 verbose("cannot call GPL only function from proprietary program\n"); 1644 return -EINVAL; 1645 } 1646 1647 changes_data = bpf_helper_changes_pkt_data(fn->func); 1648 1649 memset(&meta, 0, sizeof(meta)); 1650 meta.pkt_access = fn->pkt_access; 1651 1652 /* We only support one arg being in raw mode at the moment, which 1653 * is sufficient for the helper functions we have right now. 1654 */ 1655 err = check_raw_mode(fn); 1656 if (err) { 1657 verbose("kernel subsystem misconfigured func %s#%d\n", 1658 func_id_name(func_id), func_id); 1659 return err; 1660 } 1661 1662 /* check args */ 1663 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); 1664 if (err) 1665 return err; 1666 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1667 if (err) 1668 return err; 1669 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1670 if (err) 1671 return err; 1672 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); 1673 if (err) 1674 return err; 1675 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); 1676 if (err) 1677 return err; 1678 1679 /* Mark slots with STACK_MISC in case of raw mode, stack offset 1680 * is inferred from register state. 1681 */ 1682 for (i = 0; i < meta.access_size; i++) { 1683 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); 1684 if (err) 1685 return err; 1686 } 1687 1688 /* reset caller saved regs */ 1689 for (i = 0; i < CALLER_SAVED_REGS; i++) { 1690 mark_reg_not_init(regs, caller_saved[i]); 1691 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 1692 } 1693 1694 /* update return register (already marked as written above) */ 1695 if (fn->ret_type == RET_INTEGER) { 1696 /* sets type to SCALAR_VALUE */ 1697 mark_reg_unknown(regs, BPF_REG_0); 1698 } else if (fn->ret_type == RET_VOID) { 1699 regs[BPF_REG_0].type = NOT_INIT; 1700 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 1701 struct bpf_insn_aux_data *insn_aux; 1702 1703 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 1704 /* There is no offset yet applied, variable or fixed */ 1705 mark_reg_known_zero(regs, BPF_REG_0); 1706 regs[BPF_REG_0].off = 0; 1707 /* remember map_ptr, so that check_map_access() 1708 * can check 'value_size' boundary of memory access 1709 * to map element returned from bpf_map_lookup_elem() 1710 */ 1711 if (meta.map_ptr == NULL) { 1712 verbose("kernel subsystem misconfigured verifier\n"); 1713 return -EINVAL; 1714 } 1715 regs[BPF_REG_0].map_ptr = meta.map_ptr; 1716 regs[BPF_REG_0].id = ++env->id_gen; 1717 insn_aux = &env->insn_aux_data[insn_idx]; 1718 if (!insn_aux->map_ptr) 1719 insn_aux->map_ptr = meta.map_ptr; 1720 else if (insn_aux->map_ptr != meta.map_ptr) 1721 insn_aux->map_ptr = BPF_MAP_PTR_POISON; 1722 } else { 1723 verbose("unknown return type %d of func %s#%d\n", 1724 fn->ret_type, func_id_name(func_id), func_id); 1725 return -EINVAL; 1726 } 1727 1728 err = check_map_func_compatibility(meta.map_ptr, func_id); 1729 if (err) 1730 return err; 1731 1732 if (changes_data) 1733 clear_all_pkt_pointers(env); 1734 return 0; 1735 } 1736 1737 static void coerce_reg_to_32(struct bpf_reg_state *reg) 1738 { 1739 /* clear high 32 bits */ 1740 reg->var_off = tnum_cast(reg->var_off, 4); 1741 /* Update bounds */ 1742 __update_reg_bounds(reg); 1743 } 1744 1745 static bool signed_add_overflows(s64 a, s64 b) 1746 { 1747 /* Do the add in u64, where overflow is well-defined */ 1748 s64 res = (s64)((u64)a + (u64)b); 1749 1750 if (b < 0) 1751 return res > a; 1752 return res < a; 1753 } 1754 1755 static bool signed_sub_overflows(s64 a, s64 b) 1756 { 1757 /* Do the sub in u64, where overflow is well-defined */ 1758 s64 res = (s64)((u64)a - (u64)b); 1759 1760 if (b < 0) 1761 return res < a; 1762 return res > a; 1763 } 1764 1765 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 1766 * Caller should also handle BPF_MOV case separately. 1767 * If we return -EACCES, caller may want to try again treating pointer as a 1768 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 1769 */ 1770 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 1771 struct bpf_insn *insn, 1772 const struct bpf_reg_state *ptr_reg, 1773 const struct bpf_reg_state *off_reg) 1774 { 1775 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1776 bool known = tnum_is_const(off_reg->var_off); 1777 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 1778 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 1779 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 1780 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 1781 u8 opcode = BPF_OP(insn->code); 1782 u32 dst = insn->dst_reg; 1783 1784 dst_reg = ®s[dst]; 1785 1786 if (WARN_ON_ONCE(known && (smin_val != smax_val))) { 1787 print_verifier_state(&env->cur_state); 1788 verbose("verifier internal error: known but bad sbounds\n"); 1789 return -EINVAL; 1790 } 1791 if (WARN_ON_ONCE(known && (umin_val != umax_val))) { 1792 print_verifier_state(&env->cur_state); 1793 verbose("verifier internal error: known but bad ubounds\n"); 1794 return -EINVAL; 1795 } 1796 1797 if (BPF_CLASS(insn->code) != BPF_ALU64) { 1798 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 1799 if (!env->allow_ptr_leaks) 1800 verbose("R%d 32-bit pointer arithmetic prohibited\n", 1801 dst); 1802 return -EACCES; 1803 } 1804 1805 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 1806 if (!env->allow_ptr_leaks) 1807 verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", 1808 dst); 1809 return -EACCES; 1810 } 1811 if (ptr_reg->type == CONST_PTR_TO_MAP) { 1812 if (!env->allow_ptr_leaks) 1813 verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", 1814 dst); 1815 return -EACCES; 1816 } 1817 if (ptr_reg->type == PTR_TO_PACKET_END) { 1818 if (!env->allow_ptr_leaks) 1819 verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", 1820 dst); 1821 return -EACCES; 1822 } 1823 1824 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 1825 * The id may be overwritten later if we create a new variable offset. 1826 */ 1827 dst_reg->type = ptr_reg->type; 1828 dst_reg->id = ptr_reg->id; 1829 1830 switch (opcode) { 1831 case BPF_ADD: 1832 /* We can take a fixed offset as long as it doesn't overflow 1833 * the s32 'off' field 1834 */ 1835 if (known && (ptr_reg->off + smin_val == 1836 (s64)(s32)(ptr_reg->off + smin_val))) { 1837 /* pointer += K. Accumulate it into fixed offset */ 1838 dst_reg->smin_value = smin_ptr; 1839 dst_reg->smax_value = smax_ptr; 1840 dst_reg->umin_value = umin_ptr; 1841 dst_reg->umax_value = umax_ptr; 1842 dst_reg->var_off = ptr_reg->var_off; 1843 dst_reg->off = ptr_reg->off + smin_val; 1844 dst_reg->range = ptr_reg->range; 1845 break; 1846 } 1847 /* A new variable offset is created. Note that off_reg->off 1848 * == 0, since it's a scalar. 1849 * dst_reg gets the pointer type and since some positive 1850 * integer value was added to the pointer, give it a new 'id' 1851 * if it's a PTR_TO_PACKET. 1852 * this creates a new 'base' pointer, off_reg (variable) gets 1853 * added into the variable offset, and we copy the fixed offset 1854 * from ptr_reg. 1855 */ 1856 if (signed_add_overflows(smin_ptr, smin_val) || 1857 signed_add_overflows(smax_ptr, smax_val)) { 1858 dst_reg->smin_value = S64_MIN; 1859 dst_reg->smax_value = S64_MAX; 1860 } else { 1861 dst_reg->smin_value = smin_ptr + smin_val; 1862 dst_reg->smax_value = smax_ptr + smax_val; 1863 } 1864 if (umin_ptr + umin_val < umin_ptr || 1865 umax_ptr + umax_val < umax_ptr) { 1866 dst_reg->umin_value = 0; 1867 dst_reg->umax_value = U64_MAX; 1868 } else { 1869 dst_reg->umin_value = umin_ptr + umin_val; 1870 dst_reg->umax_value = umax_ptr + umax_val; 1871 } 1872 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 1873 dst_reg->off = ptr_reg->off; 1874 if (ptr_reg->type == PTR_TO_PACKET) { 1875 dst_reg->id = ++env->id_gen; 1876 /* something was added to pkt_ptr, set range to zero */ 1877 dst_reg->range = 0; 1878 } 1879 break; 1880 case BPF_SUB: 1881 if (dst_reg == off_reg) { 1882 /* scalar -= pointer. Creates an unknown scalar */ 1883 if (!env->allow_ptr_leaks) 1884 verbose("R%d tried to subtract pointer from scalar\n", 1885 dst); 1886 return -EACCES; 1887 } 1888 /* We don't allow subtraction from FP, because (according to 1889 * test_verifier.c test "invalid fp arithmetic", JITs might not 1890 * be able to deal with it. 1891 */ 1892 if (ptr_reg->type == PTR_TO_STACK) { 1893 if (!env->allow_ptr_leaks) 1894 verbose("R%d subtraction from stack pointer prohibited\n", 1895 dst); 1896 return -EACCES; 1897 } 1898 if (known && (ptr_reg->off - smin_val == 1899 (s64)(s32)(ptr_reg->off - smin_val))) { 1900 /* pointer -= K. Subtract it from fixed offset */ 1901 dst_reg->smin_value = smin_ptr; 1902 dst_reg->smax_value = smax_ptr; 1903 dst_reg->umin_value = umin_ptr; 1904 dst_reg->umax_value = umax_ptr; 1905 dst_reg->var_off = ptr_reg->var_off; 1906 dst_reg->id = ptr_reg->id; 1907 dst_reg->off = ptr_reg->off - smin_val; 1908 dst_reg->range = ptr_reg->range; 1909 break; 1910 } 1911 /* A new variable offset is created. If the subtrahend is known 1912 * nonnegative, then any reg->range we had before is still good. 1913 */ 1914 if (signed_sub_overflows(smin_ptr, smax_val) || 1915 signed_sub_overflows(smax_ptr, smin_val)) { 1916 /* Overflow possible, we know nothing */ 1917 dst_reg->smin_value = S64_MIN; 1918 dst_reg->smax_value = S64_MAX; 1919 } else { 1920 dst_reg->smin_value = smin_ptr - smax_val; 1921 dst_reg->smax_value = smax_ptr - smin_val; 1922 } 1923 if (umin_ptr < umax_val) { 1924 /* Overflow possible, we know nothing */ 1925 dst_reg->umin_value = 0; 1926 dst_reg->umax_value = U64_MAX; 1927 } else { 1928 /* Cannot overflow (as long as bounds are consistent) */ 1929 dst_reg->umin_value = umin_ptr - umax_val; 1930 dst_reg->umax_value = umax_ptr - umin_val; 1931 } 1932 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 1933 dst_reg->off = ptr_reg->off; 1934 if (ptr_reg->type == PTR_TO_PACKET) { 1935 dst_reg->id = ++env->id_gen; 1936 /* something was added to pkt_ptr, set range to zero */ 1937 if (smin_val < 0) 1938 dst_reg->range = 0; 1939 } 1940 break; 1941 case BPF_AND: 1942 case BPF_OR: 1943 case BPF_XOR: 1944 /* bitwise ops on pointers are troublesome, prohibit for now. 1945 * (However, in principle we could allow some cases, e.g. 1946 * ptr &= ~3 which would reduce min_value by 3.) 1947 */ 1948 if (!env->allow_ptr_leaks) 1949 verbose("R%d bitwise operator %s on pointer prohibited\n", 1950 dst, bpf_alu_string[opcode >> 4]); 1951 return -EACCES; 1952 default: 1953 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 1954 if (!env->allow_ptr_leaks) 1955 verbose("R%d pointer arithmetic with %s operator prohibited\n", 1956 dst, bpf_alu_string[opcode >> 4]); 1957 return -EACCES; 1958 } 1959 1960 __update_reg_bounds(dst_reg); 1961 __reg_deduce_bounds(dst_reg); 1962 __reg_bound_offset(dst_reg); 1963 return 0; 1964 } 1965 1966 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 1967 struct bpf_insn *insn, 1968 struct bpf_reg_state *dst_reg, 1969 struct bpf_reg_state src_reg) 1970 { 1971 struct bpf_reg_state *regs = env->cur_state.regs; 1972 u8 opcode = BPF_OP(insn->code); 1973 bool src_known, dst_known; 1974 s64 smin_val, smax_val; 1975 u64 umin_val, umax_val; 1976 1977 if (BPF_CLASS(insn->code) != BPF_ALU64) { 1978 /* 32-bit ALU ops are (32,32)->64 */ 1979 coerce_reg_to_32(dst_reg); 1980 coerce_reg_to_32(&src_reg); 1981 } 1982 smin_val = src_reg.smin_value; 1983 smax_val = src_reg.smax_value; 1984 umin_val = src_reg.umin_value; 1985 umax_val = src_reg.umax_value; 1986 src_known = tnum_is_const(src_reg.var_off); 1987 dst_known = tnum_is_const(dst_reg->var_off); 1988 1989 switch (opcode) { 1990 case BPF_ADD: 1991 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 1992 signed_add_overflows(dst_reg->smax_value, smax_val)) { 1993 dst_reg->smin_value = S64_MIN; 1994 dst_reg->smax_value = S64_MAX; 1995 } else { 1996 dst_reg->smin_value += smin_val; 1997 dst_reg->smax_value += smax_val; 1998 } 1999 if (dst_reg->umin_value + umin_val < umin_val || 2000 dst_reg->umax_value + umax_val < umax_val) { 2001 dst_reg->umin_value = 0; 2002 dst_reg->umax_value = U64_MAX; 2003 } else { 2004 dst_reg->umin_value += umin_val; 2005 dst_reg->umax_value += umax_val; 2006 } 2007 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 2008 break; 2009 case BPF_SUB: 2010 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 2011 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 2012 /* Overflow possible, we know nothing */ 2013 dst_reg->smin_value = S64_MIN; 2014 dst_reg->smax_value = S64_MAX; 2015 } else { 2016 dst_reg->smin_value -= smax_val; 2017 dst_reg->smax_value -= smin_val; 2018 } 2019 if (dst_reg->umin_value < umax_val) { 2020 /* Overflow possible, we know nothing */ 2021 dst_reg->umin_value = 0; 2022 dst_reg->umax_value = U64_MAX; 2023 } else { 2024 /* Cannot overflow (as long as bounds are consistent) */ 2025 dst_reg->umin_value -= umax_val; 2026 dst_reg->umax_value -= umin_val; 2027 } 2028 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 2029 break; 2030 case BPF_MUL: 2031 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 2032 if (smin_val < 0 || dst_reg->smin_value < 0) { 2033 /* Ain't nobody got time to multiply that sign */ 2034 __mark_reg_unbounded(dst_reg); 2035 __update_reg_bounds(dst_reg); 2036 break; 2037 } 2038 /* Both values are positive, so we can work with unsigned and 2039 * copy the result to signed (unless it exceeds S64_MAX). 2040 */ 2041 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 2042 /* Potential overflow, we know nothing */ 2043 __mark_reg_unbounded(dst_reg); 2044 /* (except what we can learn from the var_off) */ 2045 __update_reg_bounds(dst_reg); 2046 break; 2047 } 2048 dst_reg->umin_value *= umin_val; 2049 dst_reg->umax_value *= umax_val; 2050 if (dst_reg->umax_value > S64_MAX) { 2051 /* Overflow possible, we know nothing */ 2052 dst_reg->smin_value = S64_MIN; 2053 dst_reg->smax_value = S64_MAX; 2054 } else { 2055 dst_reg->smin_value = dst_reg->umin_value; 2056 dst_reg->smax_value = dst_reg->umax_value; 2057 } 2058 break; 2059 case BPF_AND: 2060 if (src_known && dst_known) { 2061 __mark_reg_known(dst_reg, dst_reg->var_off.value & 2062 src_reg.var_off.value); 2063 break; 2064 } 2065 /* We get our minimum from the var_off, since that's inherently 2066 * bitwise. Our maximum is the minimum of the operands' maxima. 2067 */ 2068 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 2069 dst_reg->umin_value = dst_reg->var_off.value; 2070 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 2071 if (dst_reg->smin_value < 0 || smin_val < 0) { 2072 /* Lose signed bounds when ANDing negative numbers, 2073 * ain't nobody got time for that. 2074 */ 2075 dst_reg->smin_value = S64_MIN; 2076 dst_reg->smax_value = S64_MAX; 2077 } else { 2078 /* ANDing two positives gives a positive, so safe to 2079 * cast result into s64. 2080 */ 2081 dst_reg->smin_value = dst_reg->umin_value; 2082 dst_reg->smax_value = dst_reg->umax_value; 2083 } 2084 /* We may learn something more from the var_off */ 2085 __update_reg_bounds(dst_reg); 2086 break; 2087 case BPF_OR: 2088 if (src_known && dst_known) { 2089 __mark_reg_known(dst_reg, dst_reg->var_off.value | 2090 src_reg.var_off.value); 2091 break; 2092 } 2093 /* We get our maximum from the var_off, and our minimum is the 2094 * maximum of the operands' minima 2095 */ 2096 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 2097 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 2098 dst_reg->umax_value = dst_reg->var_off.value | 2099 dst_reg->var_off.mask; 2100 if (dst_reg->smin_value < 0 || smin_val < 0) { 2101 /* Lose signed bounds when ORing negative numbers, 2102 * ain't nobody got time for that. 2103 */ 2104 dst_reg->smin_value = S64_MIN; 2105 dst_reg->smax_value = S64_MAX; 2106 } else { 2107 /* ORing two positives gives a positive, so safe to 2108 * cast result into s64. 2109 */ 2110 dst_reg->smin_value = dst_reg->umin_value; 2111 dst_reg->smax_value = dst_reg->umax_value; 2112 } 2113 /* We may learn something more from the var_off */ 2114 __update_reg_bounds(dst_reg); 2115 break; 2116 case BPF_LSH: 2117 if (umax_val > 63) { 2118 /* Shifts greater than 63 are undefined. This includes 2119 * shifts by a negative number. 2120 */ 2121 mark_reg_unknown(regs, insn->dst_reg); 2122 break; 2123 } 2124 /* We lose all sign bit information (except what we can pick 2125 * up from var_off) 2126 */ 2127 dst_reg->smin_value = S64_MIN; 2128 dst_reg->smax_value = S64_MAX; 2129 /* If we might shift our top bit out, then we know nothing */ 2130 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 2131 dst_reg->umin_value = 0; 2132 dst_reg->umax_value = U64_MAX; 2133 } else { 2134 dst_reg->umin_value <<= umin_val; 2135 dst_reg->umax_value <<= umax_val; 2136 } 2137 if (src_known) 2138 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 2139 else 2140 dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val); 2141 /* We may learn something more from the var_off */ 2142 __update_reg_bounds(dst_reg); 2143 break; 2144 case BPF_RSH: 2145 if (umax_val > 63) { 2146 /* Shifts greater than 63 are undefined. This includes 2147 * shifts by a negative number. 2148 */ 2149 mark_reg_unknown(regs, insn->dst_reg); 2150 break; 2151 } 2152 /* BPF_RSH is an unsigned shift, so make the appropriate casts */ 2153 if (dst_reg->smin_value < 0) { 2154 if (umin_val) { 2155 /* Sign bit will be cleared */ 2156 dst_reg->smin_value = 0; 2157 } else { 2158 /* Lost sign bit information */ 2159 dst_reg->smin_value = S64_MIN; 2160 dst_reg->smax_value = S64_MAX; 2161 } 2162 } else { 2163 dst_reg->smin_value = 2164 (u64)(dst_reg->smin_value) >> umax_val; 2165 } 2166 if (src_known) 2167 dst_reg->var_off = tnum_rshift(dst_reg->var_off, 2168 umin_val); 2169 else 2170 dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val); 2171 dst_reg->umin_value >>= umax_val; 2172 dst_reg->umax_value >>= umin_val; 2173 /* We may learn something more from the var_off */ 2174 __update_reg_bounds(dst_reg); 2175 break; 2176 default: 2177 mark_reg_unknown(regs, insn->dst_reg); 2178 break; 2179 } 2180 2181 __reg_deduce_bounds(dst_reg); 2182 __reg_bound_offset(dst_reg); 2183 return 0; 2184 } 2185 2186 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 2187 * and var_off. 2188 */ 2189 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 2190 struct bpf_insn *insn) 2191 { 2192 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg, *src_reg; 2193 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 2194 u8 opcode = BPF_OP(insn->code); 2195 int rc; 2196 2197 dst_reg = ®s[insn->dst_reg]; 2198 src_reg = NULL; 2199 if (dst_reg->type != SCALAR_VALUE) 2200 ptr_reg = dst_reg; 2201 if (BPF_SRC(insn->code) == BPF_X) { 2202 src_reg = ®s[insn->src_reg]; 2203 if (src_reg->type != SCALAR_VALUE) { 2204 if (dst_reg->type != SCALAR_VALUE) { 2205 /* Combining two pointers by any ALU op yields 2206 * an arbitrary scalar. 2207 */ 2208 if (!env->allow_ptr_leaks) { 2209 verbose("R%d pointer %s pointer prohibited\n", 2210 insn->dst_reg, 2211 bpf_alu_string[opcode >> 4]); 2212 return -EACCES; 2213 } 2214 mark_reg_unknown(regs, insn->dst_reg); 2215 return 0; 2216 } else { 2217 /* scalar += pointer 2218 * This is legal, but we have to reverse our 2219 * src/dest handling in computing the range 2220 */ 2221 rc = adjust_ptr_min_max_vals(env, insn, 2222 src_reg, dst_reg); 2223 if (rc == -EACCES && env->allow_ptr_leaks) { 2224 /* scalar += unknown scalar */ 2225 __mark_reg_unknown(&off_reg); 2226 return adjust_scalar_min_max_vals( 2227 env, insn, 2228 dst_reg, off_reg); 2229 } 2230 return rc; 2231 } 2232 } else if (ptr_reg) { 2233 /* pointer += scalar */ 2234 rc = adjust_ptr_min_max_vals(env, insn, 2235 dst_reg, src_reg); 2236 if (rc == -EACCES && env->allow_ptr_leaks) { 2237 /* unknown scalar += scalar */ 2238 __mark_reg_unknown(dst_reg); 2239 return adjust_scalar_min_max_vals( 2240 env, insn, dst_reg, *src_reg); 2241 } 2242 return rc; 2243 } 2244 } else { 2245 /* Pretend the src is a reg with a known value, since we only 2246 * need to be able to read from this state. 2247 */ 2248 off_reg.type = SCALAR_VALUE; 2249 __mark_reg_known(&off_reg, insn->imm); 2250 src_reg = &off_reg; 2251 if (ptr_reg) { /* pointer += K */ 2252 rc = adjust_ptr_min_max_vals(env, insn, 2253 ptr_reg, src_reg); 2254 if (rc == -EACCES && env->allow_ptr_leaks) { 2255 /* unknown scalar += K */ 2256 __mark_reg_unknown(dst_reg); 2257 return adjust_scalar_min_max_vals( 2258 env, insn, dst_reg, off_reg); 2259 } 2260 return rc; 2261 } 2262 } 2263 2264 /* Got here implies adding two SCALAR_VALUEs */ 2265 if (WARN_ON_ONCE(ptr_reg)) { 2266 print_verifier_state(&env->cur_state); 2267 verbose("verifier internal error: unexpected ptr_reg\n"); 2268 return -EINVAL; 2269 } 2270 if (WARN_ON(!src_reg)) { 2271 print_verifier_state(&env->cur_state); 2272 verbose("verifier internal error: no src_reg\n"); 2273 return -EINVAL; 2274 } 2275 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 2276 } 2277 2278 /* check validity of 32-bit and 64-bit arithmetic operations */ 2279 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 2280 { 2281 struct bpf_reg_state *regs = env->cur_state.regs; 2282 u8 opcode = BPF_OP(insn->code); 2283 int err; 2284 2285 if (opcode == BPF_END || opcode == BPF_NEG) { 2286 if (opcode == BPF_NEG) { 2287 if (BPF_SRC(insn->code) != 0 || 2288 insn->src_reg != BPF_REG_0 || 2289 insn->off != 0 || insn->imm != 0) { 2290 verbose("BPF_NEG uses reserved fields\n"); 2291 return -EINVAL; 2292 } 2293 } else { 2294 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 2295 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 2296 BPF_CLASS(insn->code) == BPF_ALU64) { 2297 verbose("BPF_END uses reserved fields\n"); 2298 return -EINVAL; 2299 } 2300 } 2301 2302 /* check src operand */ 2303 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 2304 if (err) 2305 return err; 2306 2307 if (is_pointer_value(env, insn->dst_reg)) { 2308 verbose("R%d pointer arithmetic prohibited\n", 2309 insn->dst_reg); 2310 return -EACCES; 2311 } 2312 2313 /* check dest operand */ 2314 err = check_reg_arg(env, insn->dst_reg, DST_OP); 2315 if (err) 2316 return err; 2317 2318 } else if (opcode == BPF_MOV) { 2319 2320 if (BPF_SRC(insn->code) == BPF_X) { 2321 if (insn->imm != 0 || insn->off != 0) { 2322 verbose("BPF_MOV uses reserved fields\n"); 2323 return -EINVAL; 2324 } 2325 2326 /* check src operand */ 2327 err = check_reg_arg(env, insn->src_reg, SRC_OP); 2328 if (err) 2329 return err; 2330 } else { 2331 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 2332 verbose("BPF_MOV uses reserved fields\n"); 2333 return -EINVAL; 2334 } 2335 } 2336 2337 /* check dest operand */ 2338 err = check_reg_arg(env, insn->dst_reg, DST_OP); 2339 if (err) 2340 return err; 2341 2342 if (BPF_SRC(insn->code) == BPF_X) { 2343 if (BPF_CLASS(insn->code) == BPF_ALU64) { 2344 /* case: R1 = R2 2345 * copy register state to dest reg 2346 */ 2347 regs[insn->dst_reg] = regs[insn->src_reg]; 2348 } else { 2349 /* R1 = (u32) R2 */ 2350 if (is_pointer_value(env, insn->src_reg)) { 2351 verbose("R%d partial copy of pointer\n", 2352 insn->src_reg); 2353 return -EACCES; 2354 } 2355 mark_reg_unknown(regs, insn->dst_reg); 2356 /* high 32 bits are known zero. */ 2357 regs[insn->dst_reg].var_off = tnum_cast( 2358 regs[insn->dst_reg].var_off, 4); 2359 __update_reg_bounds(®s[insn->dst_reg]); 2360 } 2361 } else { 2362 /* case: R = imm 2363 * remember the value we stored into this reg 2364 */ 2365 regs[insn->dst_reg].type = SCALAR_VALUE; 2366 __mark_reg_known(regs + insn->dst_reg, insn->imm); 2367 } 2368 2369 } else if (opcode > BPF_END) { 2370 verbose("invalid BPF_ALU opcode %x\n", opcode); 2371 return -EINVAL; 2372 2373 } else { /* all other ALU ops: and, sub, xor, add, ... */ 2374 2375 if (BPF_SRC(insn->code) == BPF_X) { 2376 if (insn->imm != 0 || insn->off != 0) { 2377 verbose("BPF_ALU uses reserved fields\n"); 2378 return -EINVAL; 2379 } 2380 /* check src1 operand */ 2381 err = check_reg_arg(env, insn->src_reg, SRC_OP); 2382 if (err) 2383 return err; 2384 } else { 2385 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 2386 verbose("BPF_ALU uses reserved fields\n"); 2387 return -EINVAL; 2388 } 2389 } 2390 2391 /* check src2 operand */ 2392 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 2393 if (err) 2394 return err; 2395 2396 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 2397 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 2398 verbose("div by zero\n"); 2399 return -EINVAL; 2400 } 2401 2402 if ((opcode == BPF_LSH || opcode == BPF_RSH || 2403 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 2404 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 2405 2406 if (insn->imm < 0 || insn->imm >= size) { 2407 verbose("invalid shift %d\n", insn->imm); 2408 return -EINVAL; 2409 } 2410 } 2411 2412 /* check dest operand */ 2413 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 2414 if (err) 2415 return err; 2416 2417 return adjust_reg_min_max_vals(env, insn); 2418 } 2419 2420 return 0; 2421 } 2422 2423 static void find_good_pkt_pointers(struct bpf_verifier_state *state, 2424 struct bpf_reg_state *dst_reg) 2425 { 2426 struct bpf_reg_state *regs = state->regs, *reg; 2427 int i; 2428 2429 if (dst_reg->off < 0) 2430 /* This doesn't give us any range */ 2431 return; 2432 2433 if (dst_reg->umax_value > MAX_PACKET_OFF || 2434 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 2435 /* Risk of overflow. For instance, ptr + (1<<63) may be less 2436 * than pkt_end, but that's because it's also less than pkt. 2437 */ 2438 return; 2439 2440 /* LLVM can generate four kind of checks: 2441 * 2442 * Type 1/2: 2443 * 2444 * r2 = r3; 2445 * r2 += 8; 2446 * if (r2 > pkt_end) goto <handle exception> 2447 * <access okay> 2448 * 2449 * r2 = r3; 2450 * r2 += 8; 2451 * if (r2 < pkt_end) goto <access okay> 2452 * <handle exception> 2453 * 2454 * Where: 2455 * r2 == dst_reg, pkt_end == src_reg 2456 * r2=pkt(id=n,off=8,r=0) 2457 * r3=pkt(id=n,off=0,r=0) 2458 * 2459 * Type 3/4: 2460 * 2461 * r2 = r3; 2462 * r2 += 8; 2463 * if (pkt_end >= r2) goto <access okay> 2464 * <handle exception> 2465 * 2466 * r2 = r3; 2467 * r2 += 8; 2468 * if (pkt_end <= r2) goto <handle exception> 2469 * <access okay> 2470 * 2471 * Where: 2472 * pkt_end == dst_reg, r2 == src_reg 2473 * r2=pkt(id=n,off=8,r=0) 2474 * r3=pkt(id=n,off=0,r=0) 2475 * 2476 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 2477 * so that range of bytes [r3, r3 + 8) is safe to access. 2478 */ 2479 2480 /* If our ids match, then we must have the same max_value. And we 2481 * don't care about the other reg's fixed offset, since if it's too big 2482 * the range won't allow anything. 2483 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 2484 */ 2485 for (i = 0; i < MAX_BPF_REG; i++) 2486 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 2487 /* keep the maximum range already checked */ 2488 regs[i].range = max_t(u16, regs[i].range, dst_reg->off); 2489 2490 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2491 if (state->stack_slot_type[i] != STACK_SPILL) 2492 continue; 2493 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2494 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 2495 reg->range = max_t(u16, reg->range, dst_reg->off); 2496 } 2497 } 2498 2499 /* Adjusts the register min/max values in the case that the dst_reg is the 2500 * variable register that we are working on, and src_reg is a constant or we're 2501 * simply doing a BPF_K check. 2502 * In JEQ/JNE cases we also adjust the var_off values. 2503 */ 2504 static void reg_set_min_max(struct bpf_reg_state *true_reg, 2505 struct bpf_reg_state *false_reg, u64 val, 2506 u8 opcode) 2507 { 2508 /* If the dst_reg is a pointer, we can't learn anything about its 2509 * variable offset from the compare (unless src_reg were a pointer into 2510 * the same object, but we don't bother with that. 2511 * Since false_reg and true_reg have the same type by construction, we 2512 * only need to check one of them for pointerness. 2513 */ 2514 if (__is_pointer_value(false, false_reg)) 2515 return; 2516 2517 switch (opcode) { 2518 case BPF_JEQ: 2519 /* If this is false then we know nothing Jon Snow, but if it is 2520 * true then we know for sure. 2521 */ 2522 __mark_reg_known(true_reg, val); 2523 break; 2524 case BPF_JNE: 2525 /* If this is true we know nothing Jon Snow, but if it is false 2526 * we know the value for sure; 2527 */ 2528 __mark_reg_known(false_reg, val); 2529 break; 2530 case BPF_JGT: 2531 false_reg->umax_value = min(false_reg->umax_value, val); 2532 true_reg->umin_value = max(true_reg->umin_value, val + 1); 2533 break; 2534 case BPF_JSGT: 2535 false_reg->smax_value = min_t(s64, false_reg->smax_value, val); 2536 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); 2537 break; 2538 case BPF_JLT: 2539 false_reg->umin_value = max(false_reg->umin_value, val); 2540 true_reg->umax_value = min(true_reg->umax_value, val - 1); 2541 break; 2542 case BPF_JSLT: 2543 false_reg->smin_value = max_t(s64, false_reg->smin_value, val); 2544 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); 2545 break; 2546 case BPF_JGE: 2547 false_reg->umax_value = min(false_reg->umax_value, val - 1); 2548 true_reg->umin_value = max(true_reg->umin_value, val); 2549 break; 2550 case BPF_JSGE: 2551 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); 2552 true_reg->smin_value = max_t(s64, true_reg->smin_value, val); 2553 break; 2554 case BPF_JLE: 2555 false_reg->umin_value = max(false_reg->umin_value, val + 1); 2556 true_reg->umax_value = min(true_reg->umax_value, val); 2557 break; 2558 case BPF_JSLE: 2559 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); 2560 true_reg->smax_value = min_t(s64, true_reg->smax_value, val); 2561 break; 2562 default: 2563 break; 2564 } 2565 2566 __reg_deduce_bounds(false_reg); 2567 __reg_deduce_bounds(true_reg); 2568 /* We might have learned some bits from the bounds. */ 2569 __reg_bound_offset(false_reg); 2570 __reg_bound_offset(true_reg); 2571 /* Intersecting with the old var_off might have improved our bounds 2572 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2573 * then new var_off is (0; 0x7f...fc) which improves our umax. 2574 */ 2575 __update_reg_bounds(false_reg); 2576 __update_reg_bounds(true_reg); 2577 } 2578 2579 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 2580 * the variable reg. 2581 */ 2582 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 2583 struct bpf_reg_state *false_reg, u64 val, 2584 u8 opcode) 2585 { 2586 if (__is_pointer_value(false, false_reg)) 2587 return; 2588 2589 switch (opcode) { 2590 case BPF_JEQ: 2591 /* If this is false then we know nothing Jon Snow, but if it is 2592 * true then we know for sure. 2593 */ 2594 __mark_reg_known(true_reg, val); 2595 break; 2596 case BPF_JNE: 2597 /* If this is true we know nothing Jon Snow, but if it is false 2598 * we know the value for sure; 2599 */ 2600 __mark_reg_known(false_reg, val); 2601 break; 2602 case BPF_JGT: 2603 true_reg->umax_value = min(true_reg->umax_value, val - 1); 2604 false_reg->umin_value = max(false_reg->umin_value, val); 2605 break; 2606 case BPF_JSGT: 2607 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); 2608 false_reg->smin_value = max_t(s64, false_reg->smin_value, val); 2609 break; 2610 case BPF_JLT: 2611 true_reg->umin_value = max(true_reg->umin_value, val + 1); 2612 false_reg->umax_value = min(false_reg->umax_value, val); 2613 break; 2614 case BPF_JSLT: 2615 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); 2616 false_reg->smax_value = min_t(s64, false_reg->smax_value, val); 2617 break; 2618 case BPF_JGE: 2619 true_reg->umax_value = min(true_reg->umax_value, val); 2620 false_reg->umin_value = max(false_reg->umin_value, val + 1); 2621 break; 2622 case BPF_JSGE: 2623 true_reg->smax_value = min_t(s64, true_reg->smax_value, val); 2624 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); 2625 break; 2626 case BPF_JLE: 2627 true_reg->umin_value = max(true_reg->umin_value, val); 2628 false_reg->umax_value = min(false_reg->umax_value, val - 1); 2629 break; 2630 case BPF_JSLE: 2631 true_reg->smin_value = max_t(s64, true_reg->smin_value, val); 2632 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); 2633 break; 2634 default: 2635 break; 2636 } 2637 2638 __reg_deduce_bounds(false_reg); 2639 __reg_deduce_bounds(true_reg); 2640 /* We might have learned some bits from the bounds. */ 2641 __reg_bound_offset(false_reg); 2642 __reg_bound_offset(true_reg); 2643 /* Intersecting with the old var_off might have improved our bounds 2644 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2645 * then new var_off is (0; 0x7f...fc) which improves our umax. 2646 */ 2647 __update_reg_bounds(false_reg); 2648 __update_reg_bounds(true_reg); 2649 } 2650 2651 /* Regs are known to be equal, so intersect their min/max/var_off */ 2652 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 2653 struct bpf_reg_state *dst_reg) 2654 { 2655 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 2656 dst_reg->umin_value); 2657 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 2658 dst_reg->umax_value); 2659 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 2660 dst_reg->smin_value); 2661 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 2662 dst_reg->smax_value); 2663 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 2664 dst_reg->var_off); 2665 /* We might have learned new bounds from the var_off. */ 2666 __update_reg_bounds(src_reg); 2667 __update_reg_bounds(dst_reg); 2668 /* We might have learned something about the sign bit. */ 2669 __reg_deduce_bounds(src_reg); 2670 __reg_deduce_bounds(dst_reg); 2671 /* We might have learned some bits from the bounds. */ 2672 __reg_bound_offset(src_reg); 2673 __reg_bound_offset(dst_reg); 2674 /* Intersecting with the old var_off might have improved our bounds 2675 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2676 * then new var_off is (0; 0x7f...fc) which improves our umax. 2677 */ 2678 __update_reg_bounds(src_reg); 2679 __update_reg_bounds(dst_reg); 2680 } 2681 2682 static void reg_combine_min_max(struct bpf_reg_state *true_src, 2683 struct bpf_reg_state *true_dst, 2684 struct bpf_reg_state *false_src, 2685 struct bpf_reg_state *false_dst, 2686 u8 opcode) 2687 { 2688 switch (opcode) { 2689 case BPF_JEQ: 2690 __reg_combine_min_max(true_src, true_dst); 2691 break; 2692 case BPF_JNE: 2693 __reg_combine_min_max(false_src, false_dst); 2694 break; 2695 } 2696 } 2697 2698 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, 2699 bool is_null) 2700 { 2701 struct bpf_reg_state *reg = ®s[regno]; 2702 2703 if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { 2704 /* Old offset (both fixed and variable parts) should 2705 * have been known-zero, because we don't allow pointer 2706 * arithmetic on pointers that might be NULL. 2707 */ 2708 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 2709 !tnum_equals_const(reg->var_off, 0) || 2710 reg->off)) { 2711 __mark_reg_known_zero(reg); 2712 reg->off = 0; 2713 } 2714 if (is_null) { 2715 reg->type = SCALAR_VALUE; 2716 } else if (reg->map_ptr->inner_map_meta) { 2717 reg->type = CONST_PTR_TO_MAP; 2718 reg->map_ptr = reg->map_ptr->inner_map_meta; 2719 } else { 2720 reg->type = PTR_TO_MAP_VALUE; 2721 } 2722 /* We don't need id from this point onwards anymore, thus we 2723 * should better reset it, so that state pruning has chances 2724 * to take effect. 2725 */ 2726 reg->id = 0; 2727 } 2728 } 2729 2730 /* The logic is similar to find_good_pkt_pointers(), both could eventually 2731 * be folded together at some point. 2732 */ 2733 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, 2734 bool is_null) 2735 { 2736 struct bpf_reg_state *regs = state->regs; 2737 u32 id = regs[regno].id; 2738 int i; 2739 2740 for (i = 0; i < MAX_BPF_REG; i++) 2741 mark_map_reg(regs, i, id, is_null); 2742 2743 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2744 if (state->stack_slot_type[i] != STACK_SPILL) 2745 continue; 2746 mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, is_null); 2747 } 2748 } 2749 2750 static int check_cond_jmp_op(struct bpf_verifier_env *env, 2751 struct bpf_insn *insn, int *insn_idx) 2752 { 2753 struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state; 2754 struct bpf_reg_state *regs = this_branch->regs, *dst_reg; 2755 u8 opcode = BPF_OP(insn->code); 2756 int err; 2757 2758 if (opcode > BPF_JSLE) { 2759 verbose("invalid BPF_JMP opcode %x\n", opcode); 2760 return -EINVAL; 2761 } 2762 2763 if (BPF_SRC(insn->code) == BPF_X) { 2764 if (insn->imm != 0) { 2765 verbose("BPF_JMP uses reserved fields\n"); 2766 return -EINVAL; 2767 } 2768 2769 /* check src1 operand */ 2770 err = check_reg_arg(env, insn->src_reg, SRC_OP); 2771 if (err) 2772 return err; 2773 2774 if (is_pointer_value(env, insn->src_reg)) { 2775 verbose("R%d pointer comparison prohibited\n", 2776 insn->src_reg); 2777 return -EACCES; 2778 } 2779 } else { 2780 if (insn->src_reg != BPF_REG_0) { 2781 verbose("BPF_JMP uses reserved fields\n"); 2782 return -EINVAL; 2783 } 2784 } 2785 2786 /* check src2 operand */ 2787 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 2788 if (err) 2789 return err; 2790 2791 dst_reg = ®s[insn->dst_reg]; 2792 2793 /* detect if R == 0 where R was initialized to zero earlier */ 2794 if (BPF_SRC(insn->code) == BPF_K && 2795 (opcode == BPF_JEQ || opcode == BPF_JNE) && 2796 dst_reg->type == SCALAR_VALUE && 2797 tnum_equals_const(dst_reg->var_off, insn->imm)) { 2798 if (opcode == BPF_JEQ) { 2799 /* if (imm == imm) goto pc+off; 2800 * only follow the goto, ignore fall-through 2801 */ 2802 *insn_idx += insn->off; 2803 return 0; 2804 } else { 2805 /* if (imm != imm) goto pc+off; 2806 * only follow fall-through branch, since 2807 * that's where the program will go 2808 */ 2809 return 0; 2810 } 2811 } 2812 2813 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); 2814 if (!other_branch) 2815 return -EFAULT; 2816 2817 /* detect if we are comparing against a constant value so we can adjust 2818 * our min/max values for our dst register. 2819 * this is only legit if both are scalars (or pointers to the same 2820 * object, I suppose, but we don't support that right now), because 2821 * otherwise the different base pointers mean the offsets aren't 2822 * comparable. 2823 */ 2824 if (BPF_SRC(insn->code) == BPF_X) { 2825 if (dst_reg->type == SCALAR_VALUE && 2826 regs[insn->src_reg].type == SCALAR_VALUE) { 2827 if (tnum_is_const(regs[insn->src_reg].var_off)) 2828 reg_set_min_max(&other_branch->regs[insn->dst_reg], 2829 dst_reg, regs[insn->src_reg].var_off.value, 2830 opcode); 2831 else if (tnum_is_const(dst_reg->var_off)) 2832 reg_set_min_max_inv(&other_branch->regs[insn->src_reg], 2833 ®s[insn->src_reg], 2834 dst_reg->var_off.value, opcode); 2835 else if (opcode == BPF_JEQ || opcode == BPF_JNE) 2836 /* Comparing for equality, we can combine knowledge */ 2837 reg_combine_min_max(&other_branch->regs[insn->src_reg], 2838 &other_branch->regs[insn->dst_reg], 2839 ®s[insn->src_reg], 2840 ®s[insn->dst_reg], opcode); 2841 } 2842 } else if (dst_reg->type == SCALAR_VALUE) { 2843 reg_set_min_max(&other_branch->regs[insn->dst_reg], 2844 dst_reg, insn->imm, opcode); 2845 } 2846 2847 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ 2848 if (BPF_SRC(insn->code) == BPF_K && 2849 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 2850 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 2851 /* Mark all identical map registers in each branch as either 2852 * safe or unknown depending R == 0 or R != 0 conditional. 2853 */ 2854 mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); 2855 mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); 2856 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 2857 dst_reg->type == PTR_TO_PACKET && 2858 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2859 find_good_pkt_pointers(this_branch, dst_reg); 2860 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && 2861 dst_reg->type == PTR_TO_PACKET && 2862 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2863 find_good_pkt_pointers(other_branch, dst_reg); 2864 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && 2865 dst_reg->type == PTR_TO_PACKET_END && 2866 regs[insn->src_reg].type == PTR_TO_PACKET) { 2867 find_good_pkt_pointers(other_branch, ®s[insn->src_reg]); 2868 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && 2869 dst_reg->type == PTR_TO_PACKET_END && 2870 regs[insn->src_reg].type == PTR_TO_PACKET) { 2871 find_good_pkt_pointers(this_branch, ®s[insn->src_reg]); 2872 } else if (is_pointer_value(env, insn->dst_reg)) { 2873 verbose("R%d pointer comparison prohibited\n", insn->dst_reg); 2874 return -EACCES; 2875 } 2876 if (log_level) 2877 print_verifier_state(this_branch); 2878 return 0; 2879 } 2880 2881 /* return the map pointer stored inside BPF_LD_IMM64 instruction */ 2882 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) 2883 { 2884 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; 2885 2886 return (struct bpf_map *) (unsigned long) imm64; 2887 } 2888 2889 /* verify BPF_LD_IMM64 instruction */ 2890 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 2891 { 2892 struct bpf_reg_state *regs = env->cur_state.regs; 2893 int err; 2894 2895 if (BPF_SIZE(insn->code) != BPF_DW) { 2896 verbose("invalid BPF_LD_IMM insn\n"); 2897 return -EINVAL; 2898 } 2899 if (insn->off != 0) { 2900 verbose("BPF_LD_IMM64 uses reserved fields\n"); 2901 return -EINVAL; 2902 } 2903 2904 err = check_reg_arg(env, insn->dst_reg, DST_OP); 2905 if (err) 2906 return err; 2907 2908 if (insn->src_reg == 0) { 2909 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 2910 2911 regs[insn->dst_reg].type = SCALAR_VALUE; 2912 __mark_reg_known(®s[insn->dst_reg], imm); 2913 return 0; 2914 } 2915 2916 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ 2917 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); 2918 2919 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 2920 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); 2921 return 0; 2922 } 2923 2924 static bool may_access_skb(enum bpf_prog_type type) 2925 { 2926 switch (type) { 2927 case BPF_PROG_TYPE_SOCKET_FILTER: 2928 case BPF_PROG_TYPE_SCHED_CLS: 2929 case BPF_PROG_TYPE_SCHED_ACT: 2930 return true; 2931 default: 2932 return false; 2933 } 2934 } 2935 2936 /* verify safety of LD_ABS|LD_IND instructions: 2937 * - they can only appear in the programs where ctx == skb 2938 * - since they are wrappers of function calls, they scratch R1-R5 registers, 2939 * preserve R6-R9, and store return value into R0 2940 * 2941 * Implicit input: 2942 * ctx == skb == R6 == CTX 2943 * 2944 * Explicit input: 2945 * SRC == any register 2946 * IMM == 32-bit immediate 2947 * 2948 * Output: 2949 * R0 - 8/16/32-bit skb data converted to cpu endianness 2950 */ 2951 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 2952 { 2953 struct bpf_reg_state *regs = env->cur_state.regs; 2954 u8 mode = BPF_MODE(insn->code); 2955 int i, err; 2956 2957 if (!may_access_skb(env->prog->type)) { 2958 verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 2959 return -EINVAL; 2960 } 2961 2962 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 2963 BPF_SIZE(insn->code) == BPF_DW || 2964 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 2965 verbose("BPF_LD_[ABS|IND] uses reserved fields\n"); 2966 return -EINVAL; 2967 } 2968 2969 /* check whether implicit source operand (register R6) is readable */ 2970 err = check_reg_arg(env, BPF_REG_6, SRC_OP); 2971 if (err) 2972 return err; 2973 2974 if (regs[BPF_REG_6].type != PTR_TO_CTX) { 2975 verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 2976 return -EINVAL; 2977 } 2978 2979 if (mode == BPF_IND) { 2980 /* check explicit source operand */ 2981 err = check_reg_arg(env, insn->src_reg, SRC_OP); 2982 if (err) 2983 return err; 2984 } 2985 2986 /* reset caller saved regs to unreadable */ 2987 for (i = 0; i < CALLER_SAVED_REGS; i++) { 2988 mark_reg_not_init(regs, caller_saved[i]); 2989 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 2990 } 2991 2992 /* mark destination R0 register as readable, since it contains 2993 * the value fetched from the packet. 2994 * Already marked as written above. 2995 */ 2996 mark_reg_unknown(regs, BPF_REG_0); 2997 return 0; 2998 } 2999 3000 /* non-recursive DFS pseudo code 3001 * 1 procedure DFS-iterative(G,v): 3002 * 2 label v as discovered 3003 * 3 let S be a stack 3004 * 4 S.push(v) 3005 * 5 while S is not empty 3006 * 6 t <- S.pop() 3007 * 7 if t is what we're looking for: 3008 * 8 return t 3009 * 9 for all edges e in G.adjacentEdges(t) do 3010 * 10 if edge e is already labelled 3011 * 11 continue with the next edge 3012 * 12 w <- G.adjacentVertex(t,e) 3013 * 13 if vertex w is not discovered and not explored 3014 * 14 label e as tree-edge 3015 * 15 label w as discovered 3016 * 16 S.push(w) 3017 * 17 continue at 5 3018 * 18 else if vertex w is discovered 3019 * 19 label e as back-edge 3020 * 20 else 3021 * 21 // vertex w is explored 3022 * 22 label e as forward- or cross-edge 3023 * 23 label t as explored 3024 * 24 S.pop() 3025 * 3026 * convention: 3027 * 0x10 - discovered 3028 * 0x11 - discovered and fall-through edge labelled 3029 * 0x12 - discovered and fall-through and branch edges labelled 3030 * 0x20 - explored 3031 */ 3032 3033 enum { 3034 DISCOVERED = 0x10, 3035 EXPLORED = 0x20, 3036 FALLTHROUGH = 1, 3037 BRANCH = 2, 3038 }; 3039 3040 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) 3041 3042 static int *insn_stack; /* stack of insns to process */ 3043 static int cur_stack; /* current stack index */ 3044 static int *insn_state; 3045 3046 /* t, w, e - match pseudo-code above: 3047 * t - index of current instruction 3048 * w - next instruction 3049 * e - edge 3050 */ 3051 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) 3052 { 3053 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 3054 return 0; 3055 3056 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 3057 return 0; 3058 3059 if (w < 0 || w >= env->prog->len) { 3060 verbose("jump out of range from insn %d to %d\n", t, w); 3061 return -EINVAL; 3062 } 3063 3064 if (e == BRANCH) 3065 /* mark branch target for state pruning */ 3066 env->explored_states[w] = STATE_LIST_MARK; 3067 3068 if (insn_state[w] == 0) { 3069 /* tree-edge */ 3070 insn_state[t] = DISCOVERED | e; 3071 insn_state[w] = DISCOVERED; 3072 if (cur_stack >= env->prog->len) 3073 return -E2BIG; 3074 insn_stack[cur_stack++] = w; 3075 return 1; 3076 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 3077 verbose("back-edge from insn %d to %d\n", t, w); 3078 return -EINVAL; 3079 } else if (insn_state[w] == EXPLORED) { 3080 /* forward- or cross-edge */ 3081 insn_state[t] = DISCOVERED | e; 3082 } else { 3083 verbose("insn state internal bug\n"); 3084 return -EFAULT; 3085 } 3086 return 0; 3087 } 3088 3089 /* non-recursive depth-first-search to detect loops in BPF program 3090 * loop == back-edge in directed graph 3091 */ 3092 static int check_cfg(struct bpf_verifier_env *env) 3093 { 3094 struct bpf_insn *insns = env->prog->insnsi; 3095 int insn_cnt = env->prog->len; 3096 int ret = 0; 3097 int i, t; 3098 3099 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 3100 if (!insn_state) 3101 return -ENOMEM; 3102 3103 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 3104 if (!insn_stack) { 3105 kfree(insn_state); 3106 return -ENOMEM; 3107 } 3108 3109 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 3110 insn_stack[0] = 0; /* 0 is the first instruction */ 3111 cur_stack = 1; 3112 3113 peek_stack: 3114 if (cur_stack == 0) 3115 goto check_state; 3116 t = insn_stack[cur_stack - 1]; 3117 3118 if (BPF_CLASS(insns[t].code) == BPF_JMP) { 3119 u8 opcode = BPF_OP(insns[t].code); 3120 3121 if (opcode == BPF_EXIT) { 3122 goto mark_explored; 3123 } else if (opcode == BPF_CALL) { 3124 ret = push_insn(t, t + 1, FALLTHROUGH, env); 3125 if (ret == 1) 3126 goto peek_stack; 3127 else if (ret < 0) 3128 goto err_free; 3129 if (t + 1 < insn_cnt) 3130 env->explored_states[t + 1] = STATE_LIST_MARK; 3131 } else if (opcode == BPF_JA) { 3132 if (BPF_SRC(insns[t].code) != BPF_K) { 3133 ret = -EINVAL; 3134 goto err_free; 3135 } 3136 /* unconditional jump with single edge */ 3137 ret = push_insn(t, t + insns[t].off + 1, 3138 FALLTHROUGH, env); 3139 if (ret == 1) 3140 goto peek_stack; 3141 else if (ret < 0) 3142 goto err_free; 3143 /* tell verifier to check for equivalent states 3144 * after every call and jump 3145 */ 3146 if (t + 1 < insn_cnt) 3147 env->explored_states[t + 1] = STATE_LIST_MARK; 3148 } else { 3149 /* conditional jump with two edges */ 3150 env->explored_states[t] = STATE_LIST_MARK; 3151 ret = push_insn(t, t + 1, FALLTHROUGH, env); 3152 if (ret == 1) 3153 goto peek_stack; 3154 else if (ret < 0) 3155 goto err_free; 3156 3157 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); 3158 if (ret == 1) 3159 goto peek_stack; 3160 else if (ret < 0) 3161 goto err_free; 3162 } 3163 } else { 3164 /* all other non-branch instructions with single 3165 * fall-through edge 3166 */ 3167 ret = push_insn(t, t + 1, FALLTHROUGH, env); 3168 if (ret == 1) 3169 goto peek_stack; 3170 else if (ret < 0) 3171 goto err_free; 3172 } 3173 3174 mark_explored: 3175 insn_state[t] = EXPLORED; 3176 if (cur_stack-- <= 0) { 3177 verbose("pop stack internal bug\n"); 3178 ret = -EFAULT; 3179 goto err_free; 3180 } 3181 goto peek_stack; 3182 3183 check_state: 3184 for (i = 0; i < insn_cnt; i++) { 3185 if (insn_state[i] != EXPLORED) { 3186 verbose("unreachable insn %d\n", i); 3187 ret = -EINVAL; 3188 goto err_free; 3189 } 3190 } 3191 ret = 0; /* cfg looks good */ 3192 3193 err_free: 3194 kfree(insn_state); 3195 kfree(insn_stack); 3196 return ret; 3197 } 3198 3199 /* check %cur's range satisfies %old's */ 3200 static bool range_within(struct bpf_reg_state *old, 3201 struct bpf_reg_state *cur) 3202 { 3203 return old->umin_value <= cur->umin_value && 3204 old->umax_value >= cur->umax_value && 3205 old->smin_value <= cur->smin_value && 3206 old->smax_value >= cur->smax_value; 3207 } 3208 3209 /* Maximum number of register states that can exist at once */ 3210 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 3211 struct idpair { 3212 u32 old; 3213 u32 cur; 3214 }; 3215 3216 /* If in the old state two registers had the same id, then they need to have 3217 * the same id in the new state as well. But that id could be different from 3218 * the old state, so we need to track the mapping from old to new ids. 3219 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 3220 * regs with old id 5 must also have new id 9 for the new state to be safe. But 3221 * regs with a different old id could still have new id 9, we don't care about 3222 * that. 3223 * So we look through our idmap to see if this old id has been seen before. If 3224 * so, we require the new id to match; otherwise, we add the id pair to the map. 3225 */ 3226 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) 3227 { 3228 unsigned int i; 3229 3230 for (i = 0; i < ID_MAP_SIZE; i++) { 3231 if (!idmap[i].old) { 3232 /* Reached an empty slot; haven't seen this id before */ 3233 idmap[i].old = old_id; 3234 idmap[i].cur = cur_id; 3235 return true; 3236 } 3237 if (idmap[i].old == old_id) 3238 return idmap[i].cur == cur_id; 3239 } 3240 /* We ran out of idmap slots, which should be impossible */ 3241 WARN_ON_ONCE(1); 3242 return false; 3243 } 3244 3245 /* Returns true if (rold safe implies rcur safe) */ 3246 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 3247 struct idpair *idmap) 3248 { 3249 if (!(rold->live & REG_LIVE_READ)) 3250 /* explored state didn't use this */ 3251 return true; 3252 3253 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) 3254 return true; 3255 3256 if (rold->type == NOT_INIT) 3257 /* explored state can't have used this */ 3258 return true; 3259 if (rcur->type == NOT_INIT) 3260 return false; 3261 switch (rold->type) { 3262 case SCALAR_VALUE: 3263 if (rcur->type == SCALAR_VALUE) { 3264 /* new val must satisfy old val knowledge */ 3265 return range_within(rold, rcur) && 3266 tnum_in(rold->var_off, rcur->var_off); 3267 } else { 3268 /* if we knew anything about the old value, we're not 3269 * equal, because we can't know anything about the 3270 * scalar value of the pointer in the new value. 3271 */ 3272 return rold->umin_value == 0 && 3273 rold->umax_value == U64_MAX && 3274 rold->smin_value == S64_MIN && 3275 rold->smax_value == S64_MAX && 3276 tnum_is_unknown(rold->var_off); 3277 } 3278 case PTR_TO_MAP_VALUE: 3279 /* If the new min/max/var_off satisfy the old ones and 3280 * everything else matches, we are OK. 3281 * We don't care about the 'id' value, because nothing 3282 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) 3283 */ 3284 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 3285 range_within(rold, rcur) && 3286 tnum_in(rold->var_off, rcur->var_off); 3287 case PTR_TO_MAP_VALUE_OR_NULL: 3288 /* a PTR_TO_MAP_VALUE could be safe to use as a 3289 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 3290 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 3291 * checked, doing so could have affected others with the same 3292 * id, and we can't check for that because we lost the id when 3293 * we converted to a PTR_TO_MAP_VALUE. 3294 */ 3295 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 3296 return false; 3297 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 3298 return false; 3299 /* Check our ids match any regs they're supposed to */ 3300 return check_ids(rold->id, rcur->id, idmap); 3301 case PTR_TO_PACKET: 3302 if (rcur->type != PTR_TO_PACKET) 3303 return false; 3304 /* We must have at least as much range as the old ptr 3305 * did, so that any accesses which were safe before are 3306 * still safe. This is true even if old range < old off, 3307 * since someone could have accessed through (ptr - k), or 3308 * even done ptr -= k in a register, to get a safe access. 3309 */ 3310 if (rold->range > rcur->range) 3311 return false; 3312 /* If the offsets don't match, we can't trust our alignment; 3313 * nor can we be sure that we won't fall out of range. 3314 */ 3315 if (rold->off != rcur->off) 3316 return false; 3317 /* id relations must be preserved */ 3318 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 3319 return false; 3320 /* new val must satisfy old val knowledge */ 3321 return range_within(rold, rcur) && 3322 tnum_in(rold->var_off, rcur->var_off); 3323 case PTR_TO_CTX: 3324 case CONST_PTR_TO_MAP: 3325 case PTR_TO_STACK: 3326 case PTR_TO_PACKET_END: 3327 /* Only valid matches are exact, which memcmp() above 3328 * would have accepted 3329 */ 3330 default: 3331 /* Don't know what's going on, just say it's not safe */ 3332 return false; 3333 } 3334 3335 /* Shouldn't get here; if we do, say it's not safe */ 3336 WARN_ON_ONCE(1); 3337 return false; 3338 } 3339 3340 /* compare two verifier states 3341 * 3342 * all states stored in state_list are known to be valid, since 3343 * verifier reached 'bpf_exit' instruction through them 3344 * 3345 * this function is called when verifier exploring different branches of 3346 * execution popped from the state stack. If it sees an old state that has 3347 * more strict register state and more strict stack state then this execution 3348 * branch doesn't need to be explored further, since verifier already 3349 * concluded that more strict state leads to valid finish. 3350 * 3351 * Therefore two states are equivalent if register state is more conservative 3352 * and explored stack state is more conservative than the current one. 3353 * Example: 3354 * explored current 3355 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 3356 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 3357 * 3358 * In other words if current stack state (one being explored) has more 3359 * valid slots than old one that already passed validation, it means 3360 * the verifier can stop exploring and conclude that current state is valid too 3361 * 3362 * Similarly with registers. If explored state has register type as invalid 3363 * whereas register type in current state is meaningful, it means that 3364 * the current state will reach 'bpf_exit' instruction safely 3365 */ 3366 static bool states_equal(struct bpf_verifier_env *env, 3367 struct bpf_verifier_state *old, 3368 struct bpf_verifier_state *cur) 3369 { 3370 struct idpair *idmap; 3371 bool ret = false; 3372 int i; 3373 3374 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); 3375 /* If we failed to allocate the idmap, just say it's not safe */ 3376 if (!idmap) 3377 return false; 3378 3379 for (i = 0; i < MAX_BPF_REG; i++) { 3380 if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) 3381 goto out_free; 3382 } 3383 3384 for (i = 0; i < MAX_BPF_STACK; i++) { 3385 if (old->stack_slot_type[i] == STACK_INVALID) 3386 continue; 3387 if (old->stack_slot_type[i] != cur->stack_slot_type[i]) 3388 /* Ex: old explored (safe) state has STACK_SPILL in 3389 * this stack slot, but current has has STACK_MISC -> 3390 * this verifier states are not equivalent, 3391 * return false to continue verification of this path 3392 */ 3393 goto out_free; 3394 if (i % BPF_REG_SIZE) 3395 continue; 3396 if (old->stack_slot_type[i] != STACK_SPILL) 3397 continue; 3398 if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE], 3399 &cur->spilled_regs[i / BPF_REG_SIZE], 3400 idmap)) 3401 /* when explored and current stack slot are both storing 3402 * spilled registers, check that stored pointers types 3403 * are the same as well. 3404 * Ex: explored safe path could have stored 3405 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 3406 * but current path has stored: 3407 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 3408 * such verifier states are not equivalent. 3409 * return false to continue verification of this path 3410 */ 3411 goto out_free; 3412 else 3413 continue; 3414 } 3415 ret = true; 3416 out_free: 3417 kfree(idmap); 3418 return ret; 3419 } 3420 3421 /* A write screens off any subsequent reads; but write marks come from the 3422 * straight-line code between a state and its parent. When we arrive at a 3423 * jump target (in the first iteration of the propagate_liveness() loop), 3424 * we didn't arrive by the straight-line code, so read marks in state must 3425 * propagate to parent regardless of state's write marks. 3426 */ 3427 static bool do_propagate_liveness(const struct bpf_verifier_state *state, 3428 struct bpf_verifier_state *parent) 3429 { 3430 bool writes = parent == state->parent; /* Observe write marks */ 3431 bool touched = false; /* any changes made? */ 3432 int i; 3433 3434 if (!parent) 3435 return touched; 3436 /* Propagate read liveness of registers... */ 3437 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 3438 /* We don't need to worry about FP liveness because it's read-only */ 3439 for (i = 0; i < BPF_REG_FP; i++) { 3440 if (parent->regs[i].live & REG_LIVE_READ) 3441 continue; 3442 if (writes && (state->regs[i].live & REG_LIVE_WRITTEN)) 3443 continue; 3444 if (state->regs[i].live & REG_LIVE_READ) { 3445 parent->regs[i].live |= REG_LIVE_READ; 3446 touched = true; 3447 } 3448 } 3449 /* ... and stack slots */ 3450 for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) { 3451 if (parent->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL) 3452 continue; 3453 if (state->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL) 3454 continue; 3455 if (parent->spilled_regs[i].live & REG_LIVE_READ) 3456 continue; 3457 if (writes && (state->spilled_regs[i].live & REG_LIVE_WRITTEN)) 3458 continue; 3459 if (state->spilled_regs[i].live & REG_LIVE_READ) { 3460 parent->spilled_regs[i].live |= REG_LIVE_READ; 3461 touched = true; 3462 } 3463 } 3464 return touched; 3465 } 3466 3467 /* "parent" is "a state from which we reach the current state", but initially 3468 * it is not the state->parent (i.e. "the state whose straight-line code leads 3469 * to the current state"), instead it is the state that happened to arrive at 3470 * a (prunable) equivalent of the current state. See comment above 3471 * do_propagate_liveness() for consequences of this. 3472 * This function is just a more efficient way of calling mark_reg_read() or 3473 * mark_stack_slot_read() on each reg in "parent" that is read in "state", 3474 * though it requires that parent != state->parent in the call arguments. 3475 */ 3476 static void propagate_liveness(const struct bpf_verifier_state *state, 3477 struct bpf_verifier_state *parent) 3478 { 3479 while (do_propagate_liveness(state, parent)) { 3480 /* Something changed, so we need to feed those changes onward */ 3481 state = parent; 3482 parent = state->parent; 3483 } 3484 } 3485 3486 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 3487 { 3488 struct bpf_verifier_state_list *new_sl; 3489 struct bpf_verifier_state_list *sl; 3490 int i; 3491 3492 sl = env->explored_states[insn_idx]; 3493 if (!sl) 3494 /* this 'insn_idx' instruction wasn't marked, so we will not 3495 * be doing state search here 3496 */ 3497 return 0; 3498 3499 while (sl != STATE_LIST_MARK) { 3500 if (states_equal(env, &sl->state, &env->cur_state)) { 3501 /* reached equivalent register/stack state, 3502 * prune the search. 3503 * Registers read by the continuation are read by us. 3504 * If we have any write marks in env->cur_state, they 3505 * will prevent corresponding reads in the continuation 3506 * from reaching our parent (an explored_state). Our 3507 * own state will get the read marks recorded, but 3508 * they'll be immediately forgotten as we're pruning 3509 * this state and will pop a new one. 3510 */ 3511 propagate_liveness(&sl->state, &env->cur_state); 3512 return 1; 3513 } 3514 sl = sl->next; 3515 } 3516 3517 /* there were no equivalent states, remember current one. 3518 * technically the current state is not proven to be safe yet, 3519 * but it will either reach bpf_exit (which means it's safe) or 3520 * it will be rejected. Since there are no loops, we won't be 3521 * seeing this 'insn_idx' instruction again on the way to bpf_exit 3522 */ 3523 new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER); 3524 if (!new_sl) 3525 return -ENOMEM; 3526 3527 /* add new state to the head of linked list */ 3528 memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); 3529 new_sl->next = env->explored_states[insn_idx]; 3530 env->explored_states[insn_idx] = new_sl; 3531 /* connect new state to parentage chain */ 3532 env->cur_state.parent = &new_sl->state; 3533 /* clear write marks in current state: the writes we did are not writes 3534 * our child did, so they don't screen off its reads from us. 3535 * (There are no read marks in current state, because reads always mark 3536 * their parent and current state never has children yet. Only 3537 * explored_states can get read marks.) 3538 */ 3539 for (i = 0; i < BPF_REG_FP; i++) 3540 env->cur_state.regs[i].live = REG_LIVE_NONE; 3541 for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) 3542 if (env->cur_state.stack_slot_type[i * BPF_REG_SIZE] == STACK_SPILL) 3543 env->cur_state.spilled_regs[i].live = REG_LIVE_NONE; 3544 return 0; 3545 } 3546 3547 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, 3548 int insn_idx, int prev_insn_idx) 3549 { 3550 if (!env->analyzer_ops || !env->analyzer_ops->insn_hook) 3551 return 0; 3552 3553 return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx); 3554 } 3555 3556 static int do_check(struct bpf_verifier_env *env) 3557 { 3558 struct bpf_verifier_state *state = &env->cur_state; 3559 struct bpf_insn *insns = env->prog->insnsi; 3560 struct bpf_reg_state *regs = state->regs; 3561 int insn_cnt = env->prog->len; 3562 int insn_idx, prev_insn_idx = 0; 3563 int insn_processed = 0; 3564 bool do_print_state = false; 3565 3566 init_reg_state(regs); 3567 state->parent = NULL; 3568 insn_idx = 0; 3569 for (;;) { 3570 struct bpf_insn *insn; 3571 u8 class; 3572 int err; 3573 3574 if (insn_idx >= insn_cnt) { 3575 verbose("invalid insn idx %d insn_cnt %d\n", 3576 insn_idx, insn_cnt); 3577 return -EFAULT; 3578 } 3579 3580 insn = &insns[insn_idx]; 3581 class = BPF_CLASS(insn->code); 3582 3583 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 3584 verbose("BPF program is too large. Processed %d insn\n", 3585 insn_processed); 3586 return -E2BIG; 3587 } 3588 3589 err = is_state_visited(env, insn_idx); 3590 if (err < 0) 3591 return err; 3592 if (err == 1) { 3593 /* found equivalent state, can prune the search */ 3594 if (log_level) { 3595 if (do_print_state) 3596 verbose("\nfrom %d to %d: safe\n", 3597 prev_insn_idx, insn_idx); 3598 else 3599 verbose("%d: safe\n", insn_idx); 3600 } 3601 goto process_bpf_exit; 3602 } 3603 3604 if (need_resched()) 3605 cond_resched(); 3606 3607 if (log_level > 1 || (log_level && do_print_state)) { 3608 if (log_level > 1) 3609 verbose("%d:", insn_idx); 3610 else 3611 verbose("\nfrom %d to %d:", 3612 prev_insn_idx, insn_idx); 3613 print_verifier_state(&env->cur_state); 3614 do_print_state = false; 3615 } 3616 3617 if (log_level) { 3618 verbose("%d: ", insn_idx); 3619 print_bpf_insn(env, insn); 3620 } 3621 3622 err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); 3623 if (err) 3624 return err; 3625 3626 if (class == BPF_ALU || class == BPF_ALU64) { 3627 err = check_alu_op(env, insn); 3628 if (err) 3629 return err; 3630 3631 } else if (class == BPF_LDX) { 3632 enum bpf_reg_type *prev_src_type, src_reg_type; 3633 3634 /* check for reserved fields is already done */ 3635 3636 /* check src operand */ 3637 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3638 if (err) 3639 return err; 3640 3641 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 3642 if (err) 3643 return err; 3644 3645 src_reg_type = regs[insn->src_reg].type; 3646 3647 /* check that memory (src_reg + off) is readable, 3648 * the state of dst_reg will be updated by this func 3649 */ 3650 err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, 3651 BPF_SIZE(insn->code), BPF_READ, 3652 insn->dst_reg); 3653 if (err) 3654 return err; 3655 3656 prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; 3657 3658 if (*prev_src_type == NOT_INIT) { 3659 /* saw a valid insn 3660 * dst_reg = *(u32 *)(src_reg + off) 3661 * save type to validate intersecting paths 3662 */ 3663 *prev_src_type = src_reg_type; 3664 3665 } else if (src_reg_type != *prev_src_type && 3666 (src_reg_type == PTR_TO_CTX || 3667 *prev_src_type == PTR_TO_CTX)) { 3668 /* ABuser program is trying to use the same insn 3669 * dst_reg = *(u32*) (src_reg + off) 3670 * with different pointer types: 3671 * src_reg == ctx in one branch and 3672 * src_reg == stack|map in some other branch. 3673 * Reject it. 3674 */ 3675 verbose("same insn cannot be used with different pointers\n"); 3676 return -EINVAL; 3677 } 3678 3679 } else if (class == BPF_STX) { 3680 enum bpf_reg_type *prev_dst_type, dst_reg_type; 3681 3682 if (BPF_MODE(insn->code) == BPF_XADD) { 3683 err = check_xadd(env, insn_idx, insn); 3684 if (err) 3685 return err; 3686 insn_idx++; 3687 continue; 3688 } 3689 3690 /* check src1 operand */ 3691 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3692 if (err) 3693 return err; 3694 /* check src2 operand */ 3695 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3696 if (err) 3697 return err; 3698 3699 dst_reg_type = regs[insn->dst_reg].type; 3700 3701 /* check that memory (dst_reg + off) is writeable */ 3702 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3703 BPF_SIZE(insn->code), BPF_WRITE, 3704 insn->src_reg); 3705 if (err) 3706 return err; 3707 3708 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; 3709 3710 if (*prev_dst_type == NOT_INIT) { 3711 *prev_dst_type = dst_reg_type; 3712 } else if (dst_reg_type != *prev_dst_type && 3713 (dst_reg_type == PTR_TO_CTX || 3714 *prev_dst_type == PTR_TO_CTX)) { 3715 verbose("same insn cannot be used with different pointers\n"); 3716 return -EINVAL; 3717 } 3718 3719 } else if (class == BPF_ST) { 3720 if (BPF_MODE(insn->code) != BPF_MEM || 3721 insn->src_reg != BPF_REG_0) { 3722 verbose("BPF_ST uses reserved fields\n"); 3723 return -EINVAL; 3724 } 3725 /* check src operand */ 3726 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3727 if (err) 3728 return err; 3729 3730 /* check that memory (dst_reg + off) is writeable */ 3731 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3732 BPF_SIZE(insn->code), BPF_WRITE, 3733 -1); 3734 if (err) 3735 return err; 3736 3737 } else if (class == BPF_JMP) { 3738 u8 opcode = BPF_OP(insn->code); 3739 3740 if (opcode == BPF_CALL) { 3741 if (BPF_SRC(insn->code) != BPF_K || 3742 insn->off != 0 || 3743 insn->src_reg != BPF_REG_0 || 3744 insn->dst_reg != BPF_REG_0) { 3745 verbose("BPF_CALL uses reserved fields\n"); 3746 return -EINVAL; 3747 } 3748 3749 err = check_call(env, insn->imm, insn_idx); 3750 if (err) 3751 return err; 3752 3753 } else if (opcode == BPF_JA) { 3754 if (BPF_SRC(insn->code) != BPF_K || 3755 insn->imm != 0 || 3756 insn->src_reg != BPF_REG_0 || 3757 insn->dst_reg != BPF_REG_0) { 3758 verbose("BPF_JA uses reserved fields\n"); 3759 return -EINVAL; 3760 } 3761 3762 insn_idx += insn->off + 1; 3763 continue; 3764 3765 } else if (opcode == BPF_EXIT) { 3766 if (BPF_SRC(insn->code) != BPF_K || 3767 insn->imm != 0 || 3768 insn->src_reg != BPF_REG_0 || 3769 insn->dst_reg != BPF_REG_0) { 3770 verbose("BPF_EXIT uses reserved fields\n"); 3771 return -EINVAL; 3772 } 3773 3774 /* eBPF calling convetion is such that R0 is used 3775 * to return the value from eBPF program. 3776 * Make sure that it's readable at this time 3777 * of bpf_exit, which means that program wrote 3778 * something into it earlier 3779 */ 3780 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 3781 if (err) 3782 return err; 3783 3784 if (is_pointer_value(env, BPF_REG_0)) { 3785 verbose("R0 leaks addr as return value\n"); 3786 return -EACCES; 3787 } 3788 3789 process_bpf_exit: 3790 insn_idx = pop_stack(env, &prev_insn_idx); 3791 if (insn_idx < 0) { 3792 break; 3793 } else { 3794 do_print_state = true; 3795 continue; 3796 } 3797 } else { 3798 err = check_cond_jmp_op(env, insn, &insn_idx); 3799 if (err) 3800 return err; 3801 } 3802 } else if (class == BPF_LD) { 3803 u8 mode = BPF_MODE(insn->code); 3804 3805 if (mode == BPF_ABS || mode == BPF_IND) { 3806 err = check_ld_abs(env, insn); 3807 if (err) 3808 return err; 3809 3810 } else if (mode == BPF_IMM) { 3811 err = check_ld_imm(env, insn); 3812 if (err) 3813 return err; 3814 3815 insn_idx++; 3816 } else { 3817 verbose("invalid BPF_LD mode\n"); 3818 return -EINVAL; 3819 } 3820 } else { 3821 verbose("unknown insn class %d\n", class); 3822 return -EINVAL; 3823 } 3824 3825 insn_idx++; 3826 } 3827 3828 verbose("processed %d insns, stack depth %d\n", 3829 insn_processed, env->prog->aux->stack_depth); 3830 return 0; 3831 } 3832 3833 static int check_map_prealloc(struct bpf_map *map) 3834 { 3835 return (map->map_type != BPF_MAP_TYPE_HASH && 3836 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 3837 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 3838 !(map->map_flags & BPF_F_NO_PREALLOC); 3839 } 3840 3841 static int check_map_prog_compatibility(struct bpf_map *map, 3842 struct bpf_prog *prog) 3843 3844 { 3845 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use 3846 * preallocated hash maps, since doing memory allocation 3847 * in overflow_handler can crash depending on where nmi got 3848 * triggered. 3849 */ 3850 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 3851 if (!check_map_prealloc(map)) { 3852 verbose("perf_event programs can only use preallocated hash map\n"); 3853 return -EINVAL; 3854 } 3855 if (map->inner_map_meta && 3856 !check_map_prealloc(map->inner_map_meta)) { 3857 verbose("perf_event programs can only use preallocated inner hash map\n"); 3858 return -EINVAL; 3859 } 3860 } 3861 return 0; 3862 } 3863 3864 /* look for pseudo eBPF instructions that access map FDs and 3865 * replace them with actual map pointers 3866 */ 3867 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 3868 { 3869 struct bpf_insn *insn = env->prog->insnsi; 3870 int insn_cnt = env->prog->len; 3871 int i, j, err; 3872 3873 err = bpf_prog_calc_tag(env->prog); 3874 if (err) 3875 return err; 3876 3877 for (i = 0; i < insn_cnt; i++, insn++) { 3878 if (BPF_CLASS(insn->code) == BPF_LDX && 3879 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 3880 verbose("BPF_LDX uses reserved fields\n"); 3881 return -EINVAL; 3882 } 3883 3884 if (BPF_CLASS(insn->code) == BPF_STX && 3885 ((BPF_MODE(insn->code) != BPF_MEM && 3886 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 3887 verbose("BPF_STX uses reserved fields\n"); 3888 return -EINVAL; 3889 } 3890 3891 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 3892 struct bpf_map *map; 3893 struct fd f; 3894 3895 if (i == insn_cnt - 1 || insn[1].code != 0 || 3896 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 3897 insn[1].off != 0) { 3898 verbose("invalid bpf_ld_imm64 insn\n"); 3899 return -EINVAL; 3900 } 3901 3902 if (insn->src_reg == 0) 3903 /* valid generic load 64-bit imm */ 3904 goto next_insn; 3905 3906 if (insn->src_reg != BPF_PSEUDO_MAP_FD) { 3907 verbose("unrecognized bpf_ld_imm64 insn\n"); 3908 return -EINVAL; 3909 } 3910 3911 f = fdget(insn->imm); 3912 map = __bpf_map_get(f); 3913 if (IS_ERR(map)) { 3914 verbose("fd %d is not pointing to valid bpf_map\n", 3915 insn->imm); 3916 return PTR_ERR(map); 3917 } 3918 3919 err = check_map_prog_compatibility(map, env->prog); 3920 if (err) { 3921 fdput(f); 3922 return err; 3923 } 3924 3925 /* store map pointer inside BPF_LD_IMM64 instruction */ 3926 insn[0].imm = (u32) (unsigned long) map; 3927 insn[1].imm = ((u64) (unsigned long) map) >> 32; 3928 3929 /* check whether we recorded this map already */ 3930 for (j = 0; j < env->used_map_cnt; j++) 3931 if (env->used_maps[j] == map) { 3932 fdput(f); 3933 goto next_insn; 3934 } 3935 3936 if (env->used_map_cnt >= MAX_USED_MAPS) { 3937 fdput(f); 3938 return -E2BIG; 3939 } 3940 3941 /* hold the map. If the program is rejected by verifier, 3942 * the map will be released by release_maps() or it 3943 * will be used by the valid program until it's unloaded 3944 * and all maps are released in free_bpf_prog_info() 3945 */ 3946 map = bpf_map_inc(map, false); 3947 if (IS_ERR(map)) { 3948 fdput(f); 3949 return PTR_ERR(map); 3950 } 3951 env->used_maps[env->used_map_cnt++] = map; 3952 3953 fdput(f); 3954 next_insn: 3955 insn++; 3956 i++; 3957 } 3958 } 3959 3960 /* now all pseudo BPF_LD_IMM64 instructions load valid 3961 * 'struct bpf_map *' into a register instead of user map_fd. 3962 * These pointers will be used later by verifier to validate map access. 3963 */ 3964 return 0; 3965 } 3966 3967 /* drop refcnt of maps used by the rejected program */ 3968 static void release_maps(struct bpf_verifier_env *env) 3969 { 3970 int i; 3971 3972 for (i = 0; i < env->used_map_cnt; i++) 3973 bpf_map_put(env->used_maps[i]); 3974 } 3975 3976 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 3977 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 3978 { 3979 struct bpf_insn *insn = env->prog->insnsi; 3980 int insn_cnt = env->prog->len; 3981 int i; 3982 3983 for (i = 0; i < insn_cnt; i++, insn++) 3984 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 3985 insn->src_reg = 0; 3986 } 3987 3988 /* single env->prog->insni[off] instruction was replaced with the range 3989 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 3990 * [0, off) and [off, end) to new locations, so the patched range stays zero 3991 */ 3992 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, 3993 u32 off, u32 cnt) 3994 { 3995 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 3996 3997 if (cnt == 1) 3998 return 0; 3999 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); 4000 if (!new_data) 4001 return -ENOMEM; 4002 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 4003 memcpy(new_data + off + cnt - 1, old_data + off, 4004 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 4005 env->insn_aux_data = new_data; 4006 vfree(old_data); 4007 return 0; 4008 } 4009 4010 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 4011 const struct bpf_insn *patch, u32 len) 4012 { 4013 struct bpf_prog *new_prog; 4014 4015 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 4016 if (!new_prog) 4017 return NULL; 4018 if (adjust_insn_aux_data(env, new_prog->len, off, len)) 4019 return NULL; 4020 return new_prog; 4021 } 4022 4023 /* convert load instructions that access fields of 'struct __sk_buff' 4024 * into sequence of instructions that access fields of 'struct sk_buff' 4025 */ 4026 static int convert_ctx_accesses(struct bpf_verifier_env *env) 4027 { 4028 const struct bpf_verifier_ops *ops = env->prog->aux->ops; 4029 int i, cnt, size, ctx_field_size, delta = 0; 4030 const int insn_cnt = env->prog->len; 4031 struct bpf_insn insn_buf[16], *insn; 4032 struct bpf_prog *new_prog; 4033 enum bpf_access_type type; 4034 bool is_narrower_load; 4035 u32 target_size; 4036 4037 if (ops->gen_prologue) { 4038 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 4039 env->prog); 4040 if (cnt >= ARRAY_SIZE(insn_buf)) { 4041 verbose("bpf verifier is misconfigured\n"); 4042 return -EINVAL; 4043 } else if (cnt) { 4044 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 4045 if (!new_prog) 4046 return -ENOMEM; 4047 4048 env->prog = new_prog; 4049 delta += cnt - 1; 4050 } 4051 } 4052 4053 if (!ops->convert_ctx_access) 4054 return 0; 4055 4056 insn = env->prog->insnsi + delta; 4057 4058 for (i = 0; i < insn_cnt; i++, insn++) { 4059 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 4060 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 4061 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 4062 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 4063 type = BPF_READ; 4064 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 4065 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 4066 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 4067 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 4068 type = BPF_WRITE; 4069 else 4070 continue; 4071 4072 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) 4073 continue; 4074 4075 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 4076 size = BPF_LDST_BYTES(insn); 4077 4078 /* If the read access is a narrower load of the field, 4079 * convert to a 4/8-byte load, to minimum program type specific 4080 * convert_ctx_access changes. If conversion is successful, 4081 * we will apply proper mask to the result. 4082 */ 4083 is_narrower_load = size < ctx_field_size; 4084 if (is_narrower_load) { 4085 u32 off = insn->off; 4086 u8 size_code; 4087 4088 if (type == BPF_WRITE) { 4089 verbose("bpf verifier narrow ctx access misconfigured\n"); 4090 return -EINVAL; 4091 } 4092 4093 size_code = BPF_H; 4094 if (ctx_field_size == 4) 4095 size_code = BPF_W; 4096 else if (ctx_field_size == 8) 4097 size_code = BPF_DW; 4098 4099 insn->off = off & ~(ctx_field_size - 1); 4100 insn->code = BPF_LDX | BPF_MEM | size_code; 4101 } 4102 4103 target_size = 0; 4104 cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, 4105 &target_size); 4106 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 4107 (ctx_field_size && !target_size)) { 4108 verbose("bpf verifier is misconfigured\n"); 4109 return -EINVAL; 4110 } 4111 4112 if (is_narrower_load && size < target_size) { 4113 if (ctx_field_size <= 4) 4114 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 4115 (1 << size * 8) - 1); 4116 else 4117 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 4118 (1 << size * 8) - 1); 4119 } 4120 4121 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 4122 if (!new_prog) 4123 return -ENOMEM; 4124 4125 delta += cnt - 1; 4126 4127 /* keep walking new program and skip insns we just inserted */ 4128 env->prog = new_prog; 4129 insn = new_prog->insnsi + i + delta; 4130 } 4131 4132 return 0; 4133 } 4134 4135 /* fixup insn->imm field of bpf_call instructions 4136 * and inline eligible helpers as explicit sequence of BPF instructions 4137 * 4138 * this function is called after eBPF program passed verification 4139 */ 4140 static int fixup_bpf_calls(struct bpf_verifier_env *env) 4141 { 4142 struct bpf_prog *prog = env->prog; 4143 struct bpf_insn *insn = prog->insnsi; 4144 const struct bpf_func_proto *fn; 4145 const int insn_cnt = prog->len; 4146 struct bpf_insn insn_buf[16]; 4147 struct bpf_prog *new_prog; 4148 struct bpf_map *map_ptr; 4149 int i, cnt, delta = 0; 4150 4151 for (i = 0; i < insn_cnt; i++, insn++) { 4152 if (insn->code != (BPF_JMP | BPF_CALL)) 4153 continue; 4154 4155 if (insn->imm == BPF_FUNC_get_route_realm) 4156 prog->dst_needed = 1; 4157 if (insn->imm == BPF_FUNC_get_prandom_u32) 4158 bpf_user_rnd_init_once(); 4159 if (insn->imm == BPF_FUNC_tail_call) { 4160 /* If we tail call into other programs, we 4161 * cannot make any assumptions since they can 4162 * be replaced dynamically during runtime in 4163 * the program array. 4164 */ 4165 prog->cb_access = 1; 4166 env->prog->aux->stack_depth = MAX_BPF_STACK; 4167 4168 /* mark bpf_tail_call as different opcode to avoid 4169 * conditional branch in the interpeter for every normal 4170 * call and to prevent accidental JITing by JIT compiler 4171 * that doesn't support bpf_tail_call yet 4172 */ 4173 insn->imm = 0; 4174 insn->code = BPF_JMP | BPF_TAIL_CALL; 4175 continue; 4176 } 4177 4178 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 4179 * handlers are currently limited to 64 bit only. 4180 */ 4181 if (ebpf_jit_enabled() && BITS_PER_LONG == 64 && 4182 insn->imm == BPF_FUNC_map_lookup_elem) { 4183 map_ptr = env->insn_aux_data[i + delta].map_ptr; 4184 if (map_ptr == BPF_MAP_PTR_POISON || 4185 !map_ptr->ops->map_gen_lookup) 4186 goto patch_call_imm; 4187 4188 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); 4189 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 4190 verbose("bpf verifier is misconfigured\n"); 4191 return -EINVAL; 4192 } 4193 4194 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 4195 cnt); 4196 if (!new_prog) 4197 return -ENOMEM; 4198 4199 delta += cnt - 1; 4200 4201 /* keep walking new program and skip insns we just inserted */ 4202 env->prog = prog = new_prog; 4203 insn = new_prog->insnsi + i + delta; 4204 continue; 4205 } 4206 4207 if (insn->imm == BPF_FUNC_redirect_map) { 4208 /* Note, we cannot use prog directly as imm as subsequent 4209 * rewrites would still change the prog pointer. The only 4210 * stable address we can use is aux, which also works with 4211 * prog clones during blinding. 4212 */ 4213 u64 addr = (unsigned long)prog->aux; 4214 struct bpf_insn r4_ld[] = { 4215 BPF_LD_IMM64(BPF_REG_4, addr), 4216 *insn, 4217 }; 4218 cnt = ARRAY_SIZE(r4_ld); 4219 4220 new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt); 4221 if (!new_prog) 4222 return -ENOMEM; 4223 4224 delta += cnt - 1; 4225 env->prog = prog = new_prog; 4226 insn = new_prog->insnsi + i + delta; 4227 } 4228 patch_call_imm: 4229 fn = prog->aux->ops->get_func_proto(insn->imm); 4230 /* all functions that have prototype and verifier allowed 4231 * programs to call them, must be real in-kernel functions 4232 */ 4233 if (!fn->func) { 4234 verbose("kernel subsystem misconfigured func %s#%d\n", 4235 func_id_name(insn->imm), insn->imm); 4236 return -EFAULT; 4237 } 4238 insn->imm = fn->func - __bpf_call_base; 4239 } 4240 4241 return 0; 4242 } 4243 4244 static void free_states(struct bpf_verifier_env *env) 4245 { 4246 struct bpf_verifier_state_list *sl, *sln; 4247 int i; 4248 4249 if (!env->explored_states) 4250 return; 4251 4252 for (i = 0; i < env->prog->len; i++) { 4253 sl = env->explored_states[i]; 4254 4255 if (sl) 4256 while (sl != STATE_LIST_MARK) { 4257 sln = sl->next; 4258 kfree(sl); 4259 sl = sln; 4260 } 4261 } 4262 4263 kfree(env->explored_states); 4264 } 4265 4266 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) 4267 { 4268 char __user *log_ubuf = NULL; 4269 struct bpf_verifier_env *env; 4270 int ret = -EINVAL; 4271 4272 /* 'struct bpf_verifier_env' can be global, but since it's not small, 4273 * allocate/free it every time bpf_check() is called 4274 */ 4275 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 4276 if (!env) 4277 return -ENOMEM; 4278 4279 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 4280 (*prog)->len); 4281 ret = -ENOMEM; 4282 if (!env->insn_aux_data) 4283 goto err_free_env; 4284 env->prog = *prog; 4285 4286 /* grab the mutex to protect few globals used by verifier */ 4287 mutex_lock(&bpf_verifier_lock); 4288 4289 if (attr->log_level || attr->log_buf || attr->log_size) { 4290 /* user requested verbose verifier output 4291 * and supplied buffer to store the verification trace 4292 */ 4293 log_level = attr->log_level; 4294 log_ubuf = (char __user *) (unsigned long) attr->log_buf; 4295 log_size = attr->log_size; 4296 log_len = 0; 4297 4298 ret = -EINVAL; 4299 /* log_* values have to be sane */ 4300 if (log_size < 128 || log_size > UINT_MAX >> 8 || 4301 log_level == 0 || log_ubuf == NULL) 4302 goto err_unlock; 4303 4304 ret = -ENOMEM; 4305 log_buf = vmalloc(log_size); 4306 if (!log_buf) 4307 goto err_unlock; 4308 } else { 4309 log_level = 0; 4310 } 4311 4312 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 4313 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 4314 env->strict_alignment = true; 4315 4316 ret = replace_map_fd_with_map_ptr(env); 4317 if (ret < 0) 4318 goto skip_full_check; 4319 4320 env->explored_states = kcalloc(env->prog->len, 4321 sizeof(struct bpf_verifier_state_list *), 4322 GFP_USER); 4323 ret = -ENOMEM; 4324 if (!env->explored_states) 4325 goto skip_full_check; 4326 4327 ret = check_cfg(env); 4328 if (ret < 0) 4329 goto skip_full_check; 4330 4331 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 4332 4333 ret = do_check(env); 4334 4335 skip_full_check: 4336 while (pop_stack(env, NULL) >= 0); 4337 free_states(env); 4338 4339 if (ret == 0) 4340 /* program is valid, convert *(u32*)(ctx + off) accesses */ 4341 ret = convert_ctx_accesses(env); 4342 4343 if (ret == 0) 4344 ret = fixup_bpf_calls(env); 4345 4346 if (log_level && log_len >= log_size - 1) { 4347 BUG_ON(log_len >= log_size); 4348 /* verifier log exceeded user supplied buffer */ 4349 ret = -ENOSPC; 4350 /* fall through to return what was recorded */ 4351 } 4352 4353 /* copy verifier log back to user space including trailing zero */ 4354 if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) { 4355 ret = -EFAULT; 4356 goto free_log_buf; 4357 } 4358 4359 if (ret == 0 && env->used_map_cnt) { 4360 /* if program passed verifier, update used_maps in bpf_prog_info */ 4361 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 4362 sizeof(env->used_maps[0]), 4363 GFP_KERNEL); 4364 4365 if (!env->prog->aux->used_maps) { 4366 ret = -ENOMEM; 4367 goto free_log_buf; 4368 } 4369 4370 memcpy(env->prog->aux->used_maps, env->used_maps, 4371 sizeof(env->used_maps[0]) * env->used_map_cnt); 4372 env->prog->aux->used_map_cnt = env->used_map_cnt; 4373 4374 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 4375 * bpf_ld_imm64 instructions 4376 */ 4377 convert_pseudo_ld_imm64(env); 4378 } 4379 4380 free_log_buf: 4381 if (log_level) 4382 vfree(log_buf); 4383 if (!env->prog->aux->used_maps) 4384 /* if we didn't copy map pointers into bpf_prog_info, release 4385 * them now. Otherwise free_bpf_prog_info() will release them. 4386 */ 4387 release_maps(env); 4388 *prog = env->prog; 4389 err_unlock: 4390 mutex_unlock(&bpf_verifier_lock); 4391 vfree(env->insn_aux_data); 4392 err_free_env: 4393 kfree(env); 4394 return ret; 4395 } 4396 4397 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, 4398 void *priv) 4399 { 4400 struct bpf_verifier_env *env; 4401 int ret; 4402 4403 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 4404 if (!env) 4405 return -ENOMEM; 4406 4407 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 4408 prog->len); 4409 ret = -ENOMEM; 4410 if (!env->insn_aux_data) 4411 goto err_free_env; 4412 env->prog = prog; 4413 env->analyzer_ops = ops; 4414 env->analyzer_priv = priv; 4415 4416 /* grab the mutex to protect few globals used by verifier */ 4417 mutex_lock(&bpf_verifier_lock); 4418 4419 log_level = 0; 4420 4421 env->strict_alignment = false; 4422 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 4423 env->strict_alignment = true; 4424 4425 env->explored_states = kcalloc(env->prog->len, 4426 sizeof(struct bpf_verifier_state_list *), 4427 GFP_KERNEL); 4428 ret = -ENOMEM; 4429 if (!env->explored_states) 4430 goto skip_full_check; 4431 4432 ret = check_cfg(env); 4433 if (ret < 0) 4434 goto skip_full_check; 4435 4436 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 4437 4438 ret = do_check(env); 4439 4440 skip_full_check: 4441 while (pop_stack(env, NULL) >= 0); 4442 free_states(env); 4443 4444 mutex_unlock(&bpf_verifier_lock); 4445 vfree(env->insn_aux_data); 4446 err_free_env: 4447 kfree(env); 4448 return ret; 4449 } 4450 EXPORT_SYMBOL_GPL(bpf_analyzer); 4451