/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - a loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * the function's argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'.
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'.
 * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
 * and the 2nd argument to be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes.
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem()
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
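 *
 * As an illustrative sketch (assuming a map with 4-byte keys and values of
 * at least 8 bytes; 'map_fd' as above), a complete sequence that passes all
 * of these checks could be:
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),  // initialize the key at fp-4
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), // NULL check, skip store if NULL
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),  // R0 type is PTR_TO_MAP_VALUE here
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 * Dereferencing R0 before the BPF_JEQ above would be rejected, since its type
 * is still PTR_TO_MAP_VALUE_OR_NULL at that point.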
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	98304
#define BPF_COMPLEXITY_LIMIT_STACK	1024

#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
};

/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[UNKNOWN_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_MAP_VALUE_ADJ]	= "map_value_adj",
	[FRAME_PTR]		= "fp",
	[PTR_TO_STACK]		= "fp",
	[CONST_IMM]		= "imm",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_END]	= "pkt_end",
};

#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
static const char * const func_id_str[] = {
	__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
};
#undef __BPF_FUNC_STR_FN

static const char *func_id_name(int id)
{
	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);

	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
		return func_id_str[id];
	else
		return "unknown";
}

static void print_verifier_state(struct bpf_verifier_state *state)
{
	struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(" R%d=%s", i, reg_type_str[t]);
		if (t == CONST_IMM || t == PTR_TO_STACK)
			verbose("%lld", reg->imm);
		else if (t == PTR_TO_PACKET)
			verbose("(id=%d,off=%d,r=%d)",
				reg->id, reg->off, reg->range);
		else if (t == UNKNOWN_VALUE && reg->imm)
			verbose("%lld", reg->imm);
		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
			 t == PTR_TO_MAP_VALUE_OR_NULL ||
			 t == PTR_TO_MAP_VALUE_ADJ)
			verbose("(ks=%d,vs=%d,id=%u)",
				reg->map_ptr->key_size,
				reg->map_ptr->value_size,
				reg->id);
		if (reg->min_value != BPF_REGISTER_MIN_RANGE)
			verbose(",min_value=%lld",
				(long long)reg->min_value);
		if (reg->max_value != BPF_REGISTER_MAX_RANGE)
			verbose(",max_value=%llu",
				(unsigned long long)reg->max_value);
		if (reg->min_align)
			verbose(",min_align=%u", reg->min_align);
		if (reg->aux_off)
verbose(",aux_off=%u", reg->aux_off); 248 if (reg->aux_off_align) 249 verbose(",aux_off_align=%u", reg->aux_off_align); 250 } 251 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 252 if (state->stack_slot_type[i] == STACK_SPILL) 253 verbose(" fp%d=%s", -MAX_BPF_STACK + i, 254 reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]); 255 } 256 verbose("\n"); 257 } 258 259 static const char *const bpf_class_string[] = { 260 [BPF_LD] = "ld", 261 [BPF_LDX] = "ldx", 262 [BPF_ST] = "st", 263 [BPF_STX] = "stx", 264 [BPF_ALU] = "alu", 265 [BPF_JMP] = "jmp", 266 [BPF_RET] = "BUG", 267 [BPF_ALU64] = "alu64", 268 }; 269 270 static const char *const bpf_alu_string[16] = { 271 [BPF_ADD >> 4] = "+=", 272 [BPF_SUB >> 4] = "-=", 273 [BPF_MUL >> 4] = "*=", 274 [BPF_DIV >> 4] = "/=", 275 [BPF_OR >> 4] = "|=", 276 [BPF_AND >> 4] = "&=", 277 [BPF_LSH >> 4] = "<<=", 278 [BPF_RSH >> 4] = ">>=", 279 [BPF_NEG >> 4] = "neg", 280 [BPF_MOD >> 4] = "%=", 281 [BPF_XOR >> 4] = "^=", 282 [BPF_MOV >> 4] = "=", 283 [BPF_ARSH >> 4] = "s>>=", 284 [BPF_END >> 4] = "endian", 285 }; 286 287 static const char *const bpf_ldst_string[] = { 288 [BPF_W >> 3] = "u32", 289 [BPF_H >> 3] = "u16", 290 [BPF_B >> 3] = "u8", 291 [BPF_DW >> 3] = "u64", 292 }; 293 294 static const char *const bpf_jmp_string[16] = { 295 [BPF_JA >> 4] = "jmp", 296 [BPF_JEQ >> 4] = "==", 297 [BPF_JGT >> 4] = ">", 298 [BPF_JGE >> 4] = ">=", 299 [BPF_JSET >> 4] = "&", 300 [BPF_JNE >> 4] = "!=", 301 [BPF_JSGT >> 4] = "s>", 302 [BPF_JSGE >> 4] = "s>=", 303 [BPF_CALL >> 4] = "call", 304 [BPF_EXIT >> 4] = "exit", 305 }; 306 307 static void print_bpf_insn(const struct bpf_verifier_env *env, 308 const struct bpf_insn *insn) 309 { 310 u8 class = BPF_CLASS(insn->code); 311 312 if (class == BPF_ALU || class == BPF_ALU64) { 313 if (BPF_SRC(insn->code) == BPF_X) 314 verbose("(%02x) %sr%d %s %sr%d\n", 315 insn->code, class == BPF_ALU ? "(u32) " : "", 316 insn->dst_reg, 317 bpf_alu_string[BPF_OP(insn->code) >> 4], 318 class == BPF_ALU ? "(u32) " : "", 319 insn->src_reg); 320 else 321 verbose("(%02x) %sr%d %s %s%d\n", 322 insn->code, class == BPF_ALU ? "(u32) " : "", 323 insn->dst_reg, 324 bpf_alu_string[BPF_OP(insn->code) >> 4], 325 class == BPF_ALU ? 
"(u32) " : "", 326 insn->imm); 327 } else if (class == BPF_STX) { 328 if (BPF_MODE(insn->code) == BPF_MEM) 329 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", 330 insn->code, 331 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 332 insn->dst_reg, 333 insn->off, insn->src_reg); 334 else if (BPF_MODE(insn->code) == BPF_XADD) 335 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", 336 insn->code, 337 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 338 insn->dst_reg, insn->off, 339 insn->src_reg); 340 else 341 verbose("BUG_%02x\n", insn->code); 342 } else if (class == BPF_ST) { 343 if (BPF_MODE(insn->code) != BPF_MEM) { 344 verbose("BUG_st_%02x\n", insn->code); 345 return; 346 } 347 verbose("(%02x) *(%s *)(r%d %+d) = %d\n", 348 insn->code, 349 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 350 insn->dst_reg, 351 insn->off, insn->imm); 352 } else if (class == BPF_LDX) { 353 if (BPF_MODE(insn->code) != BPF_MEM) { 354 verbose("BUG_ldx_%02x\n", insn->code); 355 return; 356 } 357 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", 358 insn->code, insn->dst_reg, 359 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 360 insn->src_reg, insn->off); 361 } else if (class == BPF_LD) { 362 if (BPF_MODE(insn->code) == BPF_ABS) { 363 verbose("(%02x) r0 = *(%s *)skb[%d]\n", 364 insn->code, 365 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 366 insn->imm); 367 } else if (BPF_MODE(insn->code) == BPF_IND) { 368 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", 369 insn->code, 370 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 371 insn->src_reg, insn->imm); 372 } else if (BPF_MODE(insn->code) == BPF_IMM && 373 BPF_SIZE(insn->code) == BPF_DW) { 374 /* At this point, we already made sure that the second 375 * part of the ldimm64 insn is accessible. 376 */ 377 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 378 bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD; 379 380 if (map_ptr && !env->allow_ptr_leaks) 381 imm = 0; 382 383 verbose("(%02x) r%d = 0x%llx\n", insn->code, 384 insn->dst_reg, (unsigned long long)imm); 385 } else { 386 verbose("BUG_ld_%02x\n", insn->code); 387 return; 388 } 389 } else if (class == BPF_JMP) { 390 u8 opcode = BPF_OP(insn->code); 391 392 if (opcode == BPF_CALL) { 393 verbose("(%02x) call %s#%d\n", insn->code, 394 func_id_name(insn->imm), insn->imm); 395 } else if (insn->code == (BPF_JMP | BPF_JA)) { 396 verbose("(%02x) goto pc%+d\n", 397 insn->code, insn->off); 398 } else if (insn->code == (BPF_JMP | BPF_EXIT)) { 399 verbose("(%02x) exit\n", insn->code); 400 } else if (BPF_SRC(insn->code) == BPF_X) { 401 verbose("(%02x) if r%d %s r%d goto pc%+d\n", 402 insn->code, insn->dst_reg, 403 bpf_jmp_string[BPF_OP(insn->code) >> 4], 404 insn->src_reg, insn->off); 405 } else { 406 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", 407 insn->code, insn->dst_reg, 408 bpf_jmp_string[BPF_OP(insn->code) >> 4], 409 insn->imm, insn->off); 410 } 411 } else { 412 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); 413 } 414 } 415 416 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx) 417 { 418 struct bpf_verifier_stack_elem *elem; 419 int insn_idx; 420 421 if (env->head == NULL) 422 return -1; 423 424 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); 425 insn_idx = env->head->insn_idx; 426 if (prev_insn_idx) 427 *prev_insn_idx = env->head->prev_insn_idx; 428 elem = env->head->next; 429 kfree(env->head); 430 env->head = elem; 431 env->stack_size--; 432 return insn_idx; 433 } 434 435 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 436 int insn_idx, int prev_insn_idx) 437 { 438 
struct bpf_verifier_stack_elem *elem; 439 440 elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 441 if (!elem) 442 goto err; 443 444 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); 445 elem->insn_idx = insn_idx; 446 elem->prev_insn_idx = prev_insn_idx; 447 elem->next = env->head; 448 env->head = elem; 449 env->stack_size++; 450 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { 451 verbose("BPF program is too complex\n"); 452 goto err; 453 } 454 return &elem->st; 455 err: 456 /* pop all elements and return */ 457 while (pop_stack(env, NULL) >= 0); 458 return NULL; 459 } 460 461 #define CALLER_SAVED_REGS 6 462 static const int caller_saved[CALLER_SAVED_REGS] = { 463 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 464 }; 465 466 static void init_reg_state(struct bpf_reg_state *regs) 467 { 468 int i; 469 470 for (i = 0; i < MAX_BPF_REG; i++) { 471 regs[i].type = NOT_INIT; 472 regs[i].imm = 0; 473 regs[i].min_value = BPF_REGISTER_MIN_RANGE; 474 regs[i].max_value = BPF_REGISTER_MAX_RANGE; 475 regs[i].min_align = 0; 476 regs[i].aux_off = 0; 477 regs[i].aux_off_align = 0; 478 } 479 480 /* frame pointer */ 481 regs[BPF_REG_FP].type = FRAME_PTR; 482 483 /* 1st arg to a function */ 484 regs[BPF_REG_1].type = PTR_TO_CTX; 485 } 486 487 static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) 488 { 489 regs[regno].type = UNKNOWN_VALUE; 490 regs[regno].id = 0; 491 regs[regno].imm = 0; 492 } 493 494 static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) 495 { 496 BUG_ON(regno >= MAX_BPF_REG); 497 __mark_reg_unknown_value(regs, regno); 498 } 499 500 static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) 501 { 502 regs[regno].min_value = BPF_REGISTER_MIN_RANGE; 503 regs[regno].max_value = BPF_REGISTER_MAX_RANGE; 504 regs[regno].min_align = 0; 505 } 506 507 static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, 508 u32 regno) 509 { 510 mark_reg_unknown_value(regs, regno); 511 reset_reg_range_values(regs, regno); 512 } 513 514 enum reg_arg_type { 515 SRC_OP, /* register is used as source operand */ 516 DST_OP, /* register is used as destination operand */ 517 DST_OP_NO_MARK /* same as above, check only, don't mark */ 518 }; 519 520 static int check_reg_arg(struct bpf_reg_state *regs, u32 regno, 521 enum reg_arg_type t) 522 { 523 if (regno >= MAX_BPF_REG) { 524 verbose("R%d is invalid\n", regno); 525 return -EINVAL; 526 } 527 528 if (t == SRC_OP) { 529 /* check whether register used as source operand can be read */ 530 if (regs[regno].type == NOT_INIT) { 531 verbose("R%d !read_ok\n", regno); 532 return -EACCES; 533 } 534 } else { 535 /* check whether register used as dest operand can be written to */ 536 if (regno == BPF_REG_FP) { 537 verbose("frame pointer is read only\n"); 538 return -EACCES; 539 } 540 if (t == DST_OP) 541 mark_reg_unknown_value(regs, regno); 542 } 543 return 0; 544 } 545 546 static int bpf_size_to_bytes(int bpf_size) 547 { 548 if (bpf_size == BPF_W) 549 return 4; 550 else if (bpf_size == BPF_H) 551 return 2; 552 else if (bpf_size == BPF_B) 553 return 1; 554 else if (bpf_size == BPF_DW) 555 return 8; 556 else 557 return -EINVAL; 558 } 559 560 static bool is_spillable_regtype(enum bpf_reg_type type) 561 { 562 switch (type) { 563 case PTR_TO_MAP_VALUE: 564 case PTR_TO_MAP_VALUE_OR_NULL: 565 case PTR_TO_MAP_VALUE_ADJ: 566 case PTR_TO_STACK: 567 case PTR_TO_CTX: 568 case PTR_TO_PACKET: 569 case PTR_TO_PACKET_END: 570 case FRAME_PTR: 571 case CONST_PTR_TO_MAP: 572 return 
true; 573 default: 574 return false; 575 } 576 } 577 578 /* check_stack_read/write functions track spill/fill of registers, 579 * stack boundary and alignment are checked in check_mem_access() 580 */ 581 static int check_stack_write(struct bpf_verifier_state *state, int off, 582 int size, int value_regno) 583 { 584 int i; 585 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 586 * so it's aligned access and [off, off + size) are within stack limits 587 */ 588 589 if (value_regno >= 0 && 590 is_spillable_regtype(state->regs[value_regno].type)) { 591 592 /* register containing pointer is being spilled into stack */ 593 if (size != BPF_REG_SIZE) { 594 verbose("invalid size of register spill\n"); 595 return -EACCES; 596 } 597 598 /* save register state */ 599 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 600 state->regs[value_regno]; 601 602 for (i = 0; i < BPF_REG_SIZE; i++) 603 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; 604 } else { 605 /* regular write of data into stack */ 606 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 607 (struct bpf_reg_state) {}; 608 609 for (i = 0; i < size; i++) 610 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; 611 } 612 return 0; 613 } 614 615 static int check_stack_read(struct bpf_verifier_state *state, int off, int size, 616 int value_regno) 617 { 618 u8 *slot_type; 619 int i; 620 621 slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; 622 623 if (slot_type[0] == STACK_SPILL) { 624 if (size != BPF_REG_SIZE) { 625 verbose("invalid size of register spill\n"); 626 return -EACCES; 627 } 628 for (i = 1; i < BPF_REG_SIZE; i++) { 629 if (slot_type[i] != STACK_SPILL) { 630 verbose("corrupted spill memory\n"); 631 return -EACCES; 632 } 633 } 634 635 if (value_regno >= 0) 636 /* restore register state from stack */ 637 state->regs[value_regno] = 638 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; 639 return 0; 640 } else { 641 for (i = 0; i < size; i++) { 642 if (slot_type[i] != STACK_MISC) { 643 verbose("invalid read from stack off %d+%d size %d\n", 644 off, i, size); 645 return -EACCES; 646 } 647 } 648 if (value_regno >= 0) 649 /* have read misc data from the stack */ 650 mark_reg_unknown_value_and_range(state->regs, 651 value_regno); 652 return 0; 653 } 654 } 655 656 /* check read/write into map element returned by bpf_map_lookup_elem() */ 657 static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 658 int size) 659 { 660 struct bpf_map *map = env->cur_state.regs[regno].map_ptr; 661 662 if (off < 0 || size <= 0 || off + size > map->value_size) { 663 verbose("invalid access to map value, value_size=%d off=%d size=%d\n", 664 map->value_size, off, size); 665 return -EACCES; 666 } 667 return 0; 668 } 669 670 /* check read/write into an adjusted map element */ 671 static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno, 672 int off, int size) 673 { 674 struct bpf_verifier_state *state = &env->cur_state; 675 struct bpf_reg_state *reg = &state->regs[regno]; 676 int err; 677 678 /* We adjusted the register to this map value, so we 679 * need to change off and size to min_value and max_value 680 * respectively to make sure our theoretical access will be 681 * safe. 682 */ 683 if (log_level) 684 print_verifier_state(state); 685 env->varlen_map_value_access = true; 686 /* The minimum value is only important with signed 687 * comparisons where we can't assume the floor of a 688 * value is 0. 
If we are using signed variables for our 689 * index'es we need to make sure that whatever we use 690 * will have a set floor within our range. 691 */ 692 if (reg->min_value < 0) { 693 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 694 regno); 695 return -EACCES; 696 } 697 err = check_map_access(env, regno, reg->min_value + off, size); 698 if (err) { 699 verbose("R%d min value is outside of the array range\n", 700 regno); 701 return err; 702 } 703 704 /* If we haven't set a max value then we need to bail 705 * since we can't be sure we won't do bad things. 706 */ 707 if (reg->max_value == BPF_REGISTER_MAX_RANGE) { 708 verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n", 709 regno); 710 return -EACCES; 711 } 712 return check_map_access(env, regno, reg->max_value + off, size); 713 } 714 715 #define MAX_PACKET_OFF 0xffff 716 717 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 718 const struct bpf_call_arg_meta *meta, 719 enum bpf_access_type t) 720 { 721 switch (env->prog->type) { 722 case BPF_PROG_TYPE_LWT_IN: 723 case BPF_PROG_TYPE_LWT_OUT: 724 /* dst_input() and dst_output() can't write for now */ 725 if (t == BPF_WRITE) 726 return false; 727 /* fallthrough */ 728 case BPF_PROG_TYPE_SCHED_CLS: 729 case BPF_PROG_TYPE_SCHED_ACT: 730 case BPF_PROG_TYPE_XDP: 731 case BPF_PROG_TYPE_LWT_XMIT: 732 if (meta) 733 return meta->pkt_access; 734 735 env->seen_direct_write = true; 736 return true; 737 default: 738 return false; 739 } 740 } 741 742 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 743 int size) 744 { 745 struct bpf_reg_state *regs = env->cur_state.regs; 746 struct bpf_reg_state *reg = ®s[regno]; 747 748 off += reg->off; 749 if (off < 0 || size <= 0 || off + size > reg->range) { 750 verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 751 off, size, regno, reg->id, reg->off, reg->range); 752 return -EACCES; 753 } 754 return 0; 755 } 756 757 /* check access to 'struct bpf_context' fields */ 758 static int check_ctx_access(struct bpf_verifier_env *env, int off, int size, 759 enum bpf_access_type t, enum bpf_reg_type *reg_type) 760 { 761 /* for analyzer ctx accesses are already validated and converted */ 762 if (env->analyzer_ops) 763 return 0; 764 765 if (env->prog->aux->ops->is_valid_access && 766 env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) { 767 /* remember the offset of last byte accessed in ctx */ 768 if (env->prog->aux->max_ctx_offset < off + size) 769 env->prog->aux->max_ctx_offset = off + size; 770 return 0; 771 } 772 773 verbose("invalid bpf_context access off=%d size=%d\n", off, size); 774 return -EACCES; 775 } 776 777 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 778 { 779 if (env->allow_ptr_leaks) 780 return false; 781 782 switch (env->cur_state.regs[regno].type) { 783 case UNKNOWN_VALUE: 784 case CONST_IMM: 785 return false; 786 default: 787 return true; 788 } 789 } 790 791 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, 792 int off, int size, bool strict) 793 { 794 int ip_align; 795 int reg_off; 796 797 /* Byte size accesses are always allowed. 
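	 *
	 * As a rough example of the strict-mode check below: after
	 *    r2 = <pkt_ptr>; r2 += 7;
	 * a 4-byte load like "r3 = *(u32 *)(r2 + 0)" is rejected, because
	 * (ip_align(2) + reg->off(7) + off(0)) % 4 != 0, while a 1-byte load
	 * from the same address is always accepted.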
*/ 798 if (!strict || size == 1) 799 return 0; 800 801 reg_off = reg->off; 802 if (reg->id) { 803 if (reg->aux_off_align % size) { 804 verbose("Packet access is only %u byte aligned, %d byte access not allowed\n", 805 reg->aux_off_align, size); 806 return -EACCES; 807 } 808 reg_off += reg->aux_off; 809 } 810 811 /* skb->data is NET_IP_ALIGN-ed, but for strict alignment checking 812 * we force this to 2 which is universally what architectures use 813 * when they don't set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. 814 */ 815 ip_align = strict ? 2 : NET_IP_ALIGN; 816 if ((ip_align + reg_off + off) % size != 0) { 817 verbose("misaligned packet access off %d+%d+%d size %d\n", 818 ip_align, reg_off, off, size); 819 return -EACCES; 820 } 821 822 return 0; 823 } 824 825 static int check_val_ptr_alignment(const struct bpf_reg_state *reg, 826 int size, bool strict) 827 { 828 if (strict && size != 1) { 829 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n"); 830 return -EACCES; 831 } 832 833 return 0; 834 } 835 836 static int check_ptr_alignment(struct bpf_verifier_env *env, 837 const struct bpf_reg_state *reg, 838 int off, int size) 839 { 840 bool strict = env->strict_alignment; 841 842 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 843 strict = true; 844 845 switch (reg->type) { 846 case PTR_TO_PACKET: 847 return check_pkt_ptr_alignment(reg, off, size, strict); 848 case PTR_TO_MAP_VALUE_ADJ: 849 return check_val_ptr_alignment(reg, size, strict); 850 default: 851 if (off % size != 0) { 852 verbose("misaligned access off %d size %d\n", 853 off, size); 854 return -EACCES; 855 } 856 857 return 0; 858 } 859 } 860 861 /* check whether memory at (regno + off) is accessible for t = (read | write) 862 * if t==write, value_regno is a register which value is stored into memory 863 * if t==read, value_regno is a register which will receive the value from memory 864 * if t==write && value_regno==-1, some unknown value is stored into memory 865 * if t==read && value_regno==-1, don't care what we read from memory 866 */ 867 static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, 868 int bpf_size, enum bpf_access_type t, 869 int value_regno) 870 { 871 struct bpf_verifier_state *state = &env->cur_state; 872 struct bpf_reg_state *reg = &state->regs[regno]; 873 int size, err = 0; 874 875 if (reg->type == PTR_TO_STACK) 876 off += reg->imm; 877 878 size = bpf_size_to_bytes(bpf_size); 879 if (size < 0) 880 return size; 881 882 err = check_ptr_alignment(env, reg, off, size); 883 if (err) 884 return err; 885 886 if (reg->type == PTR_TO_MAP_VALUE || 887 reg->type == PTR_TO_MAP_VALUE_ADJ) { 888 if (t == BPF_WRITE && value_regno >= 0 && 889 is_pointer_value(env, value_regno)) { 890 verbose("R%d leaks addr into map\n", value_regno); 891 return -EACCES; 892 } 893 894 if (reg->type == PTR_TO_MAP_VALUE_ADJ) 895 err = check_map_access_adj(env, regno, off, size); 896 else 897 err = check_map_access(env, regno, off, size); 898 if (!err && t == BPF_READ && value_regno >= 0) 899 mark_reg_unknown_value_and_range(state->regs, 900 value_regno); 901 902 } else if (reg->type == PTR_TO_CTX) { 903 enum bpf_reg_type reg_type = UNKNOWN_VALUE; 904 905 if (t == BPF_WRITE && value_regno >= 0 && 906 is_pointer_value(env, value_regno)) { 907 verbose("R%d leaks addr into ctx\n", value_regno); 908 return -EACCES; 909 } 910 err = check_ctx_access(env, off, size, t, ®_type); 911 if (!err && t == BPF_READ && value_regno >= 0) { 912 mark_reg_unknown_value_and_range(state->regs, 913 value_regno); 
914 /* note that reg.[id|off|range] == 0 */ 915 state->regs[value_regno].type = reg_type; 916 state->regs[value_regno].aux_off = 0; 917 state->regs[value_regno].aux_off_align = 0; 918 } 919 920 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { 921 if (off >= 0 || off < -MAX_BPF_STACK) { 922 verbose("invalid stack off=%d size=%d\n", off, size); 923 return -EACCES; 924 } 925 if (t == BPF_WRITE) { 926 if (!env->allow_ptr_leaks && 927 state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL && 928 size != BPF_REG_SIZE) { 929 verbose("attempt to corrupt spilled pointer on stack\n"); 930 return -EACCES; 931 } 932 err = check_stack_write(state, off, size, value_regno); 933 } else { 934 err = check_stack_read(state, off, size, value_regno); 935 } 936 } else if (state->regs[regno].type == PTR_TO_PACKET) { 937 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 938 verbose("cannot write into packet\n"); 939 return -EACCES; 940 } 941 if (t == BPF_WRITE && value_regno >= 0 && 942 is_pointer_value(env, value_regno)) { 943 verbose("R%d leaks addr into packet\n", value_regno); 944 return -EACCES; 945 } 946 err = check_packet_access(env, regno, off, size); 947 if (!err && t == BPF_READ && value_regno >= 0) 948 mark_reg_unknown_value_and_range(state->regs, 949 value_regno); 950 } else { 951 verbose("R%d invalid mem access '%s'\n", 952 regno, reg_type_str[reg->type]); 953 return -EACCES; 954 } 955 956 if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks && 957 state->regs[value_regno].type == UNKNOWN_VALUE) { 958 /* 1 or 2 byte load zero-extends, determine the number of 959 * zero upper bits. Not doing it fo 4 byte load, since 960 * such values cannot be added to ptr_to_packet anyway. 961 */ 962 state->regs[value_regno].imm = 64 - size * 8; 963 } 964 return err; 965 } 966 967 static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn) 968 { 969 struct bpf_reg_state *regs = env->cur_state.regs; 970 int err; 971 972 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || 973 insn->imm != 0) { 974 verbose("BPF_XADD uses reserved fields\n"); 975 return -EINVAL; 976 } 977 978 /* check src1 operand */ 979 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 980 if (err) 981 return err; 982 983 /* check src2 operand */ 984 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 985 if (err) 986 return err; 987 988 /* check whether atomic_add can read the memory */ 989 err = check_mem_access(env, insn->dst_reg, insn->off, 990 BPF_SIZE(insn->code), BPF_READ, -1); 991 if (err) 992 return err; 993 994 /* check whether atomic_add can write into the same memory */ 995 return check_mem_access(env, insn->dst_reg, insn->off, 996 BPF_SIZE(insn->code), BPF_WRITE, -1); 997 } 998 999 /* when register 'regno' is passed into function that will read 'access_size' 1000 * bytes from that pointer, make sure that it's within stack boundary 1001 * and all elements of stack are initialized 1002 */ 1003 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 1004 int access_size, bool zero_size_allowed, 1005 struct bpf_call_arg_meta *meta) 1006 { 1007 struct bpf_verifier_state *state = &env->cur_state; 1008 struct bpf_reg_state *regs = state->regs; 1009 int off, i; 1010 1011 if (regs[regno].type != PTR_TO_STACK) { 1012 if (zero_size_allowed && access_size == 0 && 1013 regs[regno].type == CONST_IMM && 1014 regs[regno].imm == 0) 1015 return 0; 1016 1017 verbose("R%d type=%s expected=%s\n", regno, 1018 reg_type_str[regs[regno].type], 1019 
reg_type_str[PTR_TO_STACK]); 1020 return -EACCES; 1021 } 1022 1023 off = regs[regno].imm; 1024 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 1025 access_size <= 0) { 1026 verbose("invalid stack type R%d off=%d access_size=%d\n", 1027 regno, off, access_size); 1028 return -EACCES; 1029 } 1030 1031 if (meta && meta->raw_mode) { 1032 meta->access_size = access_size; 1033 meta->regno = regno; 1034 return 0; 1035 } 1036 1037 for (i = 0; i < access_size; i++) { 1038 if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { 1039 verbose("invalid indirect read from stack off %d+%d size %d\n", 1040 off, i, access_size); 1041 return -EACCES; 1042 } 1043 } 1044 return 0; 1045 } 1046 1047 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 1048 int access_size, bool zero_size_allowed, 1049 struct bpf_call_arg_meta *meta) 1050 { 1051 struct bpf_reg_state *regs = env->cur_state.regs; 1052 1053 switch (regs[regno].type) { 1054 case PTR_TO_PACKET: 1055 return check_packet_access(env, regno, 0, access_size); 1056 case PTR_TO_MAP_VALUE: 1057 return check_map_access(env, regno, 0, access_size); 1058 case PTR_TO_MAP_VALUE_ADJ: 1059 return check_map_access_adj(env, regno, 0, access_size); 1060 default: /* const_imm|ptr_to_stack or invalid ptr */ 1061 return check_stack_boundary(env, regno, access_size, 1062 zero_size_allowed, meta); 1063 } 1064 } 1065 1066 static int check_func_arg(struct bpf_verifier_env *env, u32 regno, 1067 enum bpf_arg_type arg_type, 1068 struct bpf_call_arg_meta *meta) 1069 { 1070 struct bpf_reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; 1071 enum bpf_reg_type expected_type, type = reg->type; 1072 int err = 0; 1073 1074 if (arg_type == ARG_DONTCARE) 1075 return 0; 1076 1077 if (type == NOT_INIT) { 1078 verbose("R%d !read_ok\n", regno); 1079 return -EACCES; 1080 } 1081 1082 if (arg_type == ARG_ANYTHING) { 1083 if (is_pointer_value(env, regno)) { 1084 verbose("R%d leaks addr into helper function\n", regno); 1085 return -EACCES; 1086 } 1087 return 0; 1088 } 1089 1090 if (type == PTR_TO_PACKET && 1091 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 1092 verbose("helper access to the packet is not allowed\n"); 1093 return -EACCES; 1094 } 1095 1096 if (arg_type == ARG_PTR_TO_MAP_KEY || 1097 arg_type == ARG_PTR_TO_MAP_VALUE) { 1098 expected_type = PTR_TO_STACK; 1099 if (type != PTR_TO_PACKET && type != expected_type) 1100 goto err_type; 1101 } else if (arg_type == ARG_CONST_SIZE || 1102 arg_type == ARG_CONST_SIZE_OR_ZERO) { 1103 expected_type = CONST_IMM; 1104 /* One exception. Allow UNKNOWN_VALUE registers when the 1105 * boundaries are known and don't cause unsafe memory accesses 1106 */ 1107 if (type != UNKNOWN_VALUE && type != expected_type) 1108 goto err_type; 1109 } else if (arg_type == ARG_CONST_MAP_PTR) { 1110 expected_type = CONST_PTR_TO_MAP; 1111 if (type != expected_type) 1112 goto err_type; 1113 } else if (arg_type == ARG_PTR_TO_CTX) { 1114 expected_type = PTR_TO_CTX; 1115 if (type != expected_type) 1116 goto err_type; 1117 } else if (arg_type == ARG_PTR_TO_MEM || 1118 arg_type == ARG_PTR_TO_UNINIT_MEM) { 1119 expected_type = PTR_TO_STACK; 1120 /* One exception here. In case function allows for NULL to be 1121 * passed in as argument, it's a CONST_IMM type. Final test 1122 * happens during stack boundary checking. 
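		 * As an illustrative sketch (not tied to any particular helper),
		 * a program may pass
		 *    r2 = 0;   // NULL buffer, CONST_IMM 0
		 *    r3 = 0;   // zero size, paired with ARG_CONST_SIZE_OR_ZERO
		 * and the CONST_IMM/imm == 0 case is accepted here, with the
		 * zero size validated later by check_stack_boundary().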
1123 */ 1124 if (type == CONST_IMM && reg->imm == 0) 1125 /* final test in check_stack_boundary() */; 1126 else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE && 1127 type != PTR_TO_MAP_VALUE_ADJ && type != expected_type) 1128 goto err_type; 1129 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; 1130 } else { 1131 verbose("unsupported arg_type %d\n", arg_type); 1132 return -EFAULT; 1133 } 1134 1135 if (arg_type == ARG_CONST_MAP_PTR) { 1136 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 1137 meta->map_ptr = reg->map_ptr; 1138 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 1139 /* bpf_map_xxx(..., map_ptr, ..., key) call: 1140 * check that [key, key + map->key_size) are within 1141 * stack limits and initialized 1142 */ 1143 if (!meta->map_ptr) { 1144 /* in function declaration map_ptr must come before 1145 * map_key, so that it's verified and known before 1146 * we have to check map_key here. Otherwise it means 1147 * that kernel subsystem misconfigured verifier 1148 */ 1149 verbose("invalid map_ptr to access map->key\n"); 1150 return -EACCES; 1151 } 1152 if (type == PTR_TO_PACKET) 1153 err = check_packet_access(env, regno, 0, 1154 meta->map_ptr->key_size); 1155 else 1156 err = check_stack_boundary(env, regno, 1157 meta->map_ptr->key_size, 1158 false, NULL); 1159 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { 1160 /* bpf_map_xxx(..., map_ptr, ..., value) call: 1161 * check [value, value + map->value_size) validity 1162 */ 1163 if (!meta->map_ptr) { 1164 /* kernel subsystem misconfigured verifier */ 1165 verbose("invalid map_ptr to access map->value\n"); 1166 return -EACCES; 1167 } 1168 if (type == PTR_TO_PACKET) 1169 err = check_packet_access(env, regno, 0, 1170 meta->map_ptr->value_size); 1171 else 1172 err = check_stack_boundary(env, regno, 1173 meta->map_ptr->value_size, 1174 false, NULL); 1175 } else if (arg_type == ARG_CONST_SIZE || 1176 arg_type == ARG_CONST_SIZE_OR_ZERO) { 1177 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 1178 1179 /* bpf_xxx(..., buf, len) call will access 'len' bytes 1180 * from stack pointer 'buf'. Check it 1181 * note: regno == len, regno - 1 == buf 1182 */ 1183 if (regno == 0) { 1184 /* kernel subsystem misconfigured verifier */ 1185 verbose("ARG_CONST_SIZE cannot be first argument\n"); 1186 return -EACCES; 1187 } 1188 1189 /* If the register is UNKNOWN_VALUE, the access check happens 1190 * using its boundaries. Otherwise, just use its imm 1191 */ 1192 if (type == UNKNOWN_VALUE) { 1193 /* For unprivileged variable accesses, disable raw 1194 * mode so that the program is required to 1195 * initialize all the memory that the helper could 1196 * just partially fill up. 
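			 *
			 * E.g. (sketch) with a variable length in r3:
			 *    r3 &= 63;            // now min_value = 0, max_value = 63
			 *    call helper(..., r2, r3)
			 * the checks below then validate [r2, r2 + max_value)
			 * rather than a single constant size.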
1197 */ 1198 meta = NULL; 1199 1200 if (reg->min_value < 0) { 1201 verbose("R%d min value is negative, either use unsigned or 'var &= const'\n", 1202 regno); 1203 return -EACCES; 1204 } 1205 1206 if (reg->min_value == 0) { 1207 err = check_helper_mem_access(env, regno - 1, 0, 1208 zero_size_allowed, 1209 meta); 1210 if (err) 1211 return err; 1212 } 1213 1214 if (reg->max_value == BPF_REGISTER_MAX_RANGE) { 1215 verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 1216 regno); 1217 return -EACCES; 1218 } 1219 err = check_helper_mem_access(env, regno - 1, 1220 reg->max_value, 1221 zero_size_allowed, meta); 1222 if (err) 1223 return err; 1224 } else { 1225 /* register is CONST_IMM */ 1226 err = check_helper_mem_access(env, regno - 1, reg->imm, 1227 zero_size_allowed, meta); 1228 } 1229 } 1230 1231 return err; 1232 err_type: 1233 verbose("R%d type=%s expected=%s\n", regno, 1234 reg_type_str[type], reg_type_str[expected_type]); 1235 return -EACCES; 1236 } 1237 1238 static int check_map_func_compatibility(struct bpf_map *map, int func_id) 1239 { 1240 if (!map) 1241 return 0; 1242 1243 /* We need a two way check, first is from map perspective ... */ 1244 switch (map->map_type) { 1245 case BPF_MAP_TYPE_PROG_ARRAY: 1246 if (func_id != BPF_FUNC_tail_call) 1247 goto error; 1248 break; 1249 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1250 if (func_id != BPF_FUNC_perf_event_read && 1251 func_id != BPF_FUNC_perf_event_output) 1252 goto error; 1253 break; 1254 case BPF_MAP_TYPE_STACK_TRACE: 1255 if (func_id != BPF_FUNC_get_stackid) 1256 goto error; 1257 break; 1258 case BPF_MAP_TYPE_CGROUP_ARRAY: 1259 if (func_id != BPF_FUNC_skb_under_cgroup && 1260 func_id != BPF_FUNC_current_task_under_cgroup) 1261 goto error; 1262 break; 1263 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 1264 case BPF_MAP_TYPE_HASH_OF_MAPS: 1265 if (func_id != BPF_FUNC_map_lookup_elem) 1266 goto error; 1267 default: 1268 break; 1269 } 1270 1271 /* ... and second from the function itself. */ 1272 switch (func_id) { 1273 case BPF_FUNC_tail_call: 1274 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 1275 goto error; 1276 break; 1277 case BPF_FUNC_perf_event_read: 1278 case BPF_FUNC_perf_event_output: 1279 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 1280 goto error; 1281 break; 1282 case BPF_FUNC_get_stackid: 1283 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 1284 goto error; 1285 break; 1286 case BPF_FUNC_current_task_under_cgroup: 1287 case BPF_FUNC_skb_under_cgroup: 1288 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 1289 goto error; 1290 break; 1291 default: 1292 break; 1293 } 1294 1295 return 0; 1296 error: 1297 verbose("cannot pass map_type %d into func %s#%d\n", 1298 map->map_type, func_id_name(func_id), func_id); 1299 return -EINVAL; 1300 } 1301 1302 static int check_raw_mode(const struct bpf_func_proto *fn) 1303 { 1304 int count = 0; 1305 1306 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 1307 count++; 1308 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 1309 count++; 1310 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 1311 count++; 1312 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 1313 count++; 1314 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 1315 count++; 1316 1317 return count > 1 ? 
-EINVAL : 0; 1318 } 1319 1320 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 1321 { 1322 struct bpf_verifier_state *state = &env->cur_state; 1323 struct bpf_reg_state *regs = state->regs, *reg; 1324 int i; 1325 1326 for (i = 0; i < MAX_BPF_REG; i++) 1327 if (regs[i].type == PTR_TO_PACKET || 1328 regs[i].type == PTR_TO_PACKET_END) 1329 mark_reg_unknown_value(regs, i); 1330 1331 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1332 if (state->stack_slot_type[i] != STACK_SPILL) 1333 continue; 1334 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 1335 if (reg->type != PTR_TO_PACKET && 1336 reg->type != PTR_TO_PACKET_END) 1337 continue; 1338 reg->type = UNKNOWN_VALUE; 1339 reg->imm = 0; 1340 } 1341 } 1342 1343 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 1344 { 1345 struct bpf_verifier_state *state = &env->cur_state; 1346 const struct bpf_func_proto *fn = NULL; 1347 struct bpf_reg_state *regs = state->regs; 1348 struct bpf_reg_state *reg; 1349 struct bpf_call_arg_meta meta; 1350 bool changes_data; 1351 int i, err; 1352 1353 /* find function prototype */ 1354 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 1355 verbose("invalid func %s#%d\n", func_id_name(func_id), func_id); 1356 return -EINVAL; 1357 } 1358 1359 if (env->prog->aux->ops->get_func_proto) 1360 fn = env->prog->aux->ops->get_func_proto(func_id); 1361 1362 if (!fn) { 1363 verbose("unknown func %s#%d\n", func_id_name(func_id), func_id); 1364 return -EINVAL; 1365 } 1366 1367 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 1368 if (!env->prog->gpl_compatible && fn->gpl_only) { 1369 verbose("cannot call GPL only function from proprietary program\n"); 1370 return -EINVAL; 1371 } 1372 1373 changes_data = bpf_helper_changes_pkt_data(fn->func); 1374 1375 memset(&meta, 0, sizeof(meta)); 1376 meta.pkt_access = fn->pkt_access; 1377 1378 /* We only support one arg being in raw mode at the moment, which 1379 * is sufficient for the helper functions we have right now. 1380 */ 1381 err = check_raw_mode(fn); 1382 if (err) { 1383 verbose("kernel subsystem misconfigured func %s#%d\n", 1384 func_id_name(func_id), func_id); 1385 return err; 1386 } 1387 1388 /* check args */ 1389 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); 1390 if (err) 1391 return err; 1392 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1393 if (err) 1394 return err; 1395 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1396 if (err) 1397 return err; 1398 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); 1399 if (err) 1400 return err; 1401 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); 1402 if (err) 1403 return err; 1404 1405 /* Mark slots with STACK_MISC in case of raw mode, stack offset 1406 * is inferred from register state. 
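	 *
	 * E.g. (sketch) a helper declaring ARG_PTR_TO_UNINIT_MEM, such as
	 * bpf_probe_read(), may be handed a buffer that was never written to:
	 *    r1 = r10; r1 += -16;    // dst buffer on the stack
	 *    r2 = 16;                // size
	 *    r3 = <kernel address>
	 *    call bpf_probe_read
	 * check_func_arg() recorded regno/access_size in 'meta' instead of
	 * requiring initialized stack, and the loop below simulates a write of
	 * 'meta.access_size' bytes so those slots become STACK_MISC.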
1407 */ 1408 for (i = 0; i < meta.access_size; i++) { 1409 err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1); 1410 if (err) 1411 return err; 1412 } 1413 1414 /* reset caller saved regs */ 1415 for (i = 0; i < CALLER_SAVED_REGS; i++) { 1416 reg = regs + caller_saved[i]; 1417 reg->type = NOT_INIT; 1418 reg->imm = 0; 1419 } 1420 1421 /* update return register */ 1422 if (fn->ret_type == RET_INTEGER) { 1423 regs[BPF_REG_0].type = UNKNOWN_VALUE; 1424 } else if (fn->ret_type == RET_VOID) { 1425 regs[BPF_REG_0].type = NOT_INIT; 1426 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 1427 struct bpf_insn_aux_data *insn_aux; 1428 1429 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 1430 regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0; 1431 /* remember map_ptr, so that check_map_access() 1432 * can check 'value_size' boundary of memory access 1433 * to map element returned from bpf_map_lookup_elem() 1434 */ 1435 if (meta.map_ptr == NULL) { 1436 verbose("kernel subsystem misconfigured verifier\n"); 1437 return -EINVAL; 1438 } 1439 regs[BPF_REG_0].map_ptr = meta.map_ptr; 1440 regs[BPF_REG_0].id = ++env->id_gen; 1441 insn_aux = &env->insn_aux_data[insn_idx]; 1442 if (!insn_aux->map_ptr) 1443 insn_aux->map_ptr = meta.map_ptr; 1444 else if (insn_aux->map_ptr != meta.map_ptr) 1445 insn_aux->map_ptr = BPF_MAP_PTR_POISON; 1446 } else { 1447 verbose("unknown return type %d of func %s#%d\n", 1448 fn->ret_type, func_id_name(func_id), func_id); 1449 return -EINVAL; 1450 } 1451 1452 err = check_map_func_compatibility(meta.map_ptr, func_id); 1453 if (err) 1454 return err; 1455 1456 if (changes_data) 1457 clear_all_pkt_pointers(env); 1458 return 0; 1459 } 1460 1461 static int check_packet_ptr_add(struct bpf_verifier_env *env, 1462 struct bpf_insn *insn) 1463 { 1464 struct bpf_reg_state *regs = env->cur_state.regs; 1465 struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; 1466 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 1467 struct bpf_reg_state tmp_reg; 1468 s32 imm; 1469 1470 if (BPF_SRC(insn->code) == BPF_K) { 1471 /* pkt_ptr += imm */ 1472 imm = insn->imm; 1473 1474 add_imm: 1475 if (imm < 0) { 1476 verbose("addition of negative constant to packet pointer is not allowed\n"); 1477 return -EACCES; 1478 } 1479 if (imm >= MAX_PACKET_OFF || 1480 imm + dst_reg->off >= MAX_PACKET_OFF) { 1481 verbose("constant %d is too large to add to packet pointer\n", 1482 imm); 1483 return -EACCES; 1484 } 1485 /* a constant was added to pkt_ptr. 1486 * Remember it while keeping the same 'id' 1487 */ 1488 dst_reg->off += imm; 1489 } else { 1490 bool had_id; 1491 1492 if (src_reg->type == PTR_TO_PACKET) { 1493 /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ 1494 tmp_reg = *dst_reg; /* save r7 state */ 1495 *dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */ 1496 src_reg = &tmp_reg; /* pretend it's src_reg state */ 1497 /* if the checks below reject it, the copy won't matter, 1498 * since we're rejecting the whole program. 
If all ok,
			 * then imm22 state will be added to r7
			 * and r7 will be pkt(id=0,off=22,r=62) while
			 * r6 will stay as pkt(id=0,off=0,r=62)
			 */
		}

		if (src_reg->type == CONST_IMM) {
			/* pkt_ptr += reg where reg is known constant */
			imm = src_reg->imm;
			goto add_imm;
		}
		/* disallow pkt_ptr += reg
		 * if reg is not unknown_value with guaranteed zero upper bits
		 * otherwise pkt_ptr may overflow and addition will become
		 * subtraction which is not allowed
		 */
		if (src_reg->type != UNKNOWN_VALUE) {
			verbose("cannot add '%s' to ptr_to_packet\n",
				reg_type_str[src_reg->type]);
			return -EACCES;
		}
		if (src_reg->imm < 48) {
			verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n",
				src_reg->imm);
			return -EACCES;
		}

		had_id = (dst_reg->id != 0);

		/* dst_reg stays as pkt_ptr type and since some positive
		 * integer value was added to the pointer, increment its 'id'
		 */
		dst_reg->id = ++env->id_gen;

		/* something was added to pkt_ptr, set range to zero */
		dst_reg->aux_off += dst_reg->off;
		dst_reg->off = 0;
		dst_reg->range = 0;
		if (had_id)
			dst_reg->aux_off_align = min(dst_reg->aux_off_align,
						     src_reg->min_align);
		else
			dst_reg->aux_off_align = src_reg->min_align;
	}
	return 0;
}

static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = env->cur_state.regs;
	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
	u8 opcode = BPF_OP(insn->code);
	s64 imm_log2;

	/* for type == UNKNOWN_VALUE:
	 * imm > 0 -> number of zero upper bits
	 * imm == 0 -> don't track which is the same as all bits can be non-zero
	 */

	if (BPF_SRC(insn->code) == BPF_X) {
		struct bpf_reg_state *src_reg = &regs[insn->src_reg];

		if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
		    dst_reg->imm && opcode == BPF_ADD) {
			/* dreg += sreg
			 * where both have zero upper bits. Adding them
			 * can only result in making one more bit non-zero
			 * in the larger value.
			 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
			 *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
			 */
			dst_reg->imm = min(dst_reg->imm, src_reg->imm);
			dst_reg->imm--;
			return 0;
		}
		if (src_reg->type == CONST_IMM && src_reg->imm > 0 &&
		    dst_reg->imm && opcode == BPF_ADD) {
			/* dreg += sreg
			 * where dreg has zero upper bits and sreg is const.
			 * Adding them can only result in making one more bit
			 * non-zero in the larger value.
			 */
			imm_log2 = __ilog2_u64((long long)src_reg->imm);
			dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
			dst_reg->imm--;
			return 0;
		}
		/* all other cases are not supported yet, just mark dst_reg */
		dst_reg->imm = 0;
		return 0;
	}

	/* sign extend 32-bit imm into 64-bit to make sure that
	 * negative values occupy bit 63. Note ilog2() would have
	 * been incorrect, since sizeof(insn->imm) == 4
	 */
	imm_log2 = __ilog2_u64((long long)insn->imm);

	if (dst_reg->imm && opcode == BPF_LSH) {
		/* reg <<= imm
		 * if reg was a result of 2 byte load, then its imm == 48
		 * which means that upper 48 bits are zero and shifting this reg
		 * left by 4 would mean that upper 44 bits are still zero
		 */
		dst_reg->imm -= insn->imm;
	} else if (dst_reg->imm && opcode == BPF_MUL) {
		/* reg *= imm
		 * if multiplying by 14 subtract 4
		 * This is conservative calculation of upper zero bits.
		 * It's not trying to special case insn->imm == 1 or 0 cases
		 */
		dst_reg->imm -= imm_log2 + 1;
	} else if (opcode == BPF_AND) {
		/* reg &= imm */
		dst_reg->imm = 63 - imm_log2;
	} else if (dst_reg->imm && opcode == BPF_ADD) {
		/* reg += imm */
		dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
		dst_reg->imm--;
	} else if (opcode == BPF_RSH) {
		/* reg >>= imm
		 * which means that after right shift, upper bits will be zero
		 * note that verifier already checked that
		 * 0 <= imm < 64 for shift insn
		 */
		dst_reg->imm += insn->imm;
		if (unlikely(dst_reg->imm > 64))
			/* some dumb code did:
			 * r2 = *(u32 *)mem;
			 * r2 >>= 32;
			 * and all bits are zero now */
			dst_reg->imm = 64;
	} else {
		/* all other alu ops, means that we don't know what will
		 * happen to the value, mark it with unknown number of zero bits
		 */
		dst_reg->imm = 0;
	}

	if (dst_reg->imm < 0) {
		/* all 64 bits of the register can contain non-zero bits
		 * and such value cannot be added to ptr_to_packet, since it
		 * may overflow, mark it as unknown to avoid further eval
		 */
		dst_reg->imm = 0;
	}
	return 0;
}

static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
				struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = env->cur_state.regs;
	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
	struct bpf_reg_state *src_reg = &regs[insn->src_reg];
	u8 opcode = BPF_OP(insn->code);
	u64 dst_imm = dst_reg->imm;

	/* dst_reg->type == CONST_IMM here. Simulate execution of insns
	 * containing ALU ops. Don't care about overflow or negative
	 * values, just add/sub/... them; registers are in u64.
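	 *
	 * E.g. (sketch) starting from "r1 = 8" (CONST_IMM 8), an "r1 <<= 2"
	 * is folded below into CONST_IMM 32, so r1 can still be used later
	 * where a known constant is required (e.g. as a constant offset added
	 * to a PTR_TO_STACK register).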
1660 */ 1661 if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) { 1662 dst_imm += insn->imm; 1663 } else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X && 1664 src_reg->type == CONST_IMM) { 1665 dst_imm += src_reg->imm; 1666 } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) { 1667 dst_imm -= insn->imm; 1668 } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X && 1669 src_reg->type == CONST_IMM) { 1670 dst_imm -= src_reg->imm; 1671 } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) { 1672 dst_imm *= insn->imm; 1673 } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X && 1674 src_reg->type == CONST_IMM) { 1675 dst_imm *= src_reg->imm; 1676 } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) { 1677 dst_imm |= insn->imm; 1678 } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X && 1679 src_reg->type == CONST_IMM) { 1680 dst_imm |= src_reg->imm; 1681 } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) { 1682 dst_imm &= insn->imm; 1683 } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X && 1684 src_reg->type == CONST_IMM) { 1685 dst_imm &= src_reg->imm; 1686 } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) { 1687 dst_imm >>= insn->imm; 1688 } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X && 1689 src_reg->type == CONST_IMM) { 1690 dst_imm >>= src_reg->imm; 1691 } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) { 1692 dst_imm <<= insn->imm; 1693 } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X && 1694 src_reg->type == CONST_IMM) { 1695 dst_imm <<= src_reg->imm; 1696 } else { 1697 mark_reg_unknown_value(regs, insn->dst_reg); 1698 goto out; 1699 } 1700 1701 dst_reg->imm = dst_imm; 1702 out: 1703 return 0; 1704 } 1705 1706 static void check_reg_overflow(struct bpf_reg_state *reg) 1707 { 1708 if (reg->max_value > BPF_REGISTER_MAX_RANGE) 1709 reg->max_value = BPF_REGISTER_MAX_RANGE; 1710 if (reg->min_value < BPF_REGISTER_MIN_RANGE || 1711 reg->min_value > BPF_REGISTER_MAX_RANGE) 1712 reg->min_value = BPF_REGISTER_MIN_RANGE; 1713 } 1714 1715 static u32 calc_align(u32 imm) 1716 { 1717 if (!imm) 1718 return 1U << 31; 1719 return imm - ((imm - 1) & imm); 1720 } 1721 1722 static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, 1723 struct bpf_insn *insn) 1724 { 1725 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1726 s64 min_val = BPF_REGISTER_MIN_RANGE; 1727 u64 max_val = BPF_REGISTER_MAX_RANGE; 1728 u8 opcode = BPF_OP(insn->code); 1729 u32 dst_align, src_align; 1730 1731 dst_reg = ®s[insn->dst_reg]; 1732 src_align = 0; 1733 if (BPF_SRC(insn->code) == BPF_X) { 1734 check_reg_overflow(®s[insn->src_reg]); 1735 min_val = regs[insn->src_reg].min_value; 1736 max_val = regs[insn->src_reg].max_value; 1737 1738 /* If the source register is a random pointer then the 1739 * min_value/max_value values represent the range of the known 1740 * accesses into that value, not the actual min/max value of the 1741 * register itself. In this case we have to reset the reg range 1742 * values so we know it is not safe to look at. 
1743 */ 1744 if (regs[insn->src_reg].type != CONST_IMM && 1745 regs[insn->src_reg].type != UNKNOWN_VALUE) { 1746 min_val = BPF_REGISTER_MIN_RANGE; 1747 max_val = BPF_REGISTER_MAX_RANGE; 1748 src_align = 0; 1749 } else { 1750 src_align = regs[insn->src_reg].min_align; 1751 } 1752 } else if (insn->imm < BPF_REGISTER_MAX_RANGE && 1753 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { 1754 min_val = max_val = insn->imm; 1755 src_align = calc_align(insn->imm); 1756 } 1757 1758 dst_align = dst_reg->min_align; 1759 1760 /* We don't know anything about what was done to this register, mark it 1761 * as unknown. 1762 */ 1763 if (min_val == BPF_REGISTER_MIN_RANGE && 1764 max_val == BPF_REGISTER_MAX_RANGE) { 1765 reset_reg_range_values(regs, insn->dst_reg); 1766 return; 1767 } 1768 1769 /* If one of our values was at the end of our ranges then we can't just 1770 * do our normal operations to the register, we need to set the values 1771 * to the min/max since they are undefined. 1772 */ 1773 if (min_val == BPF_REGISTER_MIN_RANGE) 1774 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1775 if (max_val == BPF_REGISTER_MAX_RANGE) 1776 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1777 1778 switch (opcode) { 1779 case BPF_ADD: 1780 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1781 dst_reg->min_value += min_val; 1782 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1783 dst_reg->max_value += max_val; 1784 dst_reg->min_align = min(src_align, dst_align); 1785 break; 1786 case BPF_SUB: 1787 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1788 dst_reg->min_value -= min_val; 1789 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1790 dst_reg->max_value -= max_val; 1791 dst_reg->min_align = min(src_align, dst_align); 1792 break; 1793 case BPF_MUL: 1794 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1795 dst_reg->min_value *= min_val; 1796 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1797 dst_reg->max_value *= max_val; 1798 dst_reg->min_align = max(src_align, dst_align); 1799 break; 1800 case BPF_AND: 1801 /* Disallow AND'ing of negative numbers, ain't nobody got time 1802 * for that. Otherwise the minimum is 0 and the max is the max 1803 * value we could AND against. 1804 */ 1805 if (min_val < 0) 1806 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1807 else 1808 dst_reg->min_value = 0; 1809 dst_reg->max_value = max_val; 1810 dst_reg->min_align = max(src_align, dst_align); 1811 break; 1812 case BPF_LSH: 1813 /* Gotta have special overflow logic here, if we're shifting 1814 * more than MAX_RANGE then just assume we have an invalid 1815 * range. 1816 */ 1817 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) { 1818 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1819 dst_reg->min_align = 1; 1820 } else { 1821 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1822 dst_reg->min_value <<= min_val; 1823 if (!dst_reg->min_align) 1824 dst_reg->min_align = 1; 1825 dst_reg->min_align <<= min_val; 1826 } 1827 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1828 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1829 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1830 dst_reg->max_value <<= max_val; 1831 break; 1832 case BPF_RSH: 1833 /* RSH by a negative number is undefined, and the BPF_RSH is an 1834 * unsigned shift, so make the appropriate casts. 
1835 */ 1836 if (min_val < 0 || dst_reg->min_value < 0) { 1837 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1838 } else { 1839 dst_reg->min_value = 1840 (u64)(dst_reg->min_value) >> min_val; 1841 } 1842 if (min_val < 0) { 1843 dst_reg->min_align = 1; 1844 } else { 1845 dst_reg->min_align >>= (u64) min_val; 1846 if (!dst_reg->min_align) 1847 dst_reg->min_align = 1; 1848 } 1849 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1850 dst_reg->max_value >>= max_val; 1851 break; 1852 default: 1853 reset_reg_range_values(regs, insn->dst_reg); 1854 break; 1855 } 1856 1857 check_reg_overflow(dst_reg); 1858 } 1859 1860 /* check validity of 32-bit and 64-bit arithmetic operations */ 1861 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 1862 { 1863 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1864 u8 opcode = BPF_OP(insn->code); 1865 int err; 1866 1867 if (opcode == BPF_END || opcode == BPF_NEG) { 1868 if (opcode == BPF_NEG) { 1869 if (BPF_SRC(insn->code) != 0 || 1870 insn->src_reg != BPF_REG_0 || 1871 insn->off != 0 || insn->imm != 0) { 1872 verbose("BPF_NEG uses reserved fields\n"); 1873 return -EINVAL; 1874 } 1875 } else { 1876 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 1877 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) { 1878 verbose("BPF_END uses reserved fields\n"); 1879 return -EINVAL; 1880 } 1881 } 1882 1883 /* check src operand */ 1884 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1885 if (err) 1886 return err; 1887 1888 if (is_pointer_value(env, insn->dst_reg)) { 1889 verbose("R%d pointer arithmetic prohibited\n", 1890 insn->dst_reg); 1891 return -EACCES; 1892 } 1893 1894 /* check dest operand */ 1895 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1896 if (err) 1897 return err; 1898 1899 } else if (opcode == BPF_MOV) { 1900 1901 if (BPF_SRC(insn->code) == BPF_X) { 1902 if (insn->imm != 0 || insn->off != 0) { 1903 verbose("BPF_MOV uses reserved fields\n"); 1904 return -EINVAL; 1905 } 1906 1907 /* check src operand */ 1908 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1909 if (err) 1910 return err; 1911 } else { 1912 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1913 verbose("BPF_MOV uses reserved fields\n"); 1914 return -EINVAL; 1915 } 1916 } 1917 1918 /* check dest operand */ 1919 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1920 if (err) 1921 return err; 1922 1923 /* we are setting our register to something new, we need to 1924 * reset its range values. 1925 */ 1926 reset_reg_range_values(regs, insn->dst_reg); 1927 1928 if (BPF_SRC(insn->code) == BPF_X) { 1929 if (BPF_CLASS(insn->code) == BPF_ALU64) { 1930 /* case: R1 = R2 1931 * copy register state to dest reg 1932 */ 1933 regs[insn->dst_reg] = regs[insn->src_reg]; 1934 } else { 1935 if (is_pointer_value(env, insn->src_reg)) { 1936 verbose("R%d partial copy of pointer\n", 1937 insn->src_reg); 1938 return -EACCES; 1939 } 1940 mark_reg_unknown_value(regs, insn->dst_reg); 1941 } 1942 } else { 1943 /* case: R = imm 1944 * remember the value we stored into this reg 1945 */ 1946 regs[insn->dst_reg].type = CONST_IMM; 1947 regs[insn->dst_reg].imm = insn->imm; 1948 regs[insn->dst_reg].max_value = insn->imm; 1949 regs[insn->dst_reg].min_value = insn->imm; 1950 regs[insn->dst_reg].min_align = calc_align(insn->imm); 1951 } 1952 1953 } else if (opcode > BPF_END) { 1954 verbose("invalid BPF_ALU opcode %x\n", opcode); 1955 return -EINVAL; 1956 1957 } else { /* all other ALU ops: and, sub, xor, add, ... 
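 * e.g. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2) or
 * BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 0xff); constant divisors of zero and
 * out-of-range constant shifts such as BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 64)
 * are rejected just below.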
*/ 1958 1959 if (BPF_SRC(insn->code) == BPF_X) { 1960 if (insn->imm != 0 || insn->off != 0) { 1961 verbose("BPF_ALU uses reserved fields\n"); 1962 return -EINVAL; 1963 } 1964 /* check src1 operand */ 1965 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1966 if (err) 1967 return err; 1968 } else { 1969 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1970 verbose("BPF_ALU uses reserved fields\n"); 1971 return -EINVAL; 1972 } 1973 } 1974 1975 /* check src2 operand */ 1976 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1977 if (err) 1978 return err; 1979 1980 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 1981 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 1982 verbose("div by zero\n"); 1983 return -EINVAL; 1984 } 1985 1986 if ((opcode == BPF_LSH || opcode == BPF_RSH || 1987 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 1988 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 1989 1990 if (insn->imm < 0 || insn->imm >= size) { 1991 verbose("invalid shift %d\n", insn->imm); 1992 return -EINVAL; 1993 } 1994 } 1995 1996 /* check dest operand */ 1997 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); 1998 if (err) 1999 return err; 2000 2001 dst_reg = ®s[insn->dst_reg]; 2002 2003 /* first we want to adjust our ranges. */ 2004 adjust_reg_min_max_vals(env, insn); 2005 2006 /* pattern match 'bpf_add Rx, imm' instruction */ 2007 if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && 2008 dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) { 2009 dst_reg->type = PTR_TO_STACK; 2010 dst_reg->imm = insn->imm; 2011 return 0; 2012 } else if (opcode == BPF_ADD && 2013 BPF_CLASS(insn->code) == BPF_ALU64 && 2014 dst_reg->type == PTR_TO_STACK && 2015 ((BPF_SRC(insn->code) == BPF_X && 2016 regs[insn->src_reg].type == CONST_IMM) || 2017 BPF_SRC(insn->code) == BPF_K)) { 2018 if (BPF_SRC(insn->code) == BPF_X) 2019 dst_reg->imm += regs[insn->src_reg].imm; 2020 else 2021 dst_reg->imm += insn->imm; 2022 return 0; 2023 } else if (opcode == BPF_ADD && 2024 BPF_CLASS(insn->code) == BPF_ALU64 && 2025 (dst_reg->type == PTR_TO_PACKET || 2026 (BPF_SRC(insn->code) == BPF_X && 2027 regs[insn->src_reg].type == PTR_TO_PACKET))) { 2028 /* ptr_to_packet += K|X */ 2029 return check_packet_ptr_add(env, insn); 2030 } else if (BPF_CLASS(insn->code) == BPF_ALU64 && 2031 dst_reg->type == UNKNOWN_VALUE && 2032 env->allow_ptr_leaks) { 2033 /* unknown += K|X */ 2034 return evaluate_reg_alu(env, insn); 2035 } else if (BPF_CLASS(insn->code) == BPF_ALU64 && 2036 dst_reg->type == CONST_IMM && 2037 env->allow_ptr_leaks) { 2038 /* reg_imm += K|X */ 2039 return evaluate_reg_imm_alu(env, insn); 2040 } else if (is_pointer_value(env, insn->dst_reg)) { 2041 verbose("R%d pointer arithmetic prohibited\n", 2042 insn->dst_reg); 2043 return -EACCES; 2044 } else if (BPF_SRC(insn->code) == BPF_X && 2045 is_pointer_value(env, insn->src_reg)) { 2046 verbose("R%d pointer arithmetic prohibited\n", 2047 insn->src_reg); 2048 return -EACCES; 2049 } 2050 2051 /* If we did pointer math on a map value then just set it to our 2052 * PTR_TO_MAP_VALUE_ADJ type so we can deal with any stores or 2053 * loads to this register appropriately, otherwise just mark the 2054 * register as unknown. 
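 *
 * A sketch of a fragment that would take the PTR_TO_MAP_VALUE_ADJ path
 * (the map/key setup and the bounds checks on R2 are assumed and omitted):
 *
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),          // fall-through: R0 = PTR_TO_MAP_VALUE
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),   // R0 becomes PTR_TO_MAP_VALUE_ADJ
 *
 * This is only allowed with env->allow_ptr_leaks (CAP_SYS_ADMIN); later
 * check_mem_access() folds the tracked min_value/max_value of R0 into the
 * bounds check against the map's value_size.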
2055 */ 2056 if (env->allow_ptr_leaks && 2057 BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD && 2058 (dst_reg->type == PTR_TO_MAP_VALUE || 2059 dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) 2060 dst_reg->type = PTR_TO_MAP_VALUE_ADJ; 2061 else 2062 mark_reg_unknown_value(regs, insn->dst_reg); 2063 } 2064 2065 return 0; 2066 } 2067 2068 static void find_good_pkt_pointers(struct bpf_verifier_state *state, 2069 struct bpf_reg_state *dst_reg) 2070 { 2071 struct bpf_reg_state *regs = state->regs, *reg; 2072 int i; 2073 2074 /* LLVM can generate two kind of checks: 2075 * 2076 * Type 1: 2077 * 2078 * r2 = r3; 2079 * r2 += 8; 2080 * if (r2 > pkt_end) goto <handle exception> 2081 * <access okay> 2082 * 2083 * Where: 2084 * r2 == dst_reg, pkt_end == src_reg 2085 * r2=pkt(id=n,off=8,r=0) 2086 * r3=pkt(id=n,off=0,r=0) 2087 * 2088 * Type 2: 2089 * 2090 * r2 = r3; 2091 * r2 += 8; 2092 * if (pkt_end >= r2) goto <access okay> 2093 * <handle exception> 2094 * 2095 * Where: 2096 * pkt_end == dst_reg, r2 == src_reg 2097 * r2=pkt(id=n,off=8,r=0) 2098 * r3=pkt(id=n,off=0,r=0) 2099 * 2100 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 2101 * so that range of bytes [r3, r3 + 8) is safe to access. 2102 */ 2103 2104 for (i = 0; i < MAX_BPF_REG; i++) 2105 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 2106 /* keep the maximum range already checked */ 2107 regs[i].range = max(regs[i].range, dst_reg->off); 2108 2109 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2110 if (state->stack_slot_type[i] != STACK_SPILL) 2111 continue; 2112 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2113 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 2114 reg->range = max(reg->range, dst_reg->off); 2115 } 2116 } 2117 2118 /* Adjusts the register min/max values in the case that the dst_reg is the 2119 * variable register that we are working on, and src_reg is a constant or we're 2120 * simply doing a BPF_K check. 2121 */ 2122 static void reg_set_min_max(struct bpf_reg_state *true_reg, 2123 struct bpf_reg_state *false_reg, u64 val, 2124 u8 opcode) 2125 { 2126 switch (opcode) { 2127 case BPF_JEQ: 2128 /* If this is false then we know nothing Jon Snow, but if it is 2129 * true then we know for sure. 2130 */ 2131 true_reg->max_value = true_reg->min_value = val; 2132 break; 2133 case BPF_JNE: 2134 /* If this is true we know nothing Jon Snow, but if it is false 2135 * we know the value for sure; 2136 */ 2137 false_reg->max_value = false_reg->min_value = val; 2138 break; 2139 case BPF_JGT: 2140 /* Unsigned comparison, the minimum value is 0. */ 2141 false_reg->min_value = 0; 2142 /* fallthrough */ 2143 case BPF_JSGT: 2144 /* If this is false then we know the maximum val is val, 2145 * otherwise we know the min val is val+1. 2146 */ 2147 false_reg->max_value = val; 2148 true_reg->min_value = val + 1; 2149 break; 2150 case BPF_JGE: 2151 /* Unsigned comparison, the minimum value is 0. */ 2152 false_reg->min_value = 0; 2153 /* fallthrough */ 2154 case BPF_JSGE: 2155 /* If this is false then we know the maximum value is val - 1, 2156 * otherwise we know the mimimum value is val. 2157 */ 2158 false_reg->max_value = val - 1; 2159 true_reg->min_value = val; 2160 break; 2161 default: 2162 break; 2163 } 2164 2165 check_reg_overflow(false_reg); 2166 check_reg_overflow(true_reg); 2167 } 2168 2169 /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg 2170 * is the variable reg. 
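 *
 * e.g. for a hypothetical 'if (10 > R2)' test where the constant sits in
 * the destination register:
 *
 *   BPF_MOV64_IMM(BPF_REG_1, 10),
 *   BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),   // offset 3 is arbitrary
 *
 * in the taken (true) branch R2 gets min_value = 0 and max_value = 9,
 * while in the fall-through (false) branch R2 gets min_value = 10.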
 */
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
				struct bpf_reg_state *false_reg, u64 val,
				u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:
		/* If this is false then we know nothing Jon Snow, but if it is
		 * true then we know for sure.
		 */
		true_reg->max_value = true_reg->min_value = val;
		break;
	case BPF_JNE:
		/* If this is true we know nothing Jon Snow, but if it is false
		 * we know the value for sure.
		 */
		false_reg->max_value = false_reg->min_value = val;
		break;
	case BPF_JGT:
		/* Unsigned comparison, the minimum value is 0. */
		true_reg->min_value = 0;
		/* fallthrough */
	case BPF_JSGT:
		/* If this is false, then val is <= the register; if it is
		 * true, the register is < val.
		 */
		false_reg->min_value = val;
		true_reg->max_value = val - 1;
		break;
	case BPF_JGE:
		/* Unsigned comparison, the minimum value is 0. */
		true_reg->min_value = 0;
		/* fallthrough */
	case BPF_JSGE:
		/* If this is false then the constant < the register; if it is
		 * true then the register <= the constant.
		 */
		false_reg->min_value = val + 1;
		true_reg->max_value = val;
		break;
	default:
		break;
	}

	check_reg_overflow(false_reg);
	check_reg_overflow(true_reg);
}

static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
			 enum bpf_reg_type type)
{
	struct bpf_reg_state *reg = &regs[regno];

	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
		if (type == UNKNOWN_VALUE) {
			__mark_reg_unknown_value(regs, regno);
		} else if (reg->map_ptr->inner_map_meta) {
			reg->type = CONST_PTR_TO_MAP;
			reg->map_ptr = reg->map_ptr->inner_map_meta;
		} else {
			reg->type = type;
		}
		/* We don't need id from this point onwards anymore, thus we
		 * should reset it, so that state pruning has a chance
		 * to take effect.
		 */
		reg->id = 0;
	}
}

/* The logic is similar to find_good_pkt_pointers(), both could eventually
 * be folded together at some point.
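 *
 * e.g. (illustrative, the usual R1/R2 key setup is omitted) after
 *
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *   BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *
 * R0 and R6 share the same id, so on the fall-through path both become
 * PTR_TO_MAP_VALUE and on the taken path both become UNKNOWN_VALUE.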
2244 */ 2245 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, 2246 enum bpf_reg_type type) 2247 { 2248 struct bpf_reg_state *regs = state->regs; 2249 u32 id = regs[regno].id; 2250 int i; 2251 2252 for (i = 0; i < MAX_BPF_REG; i++) 2253 mark_map_reg(regs, i, id, type); 2254 2255 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2256 if (state->stack_slot_type[i] != STACK_SPILL) 2257 continue; 2258 mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type); 2259 } 2260 } 2261 2262 static int check_cond_jmp_op(struct bpf_verifier_env *env, 2263 struct bpf_insn *insn, int *insn_idx) 2264 { 2265 struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state; 2266 struct bpf_reg_state *regs = this_branch->regs, *dst_reg; 2267 u8 opcode = BPF_OP(insn->code); 2268 int err; 2269 2270 if (opcode > BPF_EXIT) { 2271 verbose("invalid BPF_JMP opcode %x\n", opcode); 2272 return -EINVAL; 2273 } 2274 2275 if (BPF_SRC(insn->code) == BPF_X) { 2276 if (insn->imm != 0) { 2277 verbose("BPF_JMP uses reserved fields\n"); 2278 return -EINVAL; 2279 } 2280 2281 /* check src1 operand */ 2282 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2283 if (err) 2284 return err; 2285 2286 if (is_pointer_value(env, insn->src_reg)) { 2287 verbose("R%d pointer comparison prohibited\n", 2288 insn->src_reg); 2289 return -EACCES; 2290 } 2291 } else { 2292 if (insn->src_reg != BPF_REG_0) { 2293 verbose("BPF_JMP uses reserved fields\n"); 2294 return -EINVAL; 2295 } 2296 } 2297 2298 /* check src2 operand */ 2299 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 2300 if (err) 2301 return err; 2302 2303 dst_reg = ®s[insn->dst_reg]; 2304 2305 /* detect if R == 0 where R was initialized to zero earlier */ 2306 if (BPF_SRC(insn->code) == BPF_K && 2307 (opcode == BPF_JEQ || opcode == BPF_JNE) && 2308 dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) { 2309 if (opcode == BPF_JEQ) { 2310 /* if (imm == imm) goto pc+off; 2311 * only follow the goto, ignore fall-through 2312 */ 2313 *insn_idx += insn->off; 2314 return 0; 2315 } else { 2316 /* if (imm != imm) goto pc+off; 2317 * only follow fall-through branch, since 2318 * that's where the program will go 2319 */ 2320 return 0; 2321 } 2322 } 2323 2324 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); 2325 if (!other_branch) 2326 return -EFAULT; 2327 2328 /* detect if we are comparing against a constant value so we can adjust 2329 * our min/max values for our dst register. 2330 */ 2331 if (BPF_SRC(insn->code) == BPF_X) { 2332 if (regs[insn->src_reg].type == CONST_IMM) 2333 reg_set_min_max(&other_branch->regs[insn->dst_reg], 2334 dst_reg, regs[insn->src_reg].imm, 2335 opcode); 2336 else if (dst_reg->type == CONST_IMM) 2337 reg_set_min_max_inv(&other_branch->regs[insn->src_reg], 2338 ®s[insn->src_reg], dst_reg->imm, 2339 opcode); 2340 } else { 2341 reg_set_min_max(&other_branch->regs[insn->dst_reg], 2342 dst_reg, insn->imm, opcode); 2343 } 2344 2345 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ 2346 if (BPF_SRC(insn->code) == BPF_K && 2347 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 2348 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 2349 /* Mark all identical map registers in each branch as either 2350 * safe or unknown depending R == 0 or R != 0 conditional. 2351 */ 2352 mark_map_regs(this_branch, insn->dst_reg, 2353 opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE); 2354 mark_map_regs(other_branch, insn->dst_reg, 2355 opcode == BPF_JEQ ? 
UNKNOWN_VALUE : PTR_TO_MAP_VALUE); 2356 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 2357 dst_reg->type == PTR_TO_PACKET && 2358 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2359 find_good_pkt_pointers(this_branch, dst_reg); 2360 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && 2361 dst_reg->type == PTR_TO_PACKET_END && 2362 regs[insn->src_reg].type == PTR_TO_PACKET) { 2363 find_good_pkt_pointers(other_branch, ®s[insn->src_reg]); 2364 } else if (is_pointer_value(env, insn->dst_reg)) { 2365 verbose("R%d pointer comparison prohibited\n", insn->dst_reg); 2366 return -EACCES; 2367 } 2368 if (log_level) 2369 print_verifier_state(this_branch); 2370 return 0; 2371 } 2372 2373 /* return the map pointer stored inside BPF_LD_IMM64 instruction */ 2374 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) 2375 { 2376 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; 2377 2378 return (struct bpf_map *) (unsigned long) imm64; 2379 } 2380 2381 /* verify BPF_LD_IMM64 instruction */ 2382 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 2383 { 2384 struct bpf_reg_state *regs = env->cur_state.regs; 2385 int err; 2386 2387 if (BPF_SIZE(insn->code) != BPF_DW) { 2388 verbose("invalid BPF_LD_IMM insn\n"); 2389 return -EINVAL; 2390 } 2391 if (insn->off != 0) { 2392 verbose("BPF_LD_IMM64 uses reserved fields\n"); 2393 return -EINVAL; 2394 } 2395 2396 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 2397 if (err) 2398 return err; 2399 2400 if (insn->src_reg == 0) { 2401 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 2402 2403 regs[insn->dst_reg].type = CONST_IMM; 2404 regs[insn->dst_reg].imm = imm; 2405 return 0; 2406 } 2407 2408 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ 2409 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); 2410 2411 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 2412 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); 2413 return 0; 2414 } 2415 2416 static bool may_access_skb(enum bpf_prog_type type) 2417 { 2418 switch (type) { 2419 case BPF_PROG_TYPE_SOCKET_FILTER: 2420 case BPF_PROG_TYPE_SCHED_CLS: 2421 case BPF_PROG_TYPE_SCHED_ACT: 2422 return true; 2423 default: 2424 return false; 2425 } 2426 } 2427 2428 /* verify safety of LD_ABS|LD_IND instructions: 2429 * - they can only appear in the programs where ctx == skb 2430 * - since they are wrappers of function calls, they scratch R1-R5 registers, 2431 * preserve R6-R9, and store return value into R0 2432 * 2433 * Implicit input: 2434 * ctx == skb == R6 == CTX 2435 * 2436 * Explicit input: 2437 * SRC == any register 2438 * IMM == 32-bit immediate 2439 * 2440 * Output: 2441 * R0 - 8/16/32-bit skb data converted to cpu endianness 2442 */ 2443 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 2444 { 2445 struct bpf_reg_state *regs = env->cur_state.regs; 2446 u8 mode = BPF_MODE(insn->code); 2447 struct bpf_reg_state *reg; 2448 int i, err; 2449 2450 if (!may_access_skb(env->prog->type)) { 2451 verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 2452 return -EINVAL; 2453 } 2454 2455 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 2456 BPF_SIZE(insn->code) == BPF_DW || 2457 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 2458 verbose("BPF_LD_[ABS|IND] uses reserved fields\n"); 2459 return -EINVAL; 2460 } 2461 2462 /* check whether implicit source operand (register R6) is readable */ 2463 err = check_reg_arg(regs, BPF_REG_6, SRC_OP); 2464 if (err) 2465 return 
err; 2466 2467 if (regs[BPF_REG_6].type != PTR_TO_CTX) { 2468 verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 2469 return -EINVAL; 2470 } 2471 2472 if (mode == BPF_IND) { 2473 /* check explicit source operand */ 2474 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2475 if (err) 2476 return err; 2477 } 2478 2479 /* reset caller saved regs to unreadable */ 2480 for (i = 0; i < CALLER_SAVED_REGS; i++) { 2481 reg = regs + caller_saved[i]; 2482 reg->type = NOT_INIT; 2483 reg->imm = 0; 2484 } 2485 2486 /* mark destination R0 register as readable, since it contains 2487 * the value fetched from the packet 2488 */ 2489 regs[BPF_REG_0].type = UNKNOWN_VALUE; 2490 return 0; 2491 } 2492 2493 /* non-recursive DFS pseudo code 2494 * 1 procedure DFS-iterative(G,v): 2495 * 2 label v as discovered 2496 * 3 let S be a stack 2497 * 4 S.push(v) 2498 * 5 while S is not empty 2499 * 6 t <- S.pop() 2500 * 7 if t is what we're looking for: 2501 * 8 return t 2502 * 9 for all edges e in G.adjacentEdges(t) do 2503 * 10 if edge e is already labelled 2504 * 11 continue with the next edge 2505 * 12 w <- G.adjacentVertex(t,e) 2506 * 13 if vertex w is not discovered and not explored 2507 * 14 label e as tree-edge 2508 * 15 label w as discovered 2509 * 16 S.push(w) 2510 * 17 continue at 5 2511 * 18 else if vertex w is discovered 2512 * 19 label e as back-edge 2513 * 20 else 2514 * 21 // vertex w is explored 2515 * 22 label e as forward- or cross-edge 2516 * 23 label t as explored 2517 * 24 S.pop() 2518 * 2519 * convention: 2520 * 0x10 - discovered 2521 * 0x11 - discovered and fall-through edge labelled 2522 * 0x12 - discovered and fall-through and branch edges labelled 2523 * 0x20 - explored 2524 */ 2525 2526 enum { 2527 DISCOVERED = 0x10, 2528 EXPLORED = 0x20, 2529 FALLTHROUGH = 1, 2530 BRANCH = 2, 2531 }; 2532 2533 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) 2534 2535 static int *insn_stack; /* stack of insns to process */ 2536 static int cur_stack; /* current stack index */ 2537 static int *insn_state; 2538 2539 /* t, w, e - match pseudo-code above: 2540 * t - index of current instruction 2541 * w - next instruction 2542 * e - edge 2543 */ 2544 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) 2545 { 2546 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 2547 return 0; 2548 2549 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 2550 return 0; 2551 2552 if (w < 0 || w >= env->prog->len) { 2553 verbose("jump out of range from insn %d to %d\n", t, w); 2554 return -EINVAL; 2555 } 2556 2557 if (e == BRANCH) 2558 /* mark branch target for state pruning */ 2559 env->explored_states[w] = STATE_LIST_MARK; 2560 2561 if (insn_state[w] == 0) { 2562 /* tree-edge */ 2563 insn_state[t] = DISCOVERED | e; 2564 insn_state[w] = DISCOVERED; 2565 if (cur_stack >= env->prog->len) 2566 return -E2BIG; 2567 insn_stack[cur_stack++] = w; 2568 return 1; 2569 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 2570 verbose("back-edge from insn %d to %d\n", t, w); 2571 return -EINVAL; 2572 } else if (insn_state[w] == EXPLORED) { 2573 /* forward- or cross-edge */ 2574 insn_state[t] = DISCOVERED | e; 2575 } else { 2576 verbose("insn state internal bug\n"); 2577 return -EFAULT; 2578 } 2579 return 0; 2580 } 2581 2582 /* non-recursive depth-first-search to detect loops in BPF program 2583 * loop == back-edge in directed graph 2584 */ 2585 static int check_cfg(struct bpf_verifier_env *env) 2586 { 2587 struct bpf_insn *insns = env->prog->insnsi; 2588 int insn_cnt = 
env->prog->len; 2589 int ret = 0; 2590 int i, t; 2591 2592 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 2593 if (!insn_state) 2594 return -ENOMEM; 2595 2596 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 2597 if (!insn_stack) { 2598 kfree(insn_state); 2599 return -ENOMEM; 2600 } 2601 2602 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 2603 insn_stack[0] = 0; /* 0 is the first instruction */ 2604 cur_stack = 1; 2605 2606 peek_stack: 2607 if (cur_stack == 0) 2608 goto check_state; 2609 t = insn_stack[cur_stack - 1]; 2610 2611 if (BPF_CLASS(insns[t].code) == BPF_JMP) { 2612 u8 opcode = BPF_OP(insns[t].code); 2613 2614 if (opcode == BPF_EXIT) { 2615 goto mark_explored; 2616 } else if (opcode == BPF_CALL) { 2617 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2618 if (ret == 1) 2619 goto peek_stack; 2620 else if (ret < 0) 2621 goto err_free; 2622 if (t + 1 < insn_cnt) 2623 env->explored_states[t + 1] = STATE_LIST_MARK; 2624 } else if (opcode == BPF_JA) { 2625 if (BPF_SRC(insns[t].code) != BPF_K) { 2626 ret = -EINVAL; 2627 goto err_free; 2628 } 2629 /* unconditional jump with single edge */ 2630 ret = push_insn(t, t + insns[t].off + 1, 2631 FALLTHROUGH, env); 2632 if (ret == 1) 2633 goto peek_stack; 2634 else if (ret < 0) 2635 goto err_free; 2636 /* tell verifier to check for equivalent states 2637 * after every call and jump 2638 */ 2639 if (t + 1 < insn_cnt) 2640 env->explored_states[t + 1] = STATE_LIST_MARK; 2641 } else { 2642 /* conditional jump with two edges */ 2643 env->explored_states[t] = STATE_LIST_MARK; 2644 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2645 if (ret == 1) 2646 goto peek_stack; 2647 else if (ret < 0) 2648 goto err_free; 2649 2650 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); 2651 if (ret == 1) 2652 goto peek_stack; 2653 else if (ret < 0) 2654 goto err_free; 2655 } 2656 } else { 2657 /* all other non-branch instructions with single 2658 * fall-through edge 2659 */ 2660 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2661 if (ret == 1) 2662 goto peek_stack; 2663 else if (ret < 0) 2664 goto err_free; 2665 } 2666 2667 mark_explored: 2668 insn_state[t] = EXPLORED; 2669 if (cur_stack-- <= 0) { 2670 verbose("pop stack internal bug\n"); 2671 ret = -EFAULT; 2672 goto err_free; 2673 } 2674 goto peek_stack; 2675 2676 check_state: 2677 for (i = 0; i < insn_cnt; i++) { 2678 if (insn_state[i] != EXPLORED) { 2679 verbose("unreachable insn %d\n", i); 2680 ret = -EINVAL; 2681 goto err_free; 2682 } 2683 } 2684 ret = 0; /* cfg looks good */ 2685 2686 err_free: 2687 kfree(insn_state); 2688 kfree(insn_stack); 2689 return ret; 2690 } 2691 2692 /* the following conditions reduce the number of explored insns 2693 * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet 2694 */ 2695 static bool compare_ptrs_to_packet(struct bpf_reg_state *old, 2696 struct bpf_reg_state *cur) 2697 { 2698 if (old->id != cur->id) 2699 return false; 2700 2701 /* old ptr_to_packet is more conservative, since it allows smaller 2702 * range. Ex: 2703 * old(off=0,r=10) is equal to cur(off=0,r=20), because 2704 * old(off=0,r=10) means that with range=10 the verifier proceeded 2705 * further and found no issues with the program. Now we're in the same 2706 * spot with cur(off=0,r=20), so we're safe too, since anything further 2707 * will only be looking at most 10 bytes after this pointer. 
2708 */ 2709 if (old->off == cur->off && old->range < cur->range) 2710 return true; 2711 2712 /* old(off=20,r=10) is equal to cur(off=22,re=22 or 5 or 0) 2713 * since both cannot be used for packet access and safe(old) 2714 * pointer has smaller off that could be used for further 2715 * 'if (ptr > data_end)' check 2716 * Ex: 2717 * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean 2718 * that we cannot access the packet. 2719 * The safe range is: 2720 * [ptr, ptr + range - off) 2721 * so whenever off >=range, it means no safe bytes from this pointer. 2722 * When comparing old->off <= cur->off, it means that older code 2723 * went with smaller offset and that offset was later 2724 * used to figure out the safe range after 'if (ptr > data_end)' check 2725 * Say, 'old' state was explored like: 2726 * ... R3(off=0, r=0) 2727 * R4 = R3 + 20 2728 * ... now R4(off=20,r=0) <-- here 2729 * if (R4 > data_end) 2730 * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access. 2731 * ... the code further went all the way to bpf_exit. 2732 * Now the 'cur' state at the mark 'here' has R4(off=30,r=0). 2733 * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier 2734 * goes further, such cur_R4 will give larger safe packet range after 2735 * 'if (R4 > data_end)' and all further insn were already good with r=20, 2736 * so they will be good with r=30 and we can prune the search. 2737 */ 2738 if (old->off <= cur->off && 2739 old->off >= old->range && cur->off >= cur->range) 2740 return true; 2741 2742 return false; 2743 } 2744 2745 /* compare two verifier states 2746 * 2747 * all states stored in state_list are known to be valid, since 2748 * verifier reached 'bpf_exit' instruction through them 2749 * 2750 * this function is called when verifier exploring different branches of 2751 * execution popped from the state stack. If it sees an old state that has 2752 * more strict register state and more strict stack state then this execution 2753 * branch doesn't need to be explored further, since verifier already 2754 * concluded that more strict state leads to valid finish. 2755 * 2756 * Therefore two states are equivalent if register state is more conservative 2757 * and explored stack state is more conservative than the current one. 2758 * Example: 2759 * explored current 2760 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 2761 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 2762 * 2763 * In other words if current stack state (one being explored) has more 2764 * valid slots than old one that already passed validation, it means 2765 * the verifier can stop exploring and conclude that current state is valid too 2766 * 2767 * Similarly with registers. If explored state has register type as invalid 2768 * whereas register type in current state is meaningful, it means that 2769 * the current state will reach 'bpf_exit' instruction safely 2770 */ 2771 static bool states_equal(struct bpf_verifier_env *env, 2772 struct bpf_verifier_state *old, 2773 struct bpf_verifier_state *cur) 2774 { 2775 bool varlen_map_access = env->varlen_map_value_access; 2776 struct bpf_reg_state *rold, *rcur; 2777 int i; 2778 2779 for (i = 0; i < MAX_BPF_REG; i++) { 2780 rold = &old->regs[i]; 2781 rcur = &cur->regs[i]; 2782 2783 if (memcmp(rold, rcur, sizeof(*rold)) == 0) 2784 continue; 2785 2786 /* If the ranges were not the same, but everything else was and 2787 * we didn't do a variable access into a map then we are a-ok. 
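		 *
		 * Ex (illustrative): old R1 = UNKNOWN_VALUE with min/max 0/20
		 * vs cur R1 = UNKNOWN_VALUE with min/max 0/100, everything
		 * else identical: the states are still treated as equivalent
		 * here, unless the program did a variable-offset map access,
		 * in which case the differing ranges do matter.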
		 */
		if (!varlen_map_access &&
		    memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0)
			continue;

		/* If we didn't do a variable map access then again we don't
		 * care about the mismatched range values and it's ok if our
		 * old type was UNKNOWN and we didn't go to a NOT_INIT'ed reg.
		 */
		if (rold->type == NOT_INIT ||
		    (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
		     rcur->type != NOT_INIT))
			continue;

		/* Don't care about the reg->id in this case. */
		if (rold->type == PTR_TO_MAP_VALUE_OR_NULL &&
		    rcur->type == PTR_TO_MAP_VALUE_OR_NULL &&
		    rold->map_ptr == rcur->map_ptr)
			continue;

		if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
		    compare_ptrs_to_packet(rold, rcur))
			continue;

		return false;
	}

	for (i = 0; i < MAX_BPF_STACK; i++) {
		if (old->stack_slot_type[i] == STACK_INVALID)
			continue;
		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
			/* Ex: the old, explored (safe) state has STACK_SPILL in
			 * this stack slot, but the current state has STACK_MISC ->
			 * these verifier states are not equivalent;
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
			   &cur->spilled_regs[i / BPF_REG_SIZE],
			   sizeof(old->spilled_regs[0])))
			/* when explored and current stack slot types are
			 * the same, check that the stored pointer types
			 * are the same as well.
			 * Ex: an explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8}
			 * but the current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16};
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
		else
			continue;
	}
	return true;
}

static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_verifier_state_list *new_sl;
	struct bpf_verifier_state_list *sl;

	sl = env->explored_states[insn_idx];
	if (!sl)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	while (sl != STATE_LIST_MARK) {
		if (states_equal(env, &sl->state, &env->cur_state))
			/* reached equivalent register/stack state,
			 * prune the search
			 */
			return 1;
		sl = sl->next;
	}

	/* there were no equivalent states, remember current one.
	 * technically the current state is not proven to be safe yet,
	 * but it will either reach bpf_exit (which means it's safe) or
	 * it will be rejected.
Since there are no loops, we won't be 2872 * seeing this 'insn_idx' instruction again on the way to bpf_exit 2873 */ 2874 new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER); 2875 if (!new_sl) 2876 return -ENOMEM; 2877 2878 /* add new state to the head of linked list */ 2879 memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); 2880 new_sl->next = env->explored_states[insn_idx]; 2881 env->explored_states[insn_idx] = new_sl; 2882 return 0; 2883 } 2884 2885 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, 2886 int insn_idx, int prev_insn_idx) 2887 { 2888 if (!env->analyzer_ops || !env->analyzer_ops->insn_hook) 2889 return 0; 2890 2891 return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx); 2892 } 2893 2894 static int do_check(struct bpf_verifier_env *env) 2895 { 2896 struct bpf_verifier_state *state = &env->cur_state; 2897 struct bpf_insn *insns = env->prog->insnsi; 2898 struct bpf_reg_state *regs = state->regs; 2899 int insn_cnt = env->prog->len; 2900 int insn_idx, prev_insn_idx = 0; 2901 int insn_processed = 0; 2902 bool do_print_state = false; 2903 2904 init_reg_state(regs); 2905 insn_idx = 0; 2906 env->varlen_map_value_access = false; 2907 for (;;) { 2908 struct bpf_insn *insn; 2909 u8 class; 2910 int err; 2911 2912 if (insn_idx >= insn_cnt) { 2913 verbose("invalid insn idx %d insn_cnt %d\n", 2914 insn_idx, insn_cnt); 2915 return -EFAULT; 2916 } 2917 2918 insn = &insns[insn_idx]; 2919 class = BPF_CLASS(insn->code); 2920 2921 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 2922 verbose("BPF program is too large. Processed %d insn\n", 2923 insn_processed); 2924 return -E2BIG; 2925 } 2926 2927 err = is_state_visited(env, insn_idx); 2928 if (err < 0) 2929 return err; 2930 if (err == 1) { 2931 /* found equivalent state, can prune the search */ 2932 if (log_level) { 2933 if (do_print_state) 2934 verbose("\nfrom %d to %d: safe\n", 2935 prev_insn_idx, insn_idx); 2936 else 2937 verbose("%d: safe\n", insn_idx); 2938 } 2939 goto process_bpf_exit; 2940 } 2941 2942 if (need_resched()) 2943 cond_resched(); 2944 2945 if (log_level > 1 || (log_level && do_print_state)) { 2946 if (log_level > 1) 2947 verbose("%d:", insn_idx); 2948 else 2949 verbose("\nfrom %d to %d:", 2950 prev_insn_idx, insn_idx); 2951 print_verifier_state(&env->cur_state); 2952 do_print_state = false; 2953 } 2954 2955 if (log_level) { 2956 verbose("%d: ", insn_idx); 2957 print_bpf_insn(env, insn); 2958 } 2959 2960 err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); 2961 if (err) 2962 return err; 2963 2964 if (class == BPF_ALU || class == BPF_ALU64) { 2965 err = check_alu_op(env, insn); 2966 if (err) 2967 return err; 2968 2969 } else if (class == BPF_LDX) { 2970 enum bpf_reg_type *prev_src_type, src_reg_type; 2971 2972 /* check for reserved fields is already done */ 2973 2974 /* check src operand */ 2975 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2976 if (err) 2977 return err; 2978 2979 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); 2980 if (err) 2981 return err; 2982 2983 src_reg_type = regs[insn->src_reg].type; 2984 2985 /* check that memory (src_reg + off) is readable, 2986 * the state of dst_reg will be updated by this func 2987 */ 2988 err = check_mem_access(env, insn->src_reg, insn->off, 2989 BPF_SIZE(insn->code), BPF_READ, 2990 insn->dst_reg); 2991 if (err) 2992 return err; 2993 2994 if (BPF_SIZE(insn->code) != BPF_W && 2995 BPF_SIZE(insn->code) != BPF_DW) { 2996 insn_idx++; 2997 continue; 2998 } 2999 3000 prev_src_type = 
&env->insn_aux_data[insn_idx].ptr_type; 3001 3002 if (*prev_src_type == NOT_INIT) { 3003 /* saw a valid insn 3004 * dst_reg = *(u32 *)(src_reg + off) 3005 * save type to validate intersecting paths 3006 */ 3007 *prev_src_type = src_reg_type; 3008 3009 } else if (src_reg_type != *prev_src_type && 3010 (src_reg_type == PTR_TO_CTX || 3011 *prev_src_type == PTR_TO_CTX)) { 3012 /* ABuser program is trying to use the same insn 3013 * dst_reg = *(u32*) (src_reg + off) 3014 * with different pointer types: 3015 * src_reg == ctx in one branch and 3016 * src_reg == stack|map in some other branch. 3017 * Reject it. 3018 */ 3019 verbose("same insn cannot be used with different pointers\n"); 3020 return -EINVAL; 3021 } 3022 3023 } else if (class == BPF_STX) { 3024 enum bpf_reg_type *prev_dst_type, dst_reg_type; 3025 3026 if (BPF_MODE(insn->code) == BPF_XADD) { 3027 err = check_xadd(env, insn); 3028 if (err) 3029 return err; 3030 insn_idx++; 3031 continue; 3032 } 3033 3034 /* check src1 operand */ 3035 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 3036 if (err) 3037 return err; 3038 /* check src2 operand */ 3039 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 3040 if (err) 3041 return err; 3042 3043 dst_reg_type = regs[insn->dst_reg].type; 3044 3045 /* check that memory (dst_reg + off) is writeable */ 3046 err = check_mem_access(env, insn->dst_reg, insn->off, 3047 BPF_SIZE(insn->code), BPF_WRITE, 3048 insn->src_reg); 3049 if (err) 3050 return err; 3051 3052 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; 3053 3054 if (*prev_dst_type == NOT_INIT) { 3055 *prev_dst_type = dst_reg_type; 3056 } else if (dst_reg_type != *prev_dst_type && 3057 (dst_reg_type == PTR_TO_CTX || 3058 *prev_dst_type == PTR_TO_CTX)) { 3059 verbose("same insn cannot be used with different pointers\n"); 3060 return -EINVAL; 3061 } 3062 3063 } else if (class == BPF_ST) { 3064 if (BPF_MODE(insn->code) != BPF_MEM || 3065 insn->src_reg != BPF_REG_0) { 3066 verbose("BPF_ST uses reserved fields\n"); 3067 return -EINVAL; 3068 } 3069 /* check src operand */ 3070 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 3071 if (err) 3072 return err; 3073 3074 /* check that memory (dst_reg + off) is writeable */ 3075 err = check_mem_access(env, insn->dst_reg, insn->off, 3076 BPF_SIZE(insn->code), BPF_WRITE, 3077 -1); 3078 if (err) 3079 return err; 3080 3081 } else if (class == BPF_JMP) { 3082 u8 opcode = BPF_OP(insn->code); 3083 3084 if (opcode == BPF_CALL) { 3085 if (BPF_SRC(insn->code) != BPF_K || 3086 insn->off != 0 || 3087 insn->src_reg != BPF_REG_0 || 3088 insn->dst_reg != BPF_REG_0) { 3089 verbose("BPF_CALL uses reserved fields\n"); 3090 return -EINVAL; 3091 } 3092 3093 err = check_call(env, insn->imm, insn_idx); 3094 if (err) 3095 return err; 3096 3097 } else if (opcode == BPF_JA) { 3098 if (BPF_SRC(insn->code) != BPF_K || 3099 insn->imm != 0 || 3100 insn->src_reg != BPF_REG_0 || 3101 insn->dst_reg != BPF_REG_0) { 3102 verbose("BPF_JA uses reserved fields\n"); 3103 return -EINVAL; 3104 } 3105 3106 insn_idx += insn->off + 1; 3107 continue; 3108 3109 } else if (opcode == BPF_EXIT) { 3110 if (BPF_SRC(insn->code) != BPF_K || 3111 insn->imm != 0 || 3112 insn->src_reg != BPF_REG_0 || 3113 insn->dst_reg != BPF_REG_0) { 3114 verbose("BPF_EXIT uses reserved fields\n"); 3115 return -EINVAL; 3116 } 3117 3118 /* eBPF calling convetion is such that R0 is used 3119 * to return the value from eBPF program. 
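 *
 * e.g. the smallest program accepted here is, illustratively,
 *
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),
 *
 * whereas a lone BPF_EXIT_INSN() is rejected below because R0 is still
 * NOT_INIT at the exit.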
3120 * Make sure that it's readable at this time 3121 * of bpf_exit, which means that program wrote 3122 * something into it earlier 3123 */ 3124 err = check_reg_arg(regs, BPF_REG_0, SRC_OP); 3125 if (err) 3126 return err; 3127 3128 if (is_pointer_value(env, BPF_REG_0)) { 3129 verbose("R0 leaks addr as return value\n"); 3130 return -EACCES; 3131 } 3132 3133 process_bpf_exit: 3134 insn_idx = pop_stack(env, &prev_insn_idx); 3135 if (insn_idx < 0) { 3136 break; 3137 } else { 3138 do_print_state = true; 3139 continue; 3140 } 3141 } else { 3142 err = check_cond_jmp_op(env, insn, &insn_idx); 3143 if (err) 3144 return err; 3145 } 3146 } else if (class == BPF_LD) { 3147 u8 mode = BPF_MODE(insn->code); 3148 3149 if (mode == BPF_ABS || mode == BPF_IND) { 3150 err = check_ld_abs(env, insn); 3151 if (err) 3152 return err; 3153 3154 } else if (mode == BPF_IMM) { 3155 err = check_ld_imm(env, insn); 3156 if (err) 3157 return err; 3158 3159 insn_idx++; 3160 } else { 3161 verbose("invalid BPF_LD mode\n"); 3162 return -EINVAL; 3163 } 3164 reset_reg_range_values(regs, insn->dst_reg); 3165 } else { 3166 verbose("unknown insn class %d\n", class); 3167 return -EINVAL; 3168 } 3169 3170 insn_idx++; 3171 } 3172 3173 verbose("processed %d insns\n", insn_processed); 3174 return 0; 3175 } 3176 3177 static int check_map_prealloc(struct bpf_map *map) 3178 { 3179 return (map->map_type != BPF_MAP_TYPE_HASH && 3180 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 3181 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 3182 !(map->map_flags & BPF_F_NO_PREALLOC); 3183 } 3184 3185 static int check_map_prog_compatibility(struct bpf_map *map, 3186 struct bpf_prog *prog) 3187 3188 { 3189 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use 3190 * preallocated hash maps, since doing memory allocation 3191 * in overflow_handler can crash depending on where nmi got 3192 * triggered. 
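 *
 * e.g. a BPF_MAP_TYPE_HASH created with BPF_F_NO_PREALLOC (or a map-in-map
 * whose inner map was created that way) is rejected for
 * BPF_PROG_TYPE_PERF_EVENT programs, while the same map created without
 * the flag is accepted; other program types are not restricted here.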
3193 */ 3194 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 3195 if (!check_map_prealloc(map)) { 3196 verbose("perf_event programs can only use preallocated hash map\n"); 3197 return -EINVAL; 3198 } 3199 if (map->inner_map_meta && 3200 !check_map_prealloc(map->inner_map_meta)) { 3201 verbose("perf_event programs can only use preallocated inner hash map\n"); 3202 return -EINVAL; 3203 } 3204 } 3205 return 0; 3206 } 3207 3208 /* look for pseudo eBPF instructions that access map FDs and 3209 * replace them with actual map pointers 3210 */ 3211 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 3212 { 3213 struct bpf_insn *insn = env->prog->insnsi; 3214 int insn_cnt = env->prog->len; 3215 int i, j, err; 3216 3217 err = bpf_prog_calc_tag(env->prog); 3218 if (err) 3219 return err; 3220 3221 for (i = 0; i < insn_cnt; i++, insn++) { 3222 if (BPF_CLASS(insn->code) == BPF_LDX && 3223 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 3224 verbose("BPF_LDX uses reserved fields\n"); 3225 return -EINVAL; 3226 } 3227 3228 if (BPF_CLASS(insn->code) == BPF_STX && 3229 ((BPF_MODE(insn->code) != BPF_MEM && 3230 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 3231 verbose("BPF_STX uses reserved fields\n"); 3232 return -EINVAL; 3233 } 3234 3235 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 3236 struct bpf_map *map; 3237 struct fd f; 3238 3239 if (i == insn_cnt - 1 || insn[1].code != 0 || 3240 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 3241 insn[1].off != 0) { 3242 verbose("invalid bpf_ld_imm64 insn\n"); 3243 return -EINVAL; 3244 } 3245 3246 if (insn->src_reg == 0) 3247 /* valid generic load 64-bit imm */ 3248 goto next_insn; 3249 3250 if (insn->src_reg != BPF_PSEUDO_MAP_FD) { 3251 verbose("unrecognized bpf_ld_imm64 insn\n"); 3252 return -EINVAL; 3253 } 3254 3255 f = fdget(insn->imm); 3256 map = __bpf_map_get(f); 3257 if (IS_ERR(map)) { 3258 verbose("fd %d is not pointing to valid bpf_map\n", 3259 insn->imm); 3260 return PTR_ERR(map); 3261 } 3262 3263 err = check_map_prog_compatibility(map, env->prog); 3264 if (err) { 3265 fdput(f); 3266 return err; 3267 } 3268 3269 /* store map pointer inside BPF_LD_IMM64 instruction */ 3270 insn[0].imm = (u32) (unsigned long) map; 3271 insn[1].imm = ((u64) (unsigned long) map) >> 32; 3272 3273 /* check whether we recorded this map already */ 3274 for (j = 0; j < env->used_map_cnt; j++) 3275 if (env->used_maps[j] == map) { 3276 fdput(f); 3277 goto next_insn; 3278 } 3279 3280 if (env->used_map_cnt >= MAX_USED_MAPS) { 3281 fdput(f); 3282 return -E2BIG; 3283 } 3284 3285 /* hold the map. If the program is rejected by verifier, 3286 * the map will be released by release_maps() or it 3287 * will be used by the valid program until it's unloaded 3288 * and all maps are released in free_bpf_prog_info() 3289 */ 3290 map = bpf_map_inc(map, false); 3291 if (IS_ERR(map)) { 3292 fdput(f); 3293 return PTR_ERR(map); 3294 } 3295 env->used_maps[env->used_map_cnt++] = map; 3296 3297 fdput(f); 3298 next_insn: 3299 insn++; 3300 i++; 3301 } 3302 } 3303 3304 /* now all pseudo BPF_LD_IMM64 instructions load valid 3305 * 'struct bpf_map *' into a register instead of user map_fd. 3306 * These pointers will be used later by verifier to validate map access. 
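 *
 * e.g. a hypothetical BPF_LD_MAP_FD(BPF_REG_1, 4) arrives from user space
 * as a two-insn BPF_LD | BPF_IMM | BPF_DW with src_reg == BPF_PSEUDO_MAP_FD
 * and insn[0].imm == 4 (the map fd); after this pass insn[0].imm and
 * insn[1].imm hold the low and high 32 bits of the in-kernel
 * 'struct bpf_map *'.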
3307 */ 3308 return 0; 3309 } 3310 3311 /* drop refcnt of maps used by the rejected program */ 3312 static void release_maps(struct bpf_verifier_env *env) 3313 { 3314 int i; 3315 3316 for (i = 0; i < env->used_map_cnt; i++) 3317 bpf_map_put(env->used_maps[i]); 3318 } 3319 3320 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 3321 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 3322 { 3323 struct bpf_insn *insn = env->prog->insnsi; 3324 int insn_cnt = env->prog->len; 3325 int i; 3326 3327 for (i = 0; i < insn_cnt; i++, insn++) 3328 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 3329 insn->src_reg = 0; 3330 } 3331 3332 /* single env->prog->insni[off] instruction was replaced with the range 3333 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 3334 * [0, off) and [off, end) to new locations, so the patched range stays zero 3335 */ 3336 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, 3337 u32 off, u32 cnt) 3338 { 3339 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 3340 3341 if (cnt == 1) 3342 return 0; 3343 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); 3344 if (!new_data) 3345 return -ENOMEM; 3346 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 3347 memcpy(new_data + off + cnt - 1, old_data + off, 3348 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 3349 env->insn_aux_data = new_data; 3350 vfree(old_data); 3351 return 0; 3352 } 3353 3354 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 3355 const struct bpf_insn *patch, u32 len) 3356 { 3357 struct bpf_prog *new_prog; 3358 3359 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 3360 if (!new_prog) 3361 return NULL; 3362 if (adjust_insn_aux_data(env, new_prog->len, off, len)) 3363 return NULL; 3364 return new_prog; 3365 } 3366 3367 /* convert load instructions that access fields of 'struct __sk_buff' 3368 * into sequence of instructions that access fields of 'struct sk_buff' 3369 */ 3370 static int convert_ctx_accesses(struct bpf_verifier_env *env) 3371 { 3372 const struct bpf_verifier_ops *ops = env->prog->aux->ops; 3373 const int insn_cnt = env->prog->len; 3374 struct bpf_insn insn_buf[16], *insn; 3375 struct bpf_prog *new_prog; 3376 enum bpf_access_type type; 3377 int i, cnt, delta = 0; 3378 3379 if (ops->gen_prologue) { 3380 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 3381 env->prog); 3382 if (cnt >= ARRAY_SIZE(insn_buf)) { 3383 verbose("bpf verifier is misconfigured\n"); 3384 return -EINVAL; 3385 } else if (cnt) { 3386 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 3387 if (!new_prog) 3388 return -ENOMEM; 3389 3390 env->prog = new_prog; 3391 delta += cnt - 1; 3392 } 3393 } 3394 3395 if (!ops->convert_ctx_access) 3396 return 0; 3397 3398 insn = env->prog->insnsi + delta; 3399 3400 for (i = 0; i < insn_cnt; i++, insn++) { 3401 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 3402 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 3403 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 3404 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 3405 type = BPF_READ; 3406 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 3407 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 3408 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 3409 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 3410 type = BPF_WRITE; 3411 else 3412 continue; 3413 3414 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) 3415 continue; 3416 3417 cnt = ops->convert_ctx_access(type, 
insn, insn_buf, env->prog); 3418 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 3419 verbose("bpf verifier is misconfigured\n"); 3420 return -EINVAL; 3421 } 3422 3423 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 3424 if (!new_prog) 3425 return -ENOMEM; 3426 3427 delta += cnt - 1; 3428 3429 /* keep walking new program and skip insns we just inserted */ 3430 env->prog = new_prog; 3431 insn = new_prog->insnsi + i + delta; 3432 } 3433 3434 return 0; 3435 } 3436 3437 /* fixup insn->imm field of bpf_call instructions 3438 * and inline eligible helpers as explicit sequence of BPF instructions 3439 * 3440 * this function is called after eBPF program passed verification 3441 */ 3442 static int fixup_bpf_calls(struct bpf_verifier_env *env) 3443 { 3444 struct bpf_prog *prog = env->prog; 3445 struct bpf_insn *insn = prog->insnsi; 3446 const struct bpf_func_proto *fn; 3447 const int insn_cnt = prog->len; 3448 struct bpf_insn insn_buf[16]; 3449 struct bpf_prog *new_prog; 3450 struct bpf_map *map_ptr; 3451 int i, cnt, delta = 0; 3452 3453 for (i = 0; i < insn_cnt; i++, insn++) { 3454 if (insn->code != (BPF_JMP | BPF_CALL)) 3455 continue; 3456 3457 if (insn->imm == BPF_FUNC_get_route_realm) 3458 prog->dst_needed = 1; 3459 if (insn->imm == BPF_FUNC_get_prandom_u32) 3460 bpf_user_rnd_init_once(); 3461 if (insn->imm == BPF_FUNC_tail_call) { 3462 /* If we tail call into other programs, we 3463 * cannot make any assumptions since they can 3464 * be replaced dynamically during runtime in 3465 * the program array. 3466 */ 3467 prog->cb_access = 1; 3468 3469 /* mark bpf_tail_call as different opcode to avoid 3470 * conditional branch in the interpeter for every normal 3471 * call and to prevent accidental JITing by JIT compiler 3472 * that doesn't support bpf_tail_call yet 3473 */ 3474 insn->imm = 0; 3475 insn->code |= BPF_X; 3476 continue; 3477 } 3478 3479 if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) { 3480 map_ptr = env->insn_aux_data[i + delta].map_ptr; 3481 if (map_ptr == BPF_MAP_PTR_POISON || 3482 !map_ptr->ops->map_gen_lookup) 3483 goto patch_call_imm; 3484 3485 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); 3486 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 3487 verbose("bpf verifier is misconfigured\n"); 3488 return -EINVAL; 3489 } 3490 3491 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 3492 cnt); 3493 if (!new_prog) 3494 return -ENOMEM; 3495 3496 delta += cnt - 1; 3497 3498 /* keep walking new program and skip insns we just inserted */ 3499 env->prog = prog = new_prog; 3500 insn = new_prog->insnsi + i + delta; 3501 continue; 3502 } 3503 3504 patch_call_imm: 3505 fn = prog->aux->ops->get_func_proto(insn->imm); 3506 /* all functions that have prototype and verifier allowed 3507 * programs to call them, must be real in-kernel functions 3508 */ 3509 if (!fn->func) { 3510 verbose("kernel subsystem misconfigured func %s#%d\n", 3511 func_id_name(insn->imm), insn->imm); 3512 return -EFAULT; 3513 } 3514 insn->imm = fn->func - __bpf_call_base; 3515 } 3516 3517 return 0; 3518 } 3519 3520 static void free_states(struct bpf_verifier_env *env) 3521 { 3522 struct bpf_verifier_state_list *sl, *sln; 3523 int i; 3524 3525 if (!env->explored_states) 3526 return; 3527 3528 for (i = 0; i < env->prog->len; i++) { 3529 sl = env->explored_states[i]; 3530 3531 if (sl) 3532 while (sl != STATE_LIST_MARK) { 3533 sln = sl->next; 3534 kfree(sl); 3535 sl = sln; 3536 } 3537 } 3538 3539 kfree(env->explored_states); 3540 } 3541 3542 int bpf_check(struct bpf_prog **prog, union 
bpf_attr *attr) 3543 { 3544 char __user *log_ubuf = NULL; 3545 struct bpf_verifier_env *env; 3546 int ret = -EINVAL; 3547 3548 /* 'struct bpf_verifier_env' can be global, but since it's not small, 3549 * allocate/free it every time bpf_check() is called 3550 */ 3551 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 3552 if (!env) 3553 return -ENOMEM; 3554 3555 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 3556 (*prog)->len); 3557 ret = -ENOMEM; 3558 if (!env->insn_aux_data) 3559 goto err_free_env; 3560 env->prog = *prog; 3561 3562 /* grab the mutex to protect few globals used by verifier */ 3563 mutex_lock(&bpf_verifier_lock); 3564 3565 if (attr->log_level || attr->log_buf || attr->log_size) { 3566 /* user requested verbose verifier output 3567 * and supplied buffer to store the verification trace 3568 */ 3569 log_level = attr->log_level; 3570 log_ubuf = (char __user *) (unsigned long) attr->log_buf; 3571 log_size = attr->log_size; 3572 log_len = 0; 3573 3574 ret = -EINVAL; 3575 /* log_* values have to be sane */ 3576 if (log_size < 128 || log_size > UINT_MAX >> 8 || 3577 log_level == 0 || log_ubuf == NULL) 3578 goto err_unlock; 3579 3580 ret = -ENOMEM; 3581 log_buf = vmalloc(log_size); 3582 if (!log_buf) 3583 goto err_unlock; 3584 } else { 3585 log_level = 0; 3586 } 3587 if (attr->prog_flags & BPF_F_STRICT_ALIGNMENT) 3588 env->strict_alignment = true; 3589 else 3590 env->strict_alignment = false; 3591 3592 ret = replace_map_fd_with_map_ptr(env); 3593 if (ret < 0) 3594 goto skip_full_check; 3595 3596 env->explored_states = kcalloc(env->prog->len, 3597 sizeof(struct bpf_verifier_state_list *), 3598 GFP_USER); 3599 ret = -ENOMEM; 3600 if (!env->explored_states) 3601 goto skip_full_check; 3602 3603 ret = check_cfg(env); 3604 if (ret < 0) 3605 goto skip_full_check; 3606 3607 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 3608 3609 ret = do_check(env); 3610 3611 skip_full_check: 3612 while (pop_stack(env, NULL) >= 0); 3613 free_states(env); 3614 3615 if (ret == 0) 3616 /* program is valid, convert *(u32*)(ctx + off) accesses */ 3617 ret = convert_ctx_accesses(env); 3618 3619 if (ret == 0) 3620 ret = fixup_bpf_calls(env); 3621 3622 if (log_level && log_len >= log_size - 1) { 3623 BUG_ON(log_len >= log_size); 3624 /* verifier log exceeded user supplied buffer */ 3625 ret = -ENOSPC; 3626 /* fall through to return what was recorded */ 3627 } 3628 3629 /* copy verifier log back to user space including trailing zero */ 3630 if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) { 3631 ret = -EFAULT; 3632 goto free_log_buf; 3633 } 3634 3635 if (ret == 0 && env->used_map_cnt) { 3636 /* if program passed verifier, update used_maps in bpf_prog_info */ 3637 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 3638 sizeof(env->used_maps[0]), 3639 GFP_KERNEL); 3640 3641 if (!env->prog->aux->used_maps) { 3642 ret = -ENOMEM; 3643 goto free_log_buf; 3644 } 3645 3646 memcpy(env->prog->aux->used_maps, env->used_maps, 3647 sizeof(env->used_maps[0]) * env->used_map_cnt); 3648 env->prog->aux->used_map_cnt = env->used_map_cnt; 3649 3650 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 3651 * bpf_ld_imm64 instructions 3652 */ 3653 convert_pseudo_ld_imm64(env); 3654 } 3655 3656 free_log_buf: 3657 if (log_level) 3658 vfree(log_buf); 3659 if (!env->prog->aux->used_maps) 3660 /* if we didn't copy map pointers into bpf_prog_info, release 3661 * them now. Otherwise free_bpf_prog_info() will release them. 
3662 */ 3663 release_maps(env); 3664 *prog = env->prog; 3665 err_unlock: 3666 mutex_unlock(&bpf_verifier_lock); 3667 vfree(env->insn_aux_data); 3668 err_free_env: 3669 kfree(env); 3670 return ret; 3671 } 3672 3673 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, 3674 void *priv) 3675 { 3676 struct bpf_verifier_env *env; 3677 int ret; 3678 3679 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 3680 if (!env) 3681 return -ENOMEM; 3682 3683 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 3684 prog->len); 3685 ret = -ENOMEM; 3686 if (!env->insn_aux_data) 3687 goto err_free_env; 3688 env->prog = prog; 3689 env->analyzer_ops = ops; 3690 env->analyzer_priv = priv; 3691 3692 /* grab the mutex to protect few globals used by verifier */ 3693 mutex_lock(&bpf_verifier_lock); 3694 3695 log_level = 0; 3696 env->strict_alignment = false; 3697 3698 env->explored_states = kcalloc(env->prog->len, 3699 sizeof(struct bpf_verifier_state_list *), 3700 GFP_KERNEL); 3701 ret = -ENOMEM; 3702 if (!env->explored_states) 3703 goto skip_full_check; 3704 3705 ret = check_cfg(env); 3706 if (ret < 0) 3707 goto skip_full_check; 3708 3709 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 3710 3711 ret = do_check(env); 3712 3713 skip_full_check: 3714 while (pop_stack(env, NULL) >= 0); 3715 free_states(env); 3716 3717 mutex_unlock(&bpf_verifier_lock); 3718 vfree(env->insn_aux_data); 3719 err_free_env: 3720 kfree(env); 3721 return ret; 3722 } 3723 EXPORT_SYMBOL_GPL(bpf_analyzer); 3724
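
/* Usage sketch (illustrative only, not part of the verifier): bpf_check() is
 * reached via the BPF_PROG_LOAD command of the bpf(2) syscall. A user-space
 * loader that wants the verbose() trace produced above could fill the
 * attribute roughly like this. The program, the 4096-byte buffer size and the
 * SOCKET_FILTER type are arbitrary; the BPF_* insn macros are assumed to be
 * available in user space (e.g. copied as in samples/bpf); log_size must be
 * at least 128 and log_level non-zero:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	char vlog[4096];
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *		.log_buf   = (__u64)(unsigned long)vlog,
 *		.log_size  = sizeof(vlog),
 *		.log_level = 1,
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * On rejection the syscall fails and vlog holds the verification trace;
 * bpf_check() returns -ENOSPC if the log did not fit into log_size.
 */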