1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 2 * Copyright (c) 2016 Facebook 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public 6 * License as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/kernel.h> 14 #include <linux/types.h> 15 #include <linux/slab.h> 16 #include <linux/bpf.h> 17 #include <linux/filter.h> 18 #include <net/netlink.h> 19 #include <linux/file.h> 20 #include <linux/vmalloc.h> 21 22 /* bpf_check() is a static code analyzer that walks eBPF program 23 * instruction by instruction and updates register/stack state. 24 * All paths of conditional branches are analyzed until 'bpf_exit' insn. 25 * 26 * The first pass is depth-first-search to check that the program is a DAG. 27 * It rejects the following programs: 28 * - larger than BPF_MAXINSNS insns 29 * - if loop is present (detected via back-edge) 30 * - unreachable insns exist (shouldn't be a forest. program = one function) 31 * - out of bounds or malformed jumps 32 * The second pass is all possible path descent from the 1st insn. 33 * Since it's analyzing all pathes through the program, the length of the 34 * analysis is limited to 32k insn, which may be hit even if total number of 35 * insn is less then 4K, but there are too many branches that change stack/regs. 36 * Number of 'branches to be analyzed' is limited to 1k 37 * 38 * On entry to each instruction, each register has a type, and the instruction 39 * changes the types of the registers depending on instruction semantics. 40 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is 41 * copied to R1. 42 * 43 * All registers are 64-bit. 44 * R0 - return register 45 * R1-R5 argument passing registers 46 * R6-R9 callee saved registers 47 * R10 - frame pointer read-only 48 * 49 * At the start of BPF program the register R1 contains a pointer to bpf_context 50 * and has type PTR_TO_CTX. 51 * 52 * Verifier tracks arithmetic operations on pointers in case: 53 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 54 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), 55 * 1st insn copies R10 (which has FRAME_PTR) type into R1 56 * and 2nd arithmetic instruction is pattern matched to recognize 57 * that it wants to construct a pointer to some element within stack. 58 * So after 2nd insn, the register R1 has type PTR_TO_STACK 59 * (and -20 constant is saved for further stack bounds checking). 60 * Meaning that this reg is a pointer to stack plus known immediate constant. 61 * 62 * Most of the time the registers have UNKNOWN_VALUE type, which 63 * means the register has some value, but it's not a valid pointer. 64 * (like pointer plus pointer becomes UNKNOWN_VALUE type) 65 * 66 * When verifier sees load or store instructions the type of base register 67 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer 68 * types recognized by check_mem_access() function. 69 * 70 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' 71 * and the range of [ptr, ptr + map's value_size) is accessible. 72 * 73 * registers used to pass values to function calls are checked against 74 * function argument constraints. 75 * 76 * ARG_PTR_TO_MAP_KEY is one of such argument constraints. 77 * It means that the register type passed to this function must be 78 * PTR_TO_STACK and it will be used inside the function as 79 * 'pointer to map element key' 80 * 81 * For example the argument constraints for bpf_map_lookup_elem(): 82 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 83 * .arg1_type = ARG_CONST_MAP_PTR, 84 * .arg2_type = ARG_PTR_TO_MAP_KEY, 85 * 86 * ret_type says that this function returns 'pointer to map elem value or null' 87 * function expects 1st argument to be a const pointer to 'struct bpf_map' and 88 * 2nd argument should be a pointer to stack, which will be used inside 89 * the helper function as a pointer to map element key. 90 * 91 * On the kernel side the helper function looks like: 92 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 93 * { 94 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; 95 * void *key = (void *) (unsigned long) r2; 96 * void *value; 97 * 98 * here kernel can access 'key' and 'map' pointers safely, knowing that 99 * [key, key + map->key_size) bytes are valid and were initialized on 100 * the stack of eBPF program. 101 * } 102 * 103 * Corresponding eBPF program may look like: 104 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR 105 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK 106 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP 107 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 108 * here verifier looks at prototype of map_lookup_elem() and sees: 109 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, 110 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes 111 * 112 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, 113 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits 114 * and were initialized prior to this call. 115 * If it's ok, then verifier allows this BPF_CALL insn and looks at 116 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets 117 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function 118 * returns ether pointer to map value or NULL. 119 * 120 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' 121 * insn, the register holding that pointer in the true branch changes state to 122 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false 123 * branch. See check_cond_jmp_op(). 124 * 125 * After the call R0 is set to return type of the function and registers R1-R5 126 * are set to NOT_INIT to indicate that they are no longer readable. 127 */ 128 129 struct reg_state { 130 enum bpf_reg_type type; 131 union { 132 /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ 133 s64 imm; 134 135 /* valid when type == PTR_TO_PACKET* */ 136 struct { 137 u32 id; 138 u16 off; 139 u16 range; 140 }; 141 142 /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | 143 * PTR_TO_MAP_VALUE_OR_NULL 144 */ 145 struct bpf_map *map_ptr; 146 }; 147 }; 148 149 enum bpf_stack_slot_type { 150 STACK_INVALID, /* nothing was stored in this stack slot */ 151 STACK_SPILL, /* register spilled into stack */ 152 STACK_MISC /* BPF program wrote some data into this slot */ 153 }; 154 155 #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ 156 157 /* state of the program: 158 * type of all registers and stack info 159 */ 160 struct verifier_state { 161 struct reg_state regs[MAX_BPF_REG]; 162 u8 stack_slot_type[MAX_BPF_STACK]; 163 struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; 164 }; 165 166 /* linked list of verifier states used to prune search */ 167 struct verifier_state_list { 168 struct verifier_state state; 169 struct verifier_state_list *next; 170 }; 171 172 /* verifier_state + insn_idx are pushed to stack when branch is encountered */ 173 struct verifier_stack_elem { 174 /* verifer state is 'st' 175 * before processing instruction 'insn_idx' 176 * and after processing instruction 'prev_insn_idx' 177 */ 178 struct verifier_state st; 179 int insn_idx; 180 int prev_insn_idx; 181 struct verifier_stack_elem *next; 182 }; 183 184 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ 185 186 /* single container for all structs 187 * one verifier_env per bpf_check() call 188 */ 189 struct verifier_env { 190 struct bpf_prog *prog; /* eBPF program being verified */ 191 struct verifier_stack_elem *head; /* stack of verifier states to be processed */ 192 int stack_size; /* number of states to be processed */ 193 struct verifier_state cur_state; /* current verifier state */ 194 struct verifier_state_list **explored_states; /* search pruning optimization */ 195 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ 196 u32 used_map_cnt; /* number of used maps */ 197 bool allow_ptr_leaks; 198 }; 199 200 #define BPF_COMPLEXITY_LIMIT_INSNS 65536 201 #define BPF_COMPLEXITY_LIMIT_STACK 1024 202 203 struct bpf_call_arg_meta { 204 struct bpf_map *map_ptr; 205 bool raw_mode; 206 int regno; 207 int access_size; 208 }; 209 210 /* verbose verifier prints what it's seeing 211 * bpf_check() is called under lock, so no race to access these global vars 212 */ 213 static u32 log_level, log_size, log_len; 214 static char *log_buf; 215 216 static DEFINE_MUTEX(bpf_verifier_lock); 217 218 /* log_level controls verbosity level of eBPF verifier. 219 * verbose() is used to dump the verification trace to the log, so the user 220 * can figure out what's wrong with the program 221 */ 222 static __printf(1, 2) void verbose(const char *fmt, ...) 223 { 224 va_list args; 225 226 if (log_level == 0 || log_len >= log_size - 1) 227 return; 228 229 va_start(args, fmt); 230 log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args); 231 va_end(args); 232 } 233 234 /* string representation of 'enum bpf_reg_type' */ 235 static const char * const reg_type_str[] = { 236 [NOT_INIT] = "?", 237 [UNKNOWN_VALUE] = "inv", 238 [PTR_TO_CTX] = "ctx", 239 [CONST_PTR_TO_MAP] = "map_ptr", 240 [PTR_TO_MAP_VALUE] = "map_value", 241 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", 242 [FRAME_PTR] = "fp", 243 [PTR_TO_STACK] = "fp", 244 [CONST_IMM] = "imm", 245 [PTR_TO_PACKET] = "pkt", 246 [PTR_TO_PACKET_END] = "pkt_end", 247 }; 248 249 static void print_verifier_state(struct verifier_state *state) 250 { 251 struct reg_state *reg; 252 enum bpf_reg_type t; 253 int i; 254 255 for (i = 0; i < MAX_BPF_REG; i++) { 256 reg = &state->regs[i]; 257 t = reg->type; 258 if (t == NOT_INIT) 259 continue; 260 verbose(" R%d=%s", i, reg_type_str[t]); 261 if (t == CONST_IMM || t == PTR_TO_STACK) 262 verbose("%lld", reg->imm); 263 else if (t == PTR_TO_PACKET) 264 verbose("(id=%d,off=%d,r=%d)", 265 reg->id, reg->off, reg->range); 266 else if (t == UNKNOWN_VALUE && reg->imm) 267 verbose("%lld", reg->imm); 268 else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || 269 t == PTR_TO_MAP_VALUE_OR_NULL) 270 verbose("(ks=%d,vs=%d)", 271 reg->map_ptr->key_size, 272 reg->map_ptr->value_size); 273 } 274 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 275 if (state->stack_slot_type[i] == STACK_SPILL) 276 verbose(" fp%d=%s", -MAX_BPF_STACK + i, 277 reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]); 278 } 279 verbose("\n"); 280 } 281 282 static const char *const bpf_class_string[] = { 283 [BPF_LD] = "ld", 284 [BPF_LDX] = "ldx", 285 [BPF_ST] = "st", 286 [BPF_STX] = "stx", 287 [BPF_ALU] = "alu", 288 [BPF_JMP] = "jmp", 289 [BPF_RET] = "BUG", 290 [BPF_ALU64] = "alu64", 291 }; 292 293 static const char *const bpf_alu_string[16] = { 294 [BPF_ADD >> 4] = "+=", 295 [BPF_SUB >> 4] = "-=", 296 [BPF_MUL >> 4] = "*=", 297 [BPF_DIV >> 4] = "/=", 298 [BPF_OR >> 4] = "|=", 299 [BPF_AND >> 4] = "&=", 300 [BPF_LSH >> 4] = "<<=", 301 [BPF_RSH >> 4] = ">>=", 302 [BPF_NEG >> 4] = "neg", 303 [BPF_MOD >> 4] = "%=", 304 [BPF_XOR >> 4] = "^=", 305 [BPF_MOV >> 4] = "=", 306 [BPF_ARSH >> 4] = "s>>=", 307 [BPF_END >> 4] = "endian", 308 }; 309 310 static const char *const bpf_ldst_string[] = { 311 [BPF_W >> 3] = "u32", 312 [BPF_H >> 3] = "u16", 313 [BPF_B >> 3] = "u8", 314 [BPF_DW >> 3] = "u64", 315 }; 316 317 static const char *const bpf_jmp_string[16] = { 318 [BPF_JA >> 4] = "jmp", 319 [BPF_JEQ >> 4] = "==", 320 [BPF_JGT >> 4] = ">", 321 [BPF_JGE >> 4] = ">=", 322 [BPF_JSET >> 4] = "&", 323 [BPF_JNE >> 4] = "!=", 324 [BPF_JSGT >> 4] = "s>", 325 [BPF_JSGE >> 4] = "s>=", 326 [BPF_CALL >> 4] = "call", 327 [BPF_EXIT >> 4] = "exit", 328 }; 329 330 static void print_bpf_insn(struct bpf_insn *insn) 331 { 332 u8 class = BPF_CLASS(insn->code); 333 334 if (class == BPF_ALU || class == BPF_ALU64) { 335 if (BPF_SRC(insn->code) == BPF_X) 336 verbose("(%02x) %sr%d %s %sr%d\n", 337 insn->code, class == BPF_ALU ? "(u32) " : "", 338 insn->dst_reg, 339 bpf_alu_string[BPF_OP(insn->code) >> 4], 340 class == BPF_ALU ? "(u32) " : "", 341 insn->src_reg); 342 else 343 verbose("(%02x) %sr%d %s %s%d\n", 344 insn->code, class == BPF_ALU ? "(u32) " : "", 345 insn->dst_reg, 346 bpf_alu_string[BPF_OP(insn->code) >> 4], 347 class == BPF_ALU ? "(u32) " : "", 348 insn->imm); 349 } else if (class == BPF_STX) { 350 if (BPF_MODE(insn->code) == BPF_MEM) 351 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", 352 insn->code, 353 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 354 insn->dst_reg, 355 insn->off, insn->src_reg); 356 else if (BPF_MODE(insn->code) == BPF_XADD) 357 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", 358 insn->code, 359 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 360 insn->dst_reg, insn->off, 361 insn->src_reg); 362 else 363 verbose("BUG_%02x\n", insn->code); 364 } else if (class == BPF_ST) { 365 if (BPF_MODE(insn->code) != BPF_MEM) { 366 verbose("BUG_st_%02x\n", insn->code); 367 return; 368 } 369 verbose("(%02x) *(%s *)(r%d %+d) = %d\n", 370 insn->code, 371 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 372 insn->dst_reg, 373 insn->off, insn->imm); 374 } else if (class == BPF_LDX) { 375 if (BPF_MODE(insn->code) != BPF_MEM) { 376 verbose("BUG_ldx_%02x\n", insn->code); 377 return; 378 } 379 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", 380 insn->code, insn->dst_reg, 381 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 382 insn->src_reg, insn->off); 383 } else if (class == BPF_LD) { 384 if (BPF_MODE(insn->code) == BPF_ABS) { 385 verbose("(%02x) r0 = *(%s *)skb[%d]\n", 386 insn->code, 387 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 388 insn->imm); 389 } else if (BPF_MODE(insn->code) == BPF_IND) { 390 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", 391 insn->code, 392 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 393 insn->src_reg, insn->imm); 394 } else if (BPF_MODE(insn->code) == BPF_IMM) { 395 verbose("(%02x) r%d = 0x%x\n", 396 insn->code, insn->dst_reg, insn->imm); 397 } else { 398 verbose("BUG_ld_%02x\n", insn->code); 399 return; 400 } 401 } else if (class == BPF_JMP) { 402 u8 opcode = BPF_OP(insn->code); 403 404 if (opcode == BPF_CALL) { 405 verbose("(%02x) call %d\n", insn->code, insn->imm); 406 } else if (insn->code == (BPF_JMP | BPF_JA)) { 407 verbose("(%02x) goto pc%+d\n", 408 insn->code, insn->off); 409 } else if (insn->code == (BPF_JMP | BPF_EXIT)) { 410 verbose("(%02x) exit\n", insn->code); 411 } else if (BPF_SRC(insn->code) == BPF_X) { 412 verbose("(%02x) if r%d %s r%d goto pc%+d\n", 413 insn->code, insn->dst_reg, 414 bpf_jmp_string[BPF_OP(insn->code) >> 4], 415 insn->src_reg, insn->off); 416 } else { 417 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", 418 insn->code, insn->dst_reg, 419 bpf_jmp_string[BPF_OP(insn->code) >> 4], 420 insn->imm, insn->off); 421 } 422 } else { 423 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); 424 } 425 } 426 427 static int pop_stack(struct verifier_env *env, int *prev_insn_idx) 428 { 429 struct verifier_stack_elem *elem; 430 int insn_idx; 431 432 if (env->head == NULL) 433 return -1; 434 435 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); 436 insn_idx = env->head->insn_idx; 437 if (prev_insn_idx) 438 *prev_insn_idx = env->head->prev_insn_idx; 439 elem = env->head->next; 440 kfree(env->head); 441 env->head = elem; 442 env->stack_size--; 443 return insn_idx; 444 } 445 446 static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx, 447 int prev_insn_idx) 448 { 449 struct verifier_stack_elem *elem; 450 451 elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL); 452 if (!elem) 453 goto err; 454 455 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); 456 elem->insn_idx = insn_idx; 457 elem->prev_insn_idx = prev_insn_idx; 458 elem->next = env->head; 459 env->head = elem; 460 env->stack_size++; 461 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { 462 verbose("BPF program is too complex\n"); 463 goto err; 464 } 465 return &elem->st; 466 err: 467 /* pop all elements and return */ 468 while (pop_stack(env, NULL) >= 0); 469 return NULL; 470 } 471 472 #define CALLER_SAVED_REGS 6 473 static const int caller_saved[CALLER_SAVED_REGS] = { 474 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 475 }; 476 477 static void init_reg_state(struct reg_state *regs) 478 { 479 int i; 480 481 for (i = 0; i < MAX_BPF_REG; i++) { 482 regs[i].type = NOT_INIT; 483 regs[i].imm = 0; 484 } 485 486 /* frame pointer */ 487 regs[BPF_REG_FP].type = FRAME_PTR; 488 489 /* 1st arg to a function */ 490 regs[BPF_REG_1].type = PTR_TO_CTX; 491 } 492 493 static void mark_reg_unknown_value(struct reg_state *regs, u32 regno) 494 { 495 BUG_ON(regno >= MAX_BPF_REG); 496 regs[regno].type = UNKNOWN_VALUE; 497 regs[regno].imm = 0; 498 } 499 500 enum reg_arg_type { 501 SRC_OP, /* register is used as source operand */ 502 DST_OP, /* register is used as destination operand */ 503 DST_OP_NO_MARK /* same as above, check only, don't mark */ 504 }; 505 506 static int check_reg_arg(struct reg_state *regs, u32 regno, 507 enum reg_arg_type t) 508 { 509 if (regno >= MAX_BPF_REG) { 510 verbose("R%d is invalid\n", regno); 511 return -EINVAL; 512 } 513 514 if (t == SRC_OP) { 515 /* check whether register used as source operand can be read */ 516 if (regs[regno].type == NOT_INIT) { 517 verbose("R%d !read_ok\n", regno); 518 return -EACCES; 519 } 520 } else { 521 /* check whether register used as dest operand can be written to */ 522 if (regno == BPF_REG_FP) { 523 verbose("frame pointer is read only\n"); 524 return -EACCES; 525 } 526 if (t == DST_OP) 527 mark_reg_unknown_value(regs, regno); 528 } 529 return 0; 530 } 531 532 static int bpf_size_to_bytes(int bpf_size) 533 { 534 if (bpf_size == BPF_W) 535 return 4; 536 else if (bpf_size == BPF_H) 537 return 2; 538 else if (bpf_size == BPF_B) 539 return 1; 540 else if (bpf_size == BPF_DW) 541 return 8; 542 else 543 return -EINVAL; 544 } 545 546 static bool is_spillable_regtype(enum bpf_reg_type type) 547 { 548 switch (type) { 549 case PTR_TO_MAP_VALUE: 550 case PTR_TO_MAP_VALUE_OR_NULL: 551 case PTR_TO_STACK: 552 case PTR_TO_CTX: 553 case PTR_TO_PACKET: 554 case PTR_TO_PACKET_END: 555 case FRAME_PTR: 556 case CONST_PTR_TO_MAP: 557 return true; 558 default: 559 return false; 560 } 561 } 562 563 /* check_stack_read/write functions track spill/fill of registers, 564 * stack boundary and alignment are checked in check_mem_access() 565 */ 566 static int check_stack_write(struct verifier_state *state, int off, int size, 567 int value_regno) 568 { 569 int i; 570 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 571 * so it's aligned access and [off, off + size) are within stack limits 572 */ 573 574 if (value_regno >= 0 && 575 is_spillable_regtype(state->regs[value_regno].type)) { 576 577 /* register containing pointer is being spilled into stack */ 578 if (size != BPF_REG_SIZE) { 579 verbose("invalid size of register spill\n"); 580 return -EACCES; 581 } 582 583 /* save register state */ 584 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 585 state->regs[value_regno]; 586 587 for (i = 0; i < BPF_REG_SIZE; i++) 588 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; 589 } else { 590 /* regular write of data into stack */ 591 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 592 (struct reg_state) {}; 593 594 for (i = 0; i < size; i++) 595 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; 596 } 597 return 0; 598 } 599 600 static int check_stack_read(struct verifier_state *state, int off, int size, 601 int value_regno) 602 { 603 u8 *slot_type; 604 int i; 605 606 slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; 607 608 if (slot_type[0] == STACK_SPILL) { 609 if (size != BPF_REG_SIZE) { 610 verbose("invalid size of register spill\n"); 611 return -EACCES; 612 } 613 for (i = 1; i < BPF_REG_SIZE; i++) { 614 if (slot_type[i] != STACK_SPILL) { 615 verbose("corrupted spill memory\n"); 616 return -EACCES; 617 } 618 } 619 620 if (value_regno >= 0) 621 /* restore register state from stack */ 622 state->regs[value_regno] = 623 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; 624 return 0; 625 } else { 626 for (i = 0; i < size; i++) { 627 if (slot_type[i] != STACK_MISC) { 628 verbose("invalid read from stack off %d+%d size %d\n", 629 off, i, size); 630 return -EACCES; 631 } 632 } 633 if (value_regno >= 0) 634 /* have read misc data from the stack */ 635 mark_reg_unknown_value(state->regs, value_regno); 636 return 0; 637 } 638 } 639 640 /* check read/write into map element returned by bpf_map_lookup_elem() */ 641 static int check_map_access(struct verifier_env *env, u32 regno, int off, 642 int size) 643 { 644 struct bpf_map *map = env->cur_state.regs[regno].map_ptr; 645 646 if (off < 0 || off + size > map->value_size) { 647 verbose("invalid access to map value, value_size=%d off=%d size=%d\n", 648 map->value_size, off, size); 649 return -EACCES; 650 } 651 return 0; 652 } 653 654 #define MAX_PACKET_OFF 0xffff 655 656 static bool may_write_pkt_data(enum bpf_prog_type type) 657 { 658 switch (type) { 659 case BPF_PROG_TYPE_XDP: 660 return true; 661 default: 662 return false; 663 } 664 } 665 666 static int check_packet_access(struct verifier_env *env, u32 regno, int off, 667 int size) 668 { 669 struct reg_state *regs = env->cur_state.regs; 670 struct reg_state *reg = ®s[regno]; 671 672 off += reg->off; 673 if (off < 0 || off + size > reg->range) { 674 verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 675 off, size, regno, reg->id, reg->off, reg->range); 676 return -EACCES; 677 } 678 return 0; 679 } 680 681 /* check access to 'struct bpf_context' fields */ 682 static int check_ctx_access(struct verifier_env *env, int off, int size, 683 enum bpf_access_type t, enum bpf_reg_type *reg_type) 684 { 685 if (env->prog->aux->ops->is_valid_access && 686 env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) { 687 /* remember the offset of last byte accessed in ctx */ 688 if (env->prog->aux->max_ctx_offset < off + size) 689 env->prog->aux->max_ctx_offset = off + size; 690 return 0; 691 } 692 693 verbose("invalid bpf_context access off=%d size=%d\n", off, size); 694 return -EACCES; 695 } 696 697 static bool is_pointer_value(struct verifier_env *env, int regno) 698 { 699 if (env->allow_ptr_leaks) 700 return false; 701 702 switch (env->cur_state.regs[regno].type) { 703 case UNKNOWN_VALUE: 704 case CONST_IMM: 705 return false; 706 default: 707 return true; 708 } 709 } 710 711 static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg, 712 int off, int size) 713 { 714 if (reg->type != PTR_TO_PACKET) { 715 if (off % size != 0) { 716 verbose("misaligned access off %d size %d\n", off, size); 717 return -EACCES; 718 } else { 719 return 0; 720 } 721 } 722 723 switch (env->prog->type) { 724 case BPF_PROG_TYPE_SCHED_CLS: 725 case BPF_PROG_TYPE_SCHED_ACT: 726 case BPF_PROG_TYPE_XDP: 727 break; 728 default: 729 verbose("verifier is misconfigured\n"); 730 return -EACCES; 731 } 732 733 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 734 /* misaligned access to packet is ok on x86,arm,arm64 */ 735 return 0; 736 737 if (reg->id && size != 1) { 738 verbose("Unknown packet alignment. Only byte-sized access allowed\n"); 739 return -EACCES; 740 } 741 742 /* skb->data is NET_IP_ALIGN-ed */ 743 if ((NET_IP_ALIGN + reg->off + off) % size != 0) { 744 verbose("misaligned packet access off %d+%d+%d size %d\n", 745 NET_IP_ALIGN, reg->off, off, size); 746 return -EACCES; 747 } 748 return 0; 749 } 750 751 /* check whether memory at (regno + off) is accessible for t = (read | write) 752 * if t==write, value_regno is a register which value is stored into memory 753 * if t==read, value_regno is a register which will receive the value from memory 754 * if t==write && value_regno==-1, some unknown value is stored into memory 755 * if t==read && value_regno==-1, don't care what we read from memory 756 */ 757 static int check_mem_access(struct verifier_env *env, u32 regno, int off, 758 int bpf_size, enum bpf_access_type t, 759 int value_regno) 760 { 761 struct verifier_state *state = &env->cur_state; 762 struct reg_state *reg = &state->regs[regno]; 763 int size, err = 0; 764 765 if (reg->type == PTR_TO_STACK) 766 off += reg->imm; 767 768 size = bpf_size_to_bytes(bpf_size); 769 if (size < 0) 770 return size; 771 772 err = check_ptr_alignment(env, reg, off, size); 773 if (err) 774 return err; 775 776 if (reg->type == PTR_TO_MAP_VALUE) { 777 if (t == BPF_WRITE && value_regno >= 0 && 778 is_pointer_value(env, value_regno)) { 779 verbose("R%d leaks addr into map\n", value_regno); 780 return -EACCES; 781 } 782 err = check_map_access(env, regno, off, size); 783 if (!err && t == BPF_READ && value_regno >= 0) 784 mark_reg_unknown_value(state->regs, value_regno); 785 786 } else if (reg->type == PTR_TO_CTX) { 787 enum bpf_reg_type reg_type = UNKNOWN_VALUE; 788 789 if (t == BPF_WRITE && value_regno >= 0 && 790 is_pointer_value(env, value_regno)) { 791 verbose("R%d leaks addr into ctx\n", value_regno); 792 return -EACCES; 793 } 794 err = check_ctx_access(env, off, size, t, ®_type); 795 if (!err && t == BPF_READ && value_regno >= 0) { 796 mark_reg_unknown_value(state->regs, value_regno); 797 if (env->allow_ptr_leaks) 798 /* note that reg.[id|off|range] == 0 */ 799 state->regs[value_regno].type = reg_type; 800 } 801 802 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { 803 if (off >= 0 || off < -MAX_BPF_STACK) { 804 verbose("invalid stack off=%d size=%d\n", off, size); 805 return -EACCES; 806 } 807 if (t == BPF_WRITE) { 808 if (!env->allow_ptr_leaks && 809 state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL && 810 size != BPF_REG_SIZE) { 811 verbose("attempt to corrupt spilled pointer on stack\n"); 812 return -EACCES; 813 } 814 err = check_stack_write(state, off, size, value_regno); 815 } else { 816 err = check_stack_read(state, off, size, value_regno); 817 } 818 } else if (state->regs[regno].type == PTR_TO_PACKET) { 819 if (t == BPF_WRITE && !may_write_pkt_data(env->prog->type)) { 820 verbose("cannot write into packet\n"); 821 return -EACCES; 822 } 823 if (t == BPF_WRITE && value_regno >= 0 && 824 is_pointer_value(env, value_regno)) { 825 verbose("R%d leaks addr into packet\n", value_regno); 826 return -EACCES; 827 } 828 err = check_packet_access(env, regno, off, size); 829 if (!err && t == BPF_READ && value_regno >= 0) 830 mark_reg_unknown_value(state->regs, value_regno); 831 } else { 832 verbose("R%d invalid mem access '%s'\n", 833 regno, reg_type_str[reg->type]); 834 return -EACCES; 835 } 836 837 if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks && 838 state->regs[value_regno].type == UNKNOWN_VALUE) { 839 /* 1 or 2 byte load zero-extends, determine the number of 840 * zero upper bits. Not doing it fo 4 byte load, since 841 * such values cannot be added to ptr_to_packet anyway. 842 */ 843 state->regs[value_regno].imm = 64 - size * 8; 844 } 845 return err; 846 } 847 848 static int check_xadd(struct verifier_env *env, struct bpf_insn *insn) 849 { 850 struct reg_state *regs = env->cur_state.regs; 851 int err; 852 853 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || 854 insn->imm != 0) { 855 verbose("BPF_XADD uses reserved fields\n"); 856 return -EINVAL; 857 } 858 859 /* check src1 operand */ 860 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 861 if (err) 862 return err; 863 864 /* check src2 operand */ 865 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 866 if (err) 867 return err; 868 869 /* check whether atomic_add can read the memory */ 870 err = check_mem_access(env, insn->dst_reg, insn->off, 871 BPF_SIZE(insn->code), BPF_READ, -1); 872 if (err) 873 return err; 874 875 /* check whether atomic_add can write into the same memory */ 876 return check_mem_access(env, insn->dst_reg, insn->off, 877 BPF_SIZE(insn->code), BPF_WRITE, -1); 878 } 879 880 /* when register 'regno' is passed into function that will read 'access_size' 881 * bytes from that pointer, make sure that it's within stack boundary 882 * and all elements of stack are initialized 883 */ 884 static int check_stack_boundary(struct verifier_env *env, int regno, 885 int access_size, bool zero_size_allowed, 886 struct bpf_call_arg_meta *meta) 887 { 888 struct verifier_state *state = &env->cur_state; 889 struct reg_state *regs = state->regs; 890 int off, i; 891 892 if (regs[regno].type != PTR_TO_STACK) { 893 if (zero_size_allowed && access_size == 0 && 894 regs[regno].type == CONST_IMM && 895 regs[regno].imm == 0) 896 return 0; 897 898 verbose("R%d type=%s expected=%s\n", regno, 899 reg_type_str[regs[regno].type], 900 reg_type_str[PTR_TO_STACK]); 901 return -EACCES; 902 } 903 904 off = regs[regno].imm; 905 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 906 access_size <= 0) { 907 verbose("invalid stack type R%d off=%d access_size=%d\n", 908 regno, off, access_size); 909 return -EACCES; 910 } 911 912 if (meta && meta->raw_mode) { 913 meta->access_size = access_size; 914 meta->regno = regno; 915 return 0; 916 } 917 918 for (i = 0; i < access_size; i++) { 919 if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { 920 verbose("invalid indirect read from stack off %d+%d size %d\n", 921 off, i, access_size); 922 return -EACCES; 923 } 924 } 925 return 0; 926 } 927 928 static int check_func_arg(struct verifier_env *env, u32 regno, 929 enum bpf_arg_type arg_type, 930 struct bpf_call_arg_meta *meta) 931 { 932 struct reg_state *reg = env->cur_state.regs + regno; 933 enum bpf_reg_type expected_type; 934 int err = 0; 935 936 if (arg_type == ARG_DONTCARE) 937 return 0; 938 939 if (reg->type == NOT_INIT) { 940 verbose("R%d !read_ok\n", regno); 941 return -EACCES; 942 } 943 944 if (arg_type == ARG_ANYTHING) { 945 if (is_pointer_value(env, regno)) { 946 verbose("R%d leaks addr into helper function\n", regno); 947 return -EACCES; 948 } 949 return 0; 950 } 951 952 if (arg_type == ARG_PTR_TO_MAP_KEY || 953 arg_type == ARG_PTR_TO_MAP_VALUE) { 954 expected_type = PTR_TO_STACK; 955 } else if (arg_type == ARG_CONST_STACK_SIZE || 956 arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { 957 expected_type = CONST_IMM; 958 } else if (arg_type == ARG_CONST_MAP_PTR) { 959 expected_type = CONST_PTR_TO_MAP; 960 } else if (arg_type == ARG_PTR_TO_CTX) { 961 expected_type = PTR_TO_CTX; 962 } else if (arg_type == ARG_PTR_TO_STACK || 963 arg_type == ARG_PTR_TO_RAW_STACK) { 964 expected_type = PTR_TO_STACK; 965 /* One exception here. In case function allows for NULL to be 966 * passed in as argument, it's a CONST_IMM type. Final test 967 * happens during stack boundary checking. 968 */ 969 if (reg->type == CONST_IMM && reg->imm == 0) 970 expected_type = CONST_IMM; 971 meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK; 972 } else { 973 verbose("unsupported arg_type %d\n", arg_type); 974 return -EFAULT; 975 } 976 977 if (reg->type != expected_type) { 978 verbose("R%d type=%s expected=%s\n", regno, 979 reg_type_str[reg->type], reg_type_str[expected_type]); 980 return -EACCES; 981 } 982 983 if (arg_type == ARG_CONST_MAP_PTR) { 984 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 985 meta->map_ptr = reg->map_ptr; 986 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 987 /* bpf_map_xxx(..., map_ptr, ..., key) call: 988 * check that [key, key + map->key_size) are within 989 * stack limits and initialized 990 */ 991 if (!meta->map_ptr) { 992 /* in function declaration map_ptr must come before 993 * map_key, so that it's verified and known before 994 * we have to check map_key here. Otherwise it means 995 * that kernel subsystem misconfigured verifier 996 */ 997 verbose("invalid map_ptr to access map->key\n"); 998 return -EACCES; 999 } 1000 err = check_stack_boundary(env, regno, meta->map_ptr->key_size, 1001 false, NULL); 1002 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { 1003 /* bpf_map_xxx(..., map_ptr, ..., value) call: 1004 * check [value, value + map->value_size) validity 1005 */ 1006 if (!meta->map_ptr) { 1007 /* kernel subsystem misconfigured verifier */ 1008 verbose("invalid map_ptr to access map->value\n"); 1009 return -EACCES; 1010 } 1011 err = check_stack_boundary(env, regno, 1012 meta->map_ptr->value_size, 1013 false, NULL); 1014 } else if (arg_type == ARG_CONST_STACK_SIZE || 1015 arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { 1016 bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO); 1017 1018 /* bpf_xxx(..., buf, len) call will access 'len' bytes 1019 * from stack pointer 'buf'. Check it 1020 * note: regno == len, regno - 1 == buf 1021 */ 1022 if (regno == 0) { 1023 /* kernel subsystem misconfigured verifier */ 1024 verbose("ARG_CONST_STACK_SIZE cannot be first argument\n"); 1025 return -EACCES; 1026 } 1027 err = check_stack_boundary(env, regno - 1, reg->imm, 1028 zero_size_allowed, meta); 1029 } 1030 1031 return err; 1032 } 1033 1034 static int check_map_func_compatibility(struct bpf_map *map, int func_id) 1035 { 1036 if (!map) 1037 return 0; 1038 1039 /* We need a two way check, first is from map perspective ... */ 1040 switch (map->map_type) { 1041 case BPF_MAP_TYPE_PROG_ARRAY: 1042 if (func_id != BPF_FUNC_tail_call) 1043 goto error; 1044 break; 1045 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1046 if (func_id != BPF_FUNC_perf_event_read && 1047 func_id != BPF_FUNC_perf_event_output) 1048 goto error; 1049 break; 1050 case BPF_MAP_TYPE_STACK_TRACE: 1051 if (func_id != BPF_FUNC_get_stackid) 1052 goto error; 1053 break; 1054 case BPF_MAP_TYPE_CGROUP_ARRAY: 1055 if (func_id != BPF_FUNC_skb_in_cgroup) 1056 goto error; 1057 break; 1058 default: 1059 break; 1060 } 1061 1062 /* ... and second from the function itself. */ 1063 switch (func_id) { 1064 case BPF_FUNC_tail_call: 1065 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 1066 goto error; 1067 break; 1068 case BPF_FUNC_perf_event_read: 1069 case BPF_FUNC_perf_event_output: 1070 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 1071 goto error; 1072 break; 1073 case BPF_FUNC_get_stackid: 1074 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 1075 goto error; 1076 break; 1077 case BPF_FUNC_skb_in_cgroup: 1078 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 1079 goto error; 1080 break; 1081 default: 1082 break; 1083 } 1084 1085 return 0; 1086 error: 1087 verbose("cannot pass map_type %d into func %d\n", 1088 map->map_type, func_id); 1089 return -EINVAL; 1090 } 1091 1092 static int check_raw_mode(const struct bpf_func_proto *fn) 1093 { 1094 int count = 0; 1095 1096 if (fn->arg1_type == ARG_PTR_TO_RAW_STACK) 1097 count++; 1098 if (fn->arg2_type == ARG_PTR_TO_RAW_STACK) 1099 count++; 1100 if (fn->arg3_type == ARG_PTR_TO_RAW_STACK) 1101 count++; 1102 if (fn->arg4_type == ARG_PTR_TO_RAW_STACK) 1103 count++; 1104 if (fn->arg5_type == ARG_PTR_TO_RAW_STACK) 1105 count++; 1106 1107 return count > 1 ? -EINVAL : 0; 1108 } 1109 1110 static void clear_all_pkt_pointers(struct verifier_env *env) 1111 { 1112 struct verifier_state *state = &env->cur_state; 1113 struct reg_state *regs = state->regs, *reg; 1114 int i; 1115 1116 for (i = 0; i < MAX_BPF_REG; i++) 1117 if (regs[i].type == PTR_TO_PACKET || 1118 regs[i].type == PTR_TO_PACKET_END) 1119 mark_reg_unknown_value(regs, i); 1120 1121 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1122 if (state->stack_slot_type[i] != STACK_SPILL) 1123 continue; 1124 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 1125 if (reg->type != PTR_TO_PACKET && 1126 reg->type != PTR_TO_PACKET_END) 1127 continue; 1128 reg->type = UNKNOWN_VALUE; 1129 reg->imm = 0; 1130 } 1131 } 1132 1133 static int check_call(struct verifier_env *env, int func_id) 1134 { 1135 struct verifier_state *state = &env->cur_state; 1136 const struct bpf_func_proto *fn = NULL; 1137 struct reg_state *regs = state->regs; 1138 struct reg_state *reg; 1139 struct bpf_call_arg_meta meta; 1140 bool changes_data; 1141 int i, err; 1142 1143 /* find function prototype */ 1144 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 1145 verbose("invalid func %d\n", func_id); 1146 return -EINVAL; 1147 } 1148 1149 if (env->prog->aux->ops->get_func_proto) 1150 fn = env->prog->aux->ops->get_func_proto(func_id); 1151 1152 if (!fn) { 1153 verbose("unknown func %d\n", func_id); 1154 return -EINVAL; 1155 } 1156 1157 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 1158 if (!env->prog->gpl_compatible && fn->gpl_only) { 1159 verbose("cannot call GPL only function from proprietary program\n"); 1160 return -EINVAL; 1161 } 1162 1163 changes_data = bpf_helper_changes_skb_data(fn->func); 1164 1165 memset(&meta, 0, sizeof(meta)); 1166 1167 /* We only support one arg being in raw mode at the moment, which 1168 * is sufficient for the helper functions we have right now. 1169 */ 1170 err = check_raw_mode(fn); 1171 if (err) { 1172 verbose("kernel subsystem misconfigured func %d\n", func_id); 1173 return err; 1174 } 1175 1176 /* check args */ 1177 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); 1178 if (err) 1179 return err; 1180 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1181 if (err) 1182 return err; 1183 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1184 if (err) 1185 return err; 1186 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); 1187 if (err) 1188 return err; 1189 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); 1190 if (err) 1191 return err; 1192 1193 /* Mark slots with STACK_MISC in case of raw mode, stack offset 1194 * is inferred from register state. 1195 */ 1196 for (i = 0; i < meta.access_size; i++) { 1197 err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1); 1198 if (err) 1199 return err; 1200 } 1201 1202 /* reset caller saved regs */ 1203 for (i = 0; i < CALLER_SAVED_REGS; i++) { 1204 reg = regs + caller_saved[i]; 1205 reg->type = NOT_INIT; 1206 reg->imm = 0; 1207 } 1208 1209 /* update return register */ 1210 if (fn->ret_type == RET_INTEGER) { 1211 regs[BPF_REG_0].type = UNKNOWN_VALUE; 1212 } else if (fn->ret_type == RET_VOID) { 1213 regs[BPF_REG_0].type = NOT_INIT; 1214 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 1215 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 1216 /* remember map_ptr, so that check_map_access() 1217 * can check 'value_size' boundary of memory access 1218 * to map element returned from bpf_map_lookup_elem() 1219 */ 1220 if (meta.map_ptr == NULL) { 1221 verbose("kernel subsystem misconfigured verifier\n"); 1222 return -EINVAL; 1223 } 1224 regs[BPF_REG_0].map_ptr = meta.map_ptr; 1225 } else { 1226 verbose("unknown return type %d of func %d\n", 1227 fn->ret_type, func_id); 1228 return -EINVAL; 1229 } 1230 1231 err = check_map_func_compatibility(meta.map_ptr, func_id); 1232 if (err) 1233 return err; 1234 1235 if (changes_data) 1236 clear_all_pkt_pointers(env); 1237 return 0; 1238 } 1239 1240 static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn) 1241 { 1242 struct reg_state *regs = env->cur_state.regs; 1243 struct reg_state *dst_reg = ®s[insn->dst_reg]; 1244 struct reg_state *src_reg = ®s[insn->src_reg]; 1245 struct reg_state tmp_reg; 1246 s32 imm; 1247 1248 if (BPF_SRC(insn->code) == BPF_K) { 1249 /* pkt_ptr += imm */ 1250 imm = insn->imm; 1251 1252 add_imm: 1253 if (imm <= 0) { 1254 verbose("addition of negative constant to packet pointer is not allowed\n"); 1255 return -EACCES; 1256 } 1257 if (imm >= MAX_PACKET_OFF || 1258 imm + dst_reg->off >= MAX_PACKET_OFF) { 1259 verbose("constant %d is too large to add to packet pointer\n", 1260 imm); 1261 return -EACCES; 1262 } 1263 /* a constant was added to pkt_ptr. 1264 * Remember it while keeping the same 'id' 1265 */ 1266 dst_reg->off += imm; 1267 } else { 1268 if (src_reg->type == PTR_TO_PACKET) { 1269 /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ 1270 tmp_reg = *dst_reg; /* save r7 state */ 1271 *dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */ 1272 src_reg = &tmp_reg; /* pretend it's src_reg state */ 1273 /* if the checks below reject it, the copy won't matter, 1274 * since we're rejecting the whole program. If all ok, 1275 * then imm22 state will be added to r7 1276 * and r7 will be pkt(id=0,off=22,r=62) while 1277 * r6 will stay as pkt(id=0,off=0,r=62) 1278 */ 1279 } 1280 1281 if (src_reg->type == CONST_IMM) { 1282 /* pkt_ptr += reg where reg is known constant */ 1283 imm = src_reg->imm; 1284 goto add_imm; 1285 } 1286 /* disallow pkt_ptr += reg 1287 * if reg is not uknown_value with guaranteed zero upper bits 1288 * otherwise pkt_ptr may overflow and addition will become 1289 * subtraction which is not allowed 1290 */ 1291 if (src_reg->type != UNKNOWN_VALUE) { 1292 verbose("cannot add '%s' to ptr_to_packet\n", 1293 reg_type_str[src_reg->type]); 1294 return -EACCES; 1295 } 1296 if (src_reg->imm < 48) { 1297 verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n", 1298 src_reg->imm); 1299 return -EACCES; 1300 } 1301 /* dst_reg stays as pkt_ptr type and since some positive 1302 * integer value was added to the pointer, increment its 'id' 1303 */ 1304 dst_reg->id++; 1305 1306 /* something was added to pkt_ptr, set range and off to zero */ 1307 dst_reg->off = 0; 1308 dst_reg->range = 0; 1309 } 1310 return 0; 1311 } 1312 1313 static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn) 1314 { 1315 struct reg_state *regs = env->cur_state.regs; 1316 struct reg_state *dst_reg = ®s[insn->dst_reg]; 1317 u8 opcode = BPF_OP(insn->code); 1318 s64 imm_log2; 1319 1320 /* for type == UNKNOWN_VALUE: 1321 * imm > 0 -> number of zero upper bits 1322 * imm == 0 -> don't track which is the same as all bits can be non-zero 1323 */ 1324 1325 if (BPF_SRC(insn->code) == BPF_X) { 1326 struct reg_state *src_reg = ®s[insn->src_reg]; 1327 1328 if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 && 1329 dst_reg->imm && opcode == BPF_ADD) { 1330 /* dreg += sreg 1331 * where both have zero upper bits. Adding them 1332 * can only result making one more bit non-zero 1333 * in the larger value. 1334 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47) 1335 * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47) 1336 */ 1337 dst_reg->imm = min(dst_reg->imm, src_reg->imm); 1338 dst_reg->imm--; 1339 return 0; 1340 } 1341 if (src_reg->type == CONST_IMM && src_reg->imm > 0 && 1342 dst_reg->imm && opcode == BPF_ADD) { 1343 /* dreg += sreg 1344 * where dreg has zero upper bits and sreg is const. 1345 * Adding them can only result making one more bit 1346 * non-zero in the larger value. 1347 */ 1348 imm_log2 = __ilog2_u64((long long)src_reg->imm); 1349 dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); 1350 dst_reg->imm--; 1351 return 0; 1352 } 1353 /* all other cases non supported yet, just mark dst_reg */ 1354 dst_reg->imm = 0; 1355 return 0; 1356 } 1357 1358 /* sign extend 32-bit imm into 64-bit to make sure that 1359 * negative values occupy bit 63. Note ilog2() would have 1360 * been incorrect, since sizeof(insn->imm) == 4 1361 */ 1362 imm_log2 = __ilog2_u64((long long)insn->imm); 1363 1364 if (dst_reg->imm && opcode == BPF_LSH) { 1365 /* reg <<= imm 1366 * if reg was a result of 2 byte load, then its imm == 48 1367 * which means that upper 48 bits are zero and shifting this reg 1368 * left by 4 would mean that upper 44 bits are still zero 1369 */ 1370 dst_reg->imm -= insn->imm; 1371 } else if (dst_reg->imm && opcode == BPF_MUL) { 1372 /* reg *= imm 1373 * if multiplying by 14 subtract 4 1374 * This is conservative calculation of upper zero bits. 1375 * It's not trying to special case insn->imm == 1 or 0 cases 1376 */ 1377 dst_reg->imm -= imm_log2 + 1; 1378 } else if (opcode == BPF_AND) { 1379 /* reg &= imm */ 1380 dst_reg->imm = 63 - imm_log2; 1381 } else if (dst_reg->imm && opcode == BPF_ADD) { 1382 /* reg += imm */ 1383 dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); 1384 dst_reg->imm--; 1385 } else if (opcode == BPF_RSH) { 1386 /* reg >>= imm 1387 * which means that after right shift, upper bits will be zero 1388 * note that verifier already checked that 1389 * 0 <= imm < 64 for shift insn 1390 */ 1391 dst_reg->imm += insn->imm; 1392 if (unlikely(dst_reg->imm > 64)) 1393 /* some dumb code did: 1394 * r2 = *(u32 *)mem; 1395 * r2 >>= 32; 1396 * and all bits are zero now */ 1397 dst_reg->imm = 64; 1398 } else { 1399 /* all other alu ops, means that we don't know what will 1400 * happen to the value, mark it with unknown number of zero bits 1401 */ 1402 dst_reg->imm = 0; 1403 } 1404 1405 if (dst_reg->imm < 0) { 1406 /* all 64 bits of the register can contain non-zero bits 1407 * and such value cannot be added to ptr_to_packet, since it 1408 * may overflow, mark it as unknown to avoid further eval 1409 */ 1410 dst_reg->imm = 0; 1411 } 1412 return 0; 1413 } 1414 1415 static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn) 1416 { 1417 struct reg_state *regs = env->cur_state.regs; 1418 struct reg_state *dst_reg = ®s[insn->dst_reg]; 1419 struct reg_state *src_reg = ®s[insn->src_reg]; 1420 u8 opcode = BPF_OP(insn->code); 1421 1422 /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn. 1423 * Don't care about overflow or negative values, just add them 1424 */ 1425 if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) 1426 dst_reg->imm += insn->imm; 1427 else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X && 1428 src_reg->type == CONST_IMM) 1429 dst_reg->imm += src_reg->imm; 1430 else 1431 mark_reg_unknown_value(regs, insn->dst_reg); 1432 return 0; 1433 } 1434 1435 /* check validity of 32-bit and 64-bit arithmetic operations */ 1436 static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) 1437 { 1438 struct reg_state *regs = env->cur_state.regs, *dst_reg; 1439 u8 opcode = BPF_OP(insn->code); 1440 int err; 1441 1442 if (opcode == BPF_END || opcode == BPF_NEG) { 1443 if (opcode == BPF_NEG) { 1444 if (BPF_SRC(insn->code) != 0 || 1445 insn->src_reg != BPF_REG_0 || 1446 insn->off != 0 || insn->imm != 0) { 1447 verbose("BPF_NEG uses reserved fields\n"); 1448 return -EINVAL; 1449 } 1450 } else { 1451 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 1452 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) { 1453 verbose("BPF_END uses reserved fields\n"); 1454 return -EINVAL; 1455 } 1456 } 1457 1458 /* check src operand */ 1459 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1460 if (err) 1461 return err; 1462 1463 if (is_pointer_value(env, insn->dst_reg)) { 1464 verbose("R%d pointer arithmetic prohibited\n", 1465 insn->dst_reg); 1466 return -EACCES; 1467 } 1468 1469 /* check dest operand */ 1470 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1471 if (err) 1472 return err; 1473 1474 } else if (opcode == BPF_MOV) { 1475 1476 if (BPF_SRC(insn->code) == BPF_X) { 1477 if (insn->imm != 0 || insn->off != 0) { 1478 verbose("BPF_MOV uses reserved fields\n"); 1479 return -EINVAL; 1480 } 1481 1482 /* check src operand */ 1483 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1484 if (err) 1485 return err; 1486 } else { 1487 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1488 verbose("BPF_MOV uses reserved fields\n"); 1489 return -EINVAL; 1490 } 1491 } 1492 1493 /* check dest operand */ 1494 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1495 if (err) 1496 return err; 1497 1498 if (BPF_SRC(insn->code) == BPF_X) { 1499 if (BPF_CLASS(insn->code) == BPF_ALU64) { 1500 /* case: R1 = R2 1501 * copy register state to dest reg 1502 */ 1503 regs[insn->dst_reg] = regs[insn->src_reg]; 1504 } else { 1505 if (is_pointer_value(env, insn->src_reg)) { 1506 verbose("R%d partial copy of pointer\n", 1507 insn->src_reg); 1508 return -EACCES; 1509 } 1510 regs[insn->dst_reg].type = UNKNOWN_VALUE; 1511 regs[insn->dst_reg].map_ptr = NULL; 1512 } 1513 } else { 1514 /* case: R = imm 1515 * remember the value we stored into this reg 1516 */ 1517 regs[insn->dst_reg].type = CONST_IMM; 1518 regs[insn->dst_reg].imm = insn->imm; 1519 } 1520 1521 } else if (opcode > BPF_END) { 1522 verbose("invalid BPF_ALU opcode %x\n", opcode); 1523 return -EINVAL; 1524 1525 } else { /* all other ALU ops: and, sub, xor, add, ... */ 1526 1527 if (BPF_SRC(insn->code) == BPF_X) { 1528 if (insn->imm != 0 || insn->off != 0) { 1529 verbose("BPF_ALU uses reserved fields\n"); 1530 return -EINVAL; 1531 } 1532 /* check src1 operand */ 1533 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1534 if (err) 1535 return err; 1536 } else { 1537 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1538 verbose("BPF_ALU uses reserved fields\n"); 1539 return -EINVAL; 1540 } 1541 } 1542 1543 /* check src2 operand */ 1544 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1545 if (err) 1546 return err; 1547 1548 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 1549 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 1550 verbose("div by zero\n"); 1551 return -EINVAL; 1552 } 1553 1554 if ((opcode == BPF_LSH || opcode == BPF_RSH || 1555 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 1556 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 1557 1558 if (insn->imm < 0 || insn->imm >= size) { 1559 verbose("invalid shift %d\n", insn->imm); 1560 return -EINVAL; 1561 } 1562 } 1563 1564 /* check dest operand */ 1565 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); 1566 if (err) 1567 return err; 1568 1569 dst_reg = ®s[insn->dst_reg]; 1570 1571 /* pattern match 'bpf_add Rx, imm' instruction */ 1572 if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && 1573 dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) { 1574 dst_reg->type = PTR_TO_STACK; 1575 dst_reg->imm = insn->imm; 1576 return 0; 1577 } else if (opcode == BPF_ADD && 1578 BPF_CLASS(insn->code) == BPF_ALU64 && 1579 (dst_reg->type == PTR_TO_PACKET || 1580 (BPF_SRC(insn->code) == BPF_X && 1581 regs[insn->src_reg].type == PTR_TO_PACKET))) { 1582 /* ptr_to_packet += K|X */ 1583 return check_packet_ptr_add(env, insn); 1584 } else if (BPF_CLASS(insn->code) == BPF_ALU64 && 1585 dst_reg->type == UNKNOWN_VALUE && 1586 env->allow_ptr_leaks) { 1587 /* unknown += K|X */ 1588 return evaluate_reg_alu(env, insn); 1589 } else if (BPF_CLASS(insn->code) == BPF_ALU64 && 1590 dst_reg->type == CONST_IMM && 1591 env->allow_ptr_leaks) { 1592 /* reg_imm += K|X */ 1593 return evaluate_reg_imm_alu(env, insn); 1594 } else if (is_pointer_value(env, insn->dst_reg)) { 1595 verbose("R%d pointer arithmetic prohibited\n", 1596 insn->dst_reg); 1597 return -EACCES; 1598 } else if (BPF_SRC(insn->code) == BPF_X && 1599 is_pointer_value(env, insn->src_reg)) { 1600 verbose("R%d pointer arithmetic prohibited\n", 1601 insn->src_reg); 1602 return -EACCES; 1603 } 1604 1605 /* mark dest operand */ 1606 mark_reg_unknown_value(regs, insn->dst_reg); 1607 } 1608 1609 return 0; 1610 } 1611 1612 static void find_good_pkt_pointers(struct verifier_env *env, 1613 struct reg_state *dst_reg) 1614 { 1615 struct verifier_state *state = &env->cur_state; 1616 struct reg_state *regs = state->regs, *reg; 1617 int i; 1618 /* r2 = r3; 1619 * r2 += 8 1620 * if (r2 > pkt_end) goto somewhere 1621 * r2 == dst_reg, pkt_end == src_reg, 1622 * r2=pkt(id=n,off=8,r=0) 1623 * r3=pkt(id=n,off=0,r=0) 1624 * find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 1625 * so that range of bytes [r3, r3 + 8) is safe to access 1626 */ 1627 for (i = 0; i < MAX_BPF_REG; i++) 1628 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 1629 regs[i].range = dst_reg->off; 1630 1631 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1632 if (state->stack_slot_type[i] != STACK_SPILL) 1633 continue; 1634 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 1635 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 1636 reg->range = dst_reg->off; 1637 } 1638 } 1639 1640 static int check_cond_jmp_op(struct verifier_env *env, 1641 struct bpf_insn *insn, int *insn_idx) 1642 { 1643 struct reg_state *regs = env->cur_state.regs, *dst_reg; 1644 struct verifier_state *other_branch; 1645 u8 opcode = BPF_OP(insn->code); 1646 int err; 1647 1648 if (opcode > BPF_EXIT) { 1649 verbose("invalid BPF_JMP opcode %x\n", opcode); 1650 return -EINVAL; 1651 } 1652 1653 if (BPF_SRC(insn->code) == BPF_X) { 1654 if (insn->imm != 0) { 1655 verbose("BPF_JMP uses reserved fields\n"); 1656 return -EINVAL; 1657 } 1658 1659 /* check src1 operand */ 1660 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1661 if (err) 1662 return err; 1663 1664 if (is_pointer_value(env, insn->src_reg)) { 1665 verbose("R%d pointer comparison prohibited\n", 1666 insn->src_reg); 1667 return -EACCES; 1668 } 1669 } else { 1670 if (insn->src_reg != BPF_REG_0) { 1671 verbose("BPF_JMP uses reserved fields\n"); 1672 return -EINVAL; 1673 } 1674 } 1675 1676 /* check src2 operand */ 1677 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1678 if (err) 1679 return err; 1680 1681 dst_reg = ®s[insn->dst_reg]; 1682 1683 /* detect if R == 0 where R was initialized to zero earlier */ 1684 if (BPF_SRC(insn->code) == BPF_K && 1685 (opcode == BPF_JEQ || opcode == BPF_JNE) && 1686 dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) { 1687 if (opcode == BPF_JEQ) { 1688 /* if (imm == imm) goto pc+off; 1689 * only follow the goto, ignore fall-through 1690 */ 1691 *insn_idx += insn->off; 1692 return 0; 1693 } else { 1694 /* if (imm != imm) goto pc+off; 1695 * only follow fall-through branch, since 1696 * that's where the program will go 1697 */ 1698 return 0; 1699 } 1700 } 1701 1702 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); 1703 if (!other_branch) 1704 return -EFAULT; 1705 1706 /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */ 1707 if (BPF_SRC(insn->code) == BPF_K && 1708 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 1709 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 1710 if (opcode == BPF_JEQ) { 1711 /* next fallthrough insn can access memory via 1712 * this register 1713 */ 1714 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; 1715 /* branch targer cannot access it, since reg == 0 */ 1716 mark_reg_unknown_value(other_branch->regs, 1717 insn->dst_reg); 1718 } else { 1719 other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; 1720 mark_reg_unknown_value(regs, insn->dst_reg); 1721 } 1722 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 1723 dst_reg->type == PTR_TO_PACKET && 1724 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 1725 find_good_pkt_pointers(env, dst_reg); 1726 } else if (is_pointer_value(env, insn->dst_reg)) { 1727 verbose("R%d pointer comparison prohibited\n", insn->dst_reg); 1728 return -EACCES; 1729 } 1730 if (log_level) 1731 print_verifier_state(&env->cur_state); 1732 return 0; 1733 } 1734 1735 /* return the map pointer stored inside BPF_LD_IMM64 instruction */ 1736 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) 1737 { 1738 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; 1739 1740 return (struct bpf_map *) (unsigned long) imm64; 1741 } 1742 1743 /* verify BPF_LD_IMM64 instruction */ 1744 static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn) 1745 { 1746 struct reg_state *regs = env->cur_state.regs; 1747 int err; 1748 1749 if (BPF_SIZE(insn->code) != BPF_DW) { 1750 verbose("invalid BPF_LD_IMM insn\n"); 1751 return -EINVAL; 1752 } 1753 if (insn->off != 0) { 1754 verbose("BPF_LD_IMM64 uses reserved fields\n"); 1755 return -EINVAL; 1756 } 1757 1758 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1759 if (err) 1760 return err; 1761 1762 if (insn->src_reg == 0) 1763 /* generic move 64-bit immediate into a register */ 1764 return 0; 1765 1766 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ 1767 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); 1768 1769 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 1770 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); 1771 return 0; 1772 } 1773 1774 static bool may_access_skb(enum bpf_prog_type type) 1775 { 1776 switch (type) { 1777 case BPF_PROG_TYPE_SOCKET_FILTER: 1778 case BPF_PROG_TYPE_SCHED_CLS: 1779 case BPF_PROG_TYPE_SCHED_ACT: 1780 return true; 1781 default: 1782 return false; 1783 } 1784 } 1785 1786 /* verify safety of LD_ABS|LD_IND instructions: 1787 * - they can only appear in the programs where ctx == skb 1788 * - since they are wrappers of function calls, they scratch R1-R5 registers, 1789 * preserve R6-R9, and store return value into R0 1790 * 1791 * Implicit input: 1792 * ctx == skb == R6 == CTX 1793 * 1794 * Explicit input: 1795 * SRC == any register 1796 * IMM == 32-bit immediate 1797 * 1798 * Output: 1799 * R0 - 8/16/32-bit skb data converted to cpu endianness 1800 */ 1801 static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn) 1802 { 1803 struct reg_state *regs = env->cur_state.regs; 1804 u8 mode = BPF_MODE(insn->code); 1805 struct reg_state *reg; 1806 int i, err; 1807 1808 if (!may_access_skb(env->prog->type)) { 1809 verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 1810 return -EINVAL; 1811 } 1812 1813 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 1814 BPF_SIZE(insn->code) == BPF_DW || 1815 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 1816 verbose("BPF_LD_[ABS|IND] uses reserved fields\n"); 1817 return -EINVAL; 1818 } 1819 1820 /* check whether implicit source operand (register R6) is readable */ 1821 err = check_reg_arg(regs, BPF_REG_6, SRC_OP); 1822 if (err) 1823 return err; 1824 1825 if (regs[BPF_REG_6].type != PTR_TO_CTX) { 1826 verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 1827 return -EINVAL; 1828 } 1829 1830 if (mode == BPF_IND) { 1831 /* check explicit source operand */ 1832 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1833 if (err) 1834 return err; 1835 } 1836 1837 /* reset caller saved regs to unreadable */ 1838 for (i = 0; i < CALLER_SAVED_REGS; i++) { 1839 reg = regs + caller_saved[i]; 1840 reg->type = NOT_INIT; 1841 reg->imm = 0; 1842 } 1843 1844 /* mark destination R0 register as readable, since it contains 1845 * the value fetched from the packet 1846 */ 1847 regs[BPF_REG_0].type = UNKNOWN_VALUE; 1848 return 0; 1849 } 1850 1851 /* non-recursive DFS pseudo code 1852 * 1 procedure DFS-iterative(G,v): 1853 * 2 label v as discovered 1854 * 3 let S be a stack 1855 * 4 S.push(v) 1856 * 5 while S is not empty 1857 * 6 t <- S.pop() 1858 * 7 if t is what we're looking for: 1859 * 8 return t 1860 * 9 for all edges e in G.adjacentEdges(t) do 1861 * 10 if edge e is already labelled 1862 * 11 continue with the next edge 1863 * 12 w <- G.adjacentVertex(t,e) 1864 * 13 if vertex w is not discovered and not explored 1865 * 14 label e as tree-edge 1866 * 15 label w as discovered 1867 * 16 S.push(w) 1868 * 17 continue at 5 1869 * 18 else if vertex w is discovered 1870 * 19 label e as back-edge 1871 * 20 else 1872 * 21 // vertex w is explored 1873 * 22 label e as forward- or cross-edge 1874 * 23 label t as explored 1875 * 24 S.pop() 1876 * 1877 * convention: 1878 * 0x10 - discovered 1879 * 0x11 - discovered and fall-through edge labelled 1880 * 0x12 - discovered and fall-through and branch edges labelled 1881 * 0x20 - explored 1882 */ 1883 1884 enum { 1885 DISCOVERED = 0x10, 1886 EXPLORED = 0x20, 1887 FALLTHROUGH = 1, 1888 BRANCH = 2, 1889 }; 1890 1891 #define STATE_LIST_MARK ((struct verifier_state_list *) -1L) 1892 1893 static int *insn_stack; /* stack of insns to process */ 1894 static int cur_stack; /* current stack index */ 1895 static int *insn_state; 1896 1897 /* t, w, e - match pseudo-code above: 1898 * t - index of current instruction 1899 * w - next instruction 1900 * e - edge 1901 */ 1902 static int push_insn(int t, int w, int e, struct verifier_env *env) 1903 { 1904 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 1905 return 0; 1906 1907 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 1908 return 0; 1909 1910 if (w < 0 || w >= env->prog->len) { 1911 verbose("jump out of range from insn %d to %d\n", t, w); 1912 return -EINVAL; 1913 } 1914 1915 if (e == BRANCH) 1916 /* mark branch target for state pruning */ 1917 env->explored_states[w] = STATE_LIST_MARK; 1918 1919 if (insn_state[w] == 0) { 1920 /* tree-edge */ 1921 insn_state[t] = DISCOVERED | e; 1922 insn_state[w] = DISCOVERED; 1923 if (cur_stack >= env->prog->len) 1924 return -E2BIG; 1925 insn_stack[cur_stack++] = w; 1926 return 1; 1927 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 1928 verbose("back-edge from insn %d to %d\n", t, w); 1929 return -EINVAL; 1930 } else if (insn_state[w] == EXPLORED) { 1931 /* forward- or cross-edge */ 1932 insn_state[t] = DISCOVERED | e; 1933 } else { 1934 verbose("insn state internal bug\n"); 1935 return -EFAULT; 1936 } 1937 return 0; 1938 } 1939 1940 /* non-recursive depth-first-search to detect loops in BPF program 1941 * loop == back-edge in directed graph 1942 */ 1943 static int check_cfg(struct verifier_env *env) 1944 { 1945 struct bpf_insn *insns = env->prog->insnsi; 1946 int insn_cnt = env->prog->len; 1947 int ret = 0; 1948 int i, t; 1949 1950 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 1951 if (!insn_state) 1952 return -ENOMEM; 1953 1954 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 1955 if (!insn_stack) { 1956 kfree(insn_state); 1957 return -ENOMEM; 1958 } 1959 1960 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 1961 insn_stack[0] = 0; /* 0 is the first instruction */ 1962 cur_stack = 1; 1963 1964 peek_stack: 1965 if (cur_stack == 0) 1966 goto check_state; 1967 t = insn_stack[cur_stack - 1]; 1968 1969 if (BPF_CLASS(insns[t].code) == BPF_JMP) { 1970 u8 opcode = BPF_OP(insns[t].code); 1971 1972 if (opcode == BPF_EXIT) { 1973 goto mark_explored; 1974 } else if (opcode == BPF_CALL) { 1975 ret = push_insn(t, t + 1, FALLTHROUGH, env); 1976 if (ret == 1) 1977 goto peek_stack; 1978 else if (ret < 0) 1979 goto err_free; 1980 if (t + 1 < insn_cnt) 1981 env->explored_states[t + 1] = STATE_LIST_MARK; 1982 } else if (opcode == BPF_JA) { 1983 if (BPF_SRC(insns[t].code) != BPF_K) { 1984 ret = -EINVAL; 1985 goto err_free; 1986 } 1987 /* unconditional jump with single edge */ 1988 ret = push_insn(t, t + insns[t].off + 1, 1989 FALLTHROUGH, env); 1990 if (ret == 1) 1991 goto peek_stack; 1992 else if (ret < 0) 1993 goto err_free; 1994 /* tell verifier to check for equivalent states 1995 * after every call and jump 1996 */ 1997 if (t + 1 < insn_cnt) 1998 env->explored_states[t + 1] = STATE_LIST_MARK; 1999 } else { 2000 /* conditional jump with two edges */ 2001 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2002 if (ret == 1) 2003 goto peek_stack; 2004 else if (ret < 0) 2005 goto err_free; 2006 2007 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); 2008 if (ret == 1) 2009 goto peek_stack; 2010 else if (ret < 0) 2011 goto err_free; 2012 } 2013 } else { 2014 /* all other non-branch instructions with single 2015 * fall-through edge 2016 */ 2017 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2018 if (ret == 1) 2019 goto peek_stack; 2020 else if (ret < 0) 2021 goto err_free; 2022 } 2023 2024 mark_explored: 2025 insn_state[t] = EXPLORED; 2026 if (cur_stack-- <= 0) { 2027 verbose("pop stack internal bug\n"); 2028 ret = -EFAULT; 2029 goto err_free; 2030 } 2031 goto peek_stack; 2032 2033 check_state: 2034 for (i = 0; i < insn_cnt; i++) { 2035 if (insn_state[i] != EXPLORED) { 2036 verbose("unreachable insn %d\n", i); 2037 ret = -EINVAL; 2038 goto err_free; 2039 } 2040 } 2041 ret = 0; /* cfg looks good */ 2042 2043 err_free: 2044 kfree(insn_state); 2045 kfree(insn_stack); 2046 return ret; 2047 } 2048 2049 /* the following conditions reduce the number of explored insns 2050 * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet 2051 */ 2052 static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur) 2053 { 2054 if (old->id != cur->id) 2055 return false; 2056 2057 /* old ptr_to_packet is more conservative, since it allows smaller 2058 * range. Ex: 2059 * old(off=0,r=10) is equal to cur(off=0,r=20), because 2060 * old(off=0,r=10) means that with range=10 the verifier proceeded 2061 * further and found no issues with the program. Now we're in the same 2062 * spot with cur(off=0,r=20), so we're safe too, since anything further 2063 * will only be looking at most 10 bytes after this pointer. 2064 */ 2065 if (old->off == cur->off && old->range < cur->range) 2066 return true; 2067 2068 /* old(off=20,r=10) is equal to cur(off=22,re=22 or 5 or 0) 2069 * since both cannot be used for packet access and safe(old) 2070 * pointer has smaller off that could be used for further 2071 * 'if (ptr > data_end)' check 2072 * Ex: 2073 * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean 2074 * that we cannot access the packet. 2075 * The safe range is: 2076 * [ptr, ptr + range - off) 2077 * so whenever off >=range, it means no safe bytes from this pointer. 2078 * When comparing old->off <= cur->off, it means that older code 2079 * went with smaller offset and that offset was later 2080 * used to figure out the safe range after 'if (ptr > data_end)' check 2081 * Say, 'old' state was explored like: 2082 * ... R3(off=0, r=0) 2083 * R4 = R3 + 20 2084 * ... now R4(off=20,r=0) <-- here 2085 * if (R4 > data_end) 2086 * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access. 2087 * ... the code further went all the way to bpf_exit. 2088 * Now the 'cur' state at the mark 'here' has R4(off=30,r=0). 2089 * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier 2090 * goes further, such cur_R4 will give larger safe packet range after 2091 * 'if (R4 > data_end)' and all further insn were already good with r=20, 2092 * so they will be good with r=30 and we can prune the search. 2093 */ 2094 if (old->off <= cur->off && 2095 old->off >= old->range && cur->off >= cur->range) 2096 return true; 2097 2098 return false; 2099 } 2100 2101 /* compare two verifier states 2102 * 2103 * all states stored in state_list are known to be valid, since 2104 * verifier reached 'bpf_exit' instruction through them 2105 * 2106 * this function is called when verifier exploring different branches of 2107 * execution popped from the state stack. If it sees an old state that has 2108 * more strict register state and more strict stack state then this execution 2109 * branch doesn't need to be explored further, since verifier already 2110 * concluded that more strict state leads to valid finish. 2111 * 2112 * Therefore two states are equivalent if register state is more conservative 2113 * and explored stack state is more conservative than the current one. 2114 * Example: 2115 * explored current 2116 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 2117 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 2118 * 2119 * In other words if current stack state (one being explored) has more 2120 * valid slots than old one that already passed validation, it means 2121 * the verifier can stop exploring and conclude that current state is valid too 2122 * 2123 * Similarly with registers. If explored state has register type as invalid 2124 * whereas register type in current state is meaningful, it means that 2125 * the current state will reach 'bpf_exit' instruction safely 2126 */ 2127 static bool states_equal(struct verifier_state *old, struct verifier_state *cur) 2128 { 2129 struct reg_state *rold, *rcur; 2130 int i; 2131 2132 for (i = 0; i < MAX_BPF_REG; i++) { 2133 rold = &old->regs[i]; 2134 rcur = &cur->regs[i]; 2135 2136 if (memcmp(rold, rcur, sizeof(*rold)) == 0) 2137 continue; 2138 2139 if (rold->type == NOT_INIT || 2140 (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT)) 2141 continue; 2142 2143 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && 2144 compare_ptrs_to_packet(rold, rcur)) 2145 continue; 2146 2147 return false; 2148 } 2149 2150 for (i = 0; i < MAX_BPF_STACK; i++) { 2151 if (old->stack_slot_type[i] == STACK_INVALID) 2152 continue; 2153 if (old->stack_slot_type[i] != cur->stack_slot_type[i]) 2154 /* Ex: old explored (safe) state has STACK_SPILL in 2155 * this stack slot, but current has has STACK_MISC -> 2156 * this verifier states are not equivalent, 2157 * return false to continue verification of this path 2158 */ 2159 return false; 2160 if (i % BPF_REG_SIZE) 2161 continue; 2162 if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE], 2163 &cur->spilled_regs[i / BPF_REG_SIZE], 2164 sizeof(old->spilled_regs[0]))) 2165 /* when explored and current stack slot types are 2166 * the same, check that stored pointers types 2167 * are the same as well. 2168 * Ex: explored safe path could have stored 2169 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8} 2170 * but current path has stored: 2171 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16} 2172 * such verifier states are not equivalent. 2173 * return false to continue verification of this path 2174 */ 2175 return false; 2176 else 2177 continue; 2178 } 2179 return true; 2180 } 2181 2182 static int is_state_visited(struct verifier_env *env, int insn_idx) 2183 { 2184 struct verifier_state_list *new_sl; 2185 struct verifier_state_list *sl; 2186 2187 sl = env->explored_states[insn_idx]; 2188 if (!sl) 2189 /* this 'insn_idx' instruction wasn't marked, so we will not 2190 * be doing state search here 2191 */ 2192 return 0; 2193 2194 while (sl != STATE_LIST_MARK) { 2195 if (states_equal(&sl->state, &env->cur_state)) 2196 /* reached equivalent register/stack state, 2197 * prune the search 2198 */ 2199 return 1; 2200 sl = sl->next; 2201 } 2202 2203 /* there were no equivalent states, remember current one. 2204 * technically the current state is not proven to be safe yet, 2205 * but it will either reach bpf_exit (which means it's safe) or 2206 * it will be rejected. Since there are no loops, we won't be 2207 * seeing this 'insn_idx' instruction again on the way to bpf_exit 2208 */ 2209 new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER); 2210 if (!new_sl) 2211 return -ENOMEM; 2212 2213 /* add new state to the head of linked list */ 2214 memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); 2215 new_sl->next = env->explored_states[insn_idx]; 2216 env->explored_states[insn_idx] = new_sl; 2217 return 0; 2218 } 2219 2220 static int do_check(struct verifier_env *env) 2221 { 2222 struct verifier_state *state = &env->cur_state; 2223 struct bpf_insn *insns = env->prog->insnsi; 2224 struct reg_state *regs = state->regs; 2225 int insn_cnt = env->prog->len; 2226 int insn_idx, prev_insn_idx = 0; 2227 int insn_processed = 0; 2228 bool do_print_state = false; 2229 2230 init_reg_state(regs); 2231 insn_idx = 0; 2232 for (;;) { 2233 struct bpf_insn *insn; 2234 u8 class; 2235 int err; 2236 2237 if (insn_idx >= insn_cnt) { 2238 verbose("invalid insn idx %d insn_cnt %d\n", 2239 insn_idx, insn_cnt); 2240 return -EFAULT; 2241 } 2242 2243 insn = &insns[insn_idx]; 2244 class = BPF_CLASS(insn->code); 2245 2246 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 2247 verbose("BPF program is too large. Proccessed %d insn\n", 2248 insn_processed); 2249 return -E2BIG; 2250 } 2251 2252 err = is_state_visited(env, insn_idx); 2253 if (err < 0) 2254 return err; 2255 if (err == 1) { 2256 /* found equivalent state, can prune the search */ 2257 if (log_level) { 2258 if (do_print_state) 2259 verbose("\nfrom %d to %d: safe\n", 2260 prev_insn_idx, insn_idx); 2261 else 2262 verbose("%d: safe\n", insn_idx); 2263 } 2264 goto process_bpf_exit; 2265 } 2266 2267 if (log_level && do_print_state) { 2268 verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); 2269 print_verifier_state(&env->cur_state); 2270 do_print_state = false; 2271 } 2272 2273 if (log_level) { 2274 verbose("%d: ", insn_idx); 2275 print_bpf_insn(insn); 2276 } 2277 2278 if (class == BPF_ALU || class == BPF_ALU64) { 2279 err = check_alu_op(env, insn); 2280 if (err) 2281 return err; 2282 2283 } else if (class == BPF_LDX) { 2284 enum bpf_reg_type src_reg_type; 2285 2286 /* check for reserved fields is already done */ 2287 2288 /* check src operand */ 2289 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2290 if (err) 2291 return err; 2292 2293 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); 2294 if (err) 2295 return err; 2296 2297 src_reg_type = regs[insn->src_reg].type; 2298 2299 /* check that memory (src_reg + off) is readable, 2300 * the state of dst_reg will be updated by this func 2301 */ 2302 err = check_mem_access(env, insn->src_reg, insn->off, 2303 BPF_SIZE(insn->code), BPF_READ, 2304 insn->dst_reg); 2305 if (err) 2306 return err; 2307 2308 if (BPF_SIZE(insn->code) != BPF_W) { 2309 insn_idx++; 2310 continue; 2311 } 2312 2313 if (insn->imm == 0) { 2314 /* saw a valid insn 2315 * dst_reg = *(u32 *)(src_reg + off) 2316 * use reserved 'imm' field to mark this insn 2317 */ 2318 insn->imm = src_reg_type; 2319 2320 } else if (src_reg_type != insn->imm && 2321 (src_reg_type == PTR_TO_CTX || 2322 insn->imm == PTR_TO_CTX)) { 2323 /* ABuser program is trying to use the same insn 2324 * dst_reg = *(u32*) (src_reg + off) 2325 * with different pointer types: 2326 * src_reg == ctx in one branch and 2327 * src_reg == stack|map in some other branch. 2328 * Reject it. 2329 */ 2330 verbose("same insn cannot be used with different pointers\n"); 2331 return -EINVAL; 2332 } 2333 2334 } else if (class == BPF_STX) { 2335 enum bpf_reg_type dst_reg_type; 2336 2337 if (BPF_MODE(insn->code) == BPF_XADD) { 2338 err = check_xadd(env, insn); 2339 if (err) 2340 return err; 2341 insn_idx++; 2342 continue; 2343 } 2344 2345 /* check src1 operand */ 2346 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2347 if (err) 2348 return err; 2349 /* check src2 operand */ 2350 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 2351 if (err) 2352 return err; 2353 2354 dst_reg_type = regs[insn->dst_reg].type; 2355 2356 /* check that memory (dst_reg + off) is writeable */ 2357 err = check_mem_access(env, insn->dst_reg, insn->off, 2358 BPF_SIZE(insn->code), BPF_WRITE, 2359 insn->src_reg); 2360 if (err) 2361 return err; 2362 2363 if (insn->imm == 0) { 2364 insn->imm = dst_reg_type; 2365 } else if (dst_reg_type != insn->imm && 2366 (dst_reg_type == PTR_TO_CTX || 2367 insn->imm == PTR_TO_CTX)) { 2368 verbose("same insn cannot be used with different pointers\n"); 2369 return -EINVAL; 2370 } 2371 2372 } else if (class == BPF_ST) { 2373 if (BPF_MODE(insn->code) != BPF_MEM || 2374 insn->src_reg != BPF_REG_0) { 2375 verbose("BPF_ST uses reserved fields\n"); 2376 return -EINVAL; 2377 } 2378 /* check src operand */ 2379 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 2380 if (err) 2381 return err; 2382 2383 /* check that memory (dst_reg + off) is writeable */ 2384 err = check_mem_access(env, insn->dst_reg, insn->off, 2385 BPF_SIZE(insn->code), BPF_WRITE, 2386 -1); 2387 if (err) 2388 return err; 2389 2390 } else if (class == BPF_JMP) { 2391 u8 opcode = BPF_OP(insn->code); 2392 2393 if (opcode == BPF_CALL) { 2394 if (BPF_SRC(insn->code) != BPF_K || 2395 insn->off != 0 || 2396 insn->src_reg != BPF_REG_0 || 2397 insn->dst_reg != BPF_REG_0) { 2398 verbose("BPF_CALL uses reserved fields\n"); 2399 return -EINVAL; 2400 } 2401 2402 err = check_call(env, insn->imm); 2403 if (err) 2404 return err; 2405 2406 } else if (opcode == BPF_JA) { 2407 if (BPF_SRC(insn->code) != BPF_K || 2408 insn->imm != 0 || 2409 insn->src_reg != BPF_REG_0 || 2410 insn->dst_reg != BPF_REG_0) { 2411 verbose("BPF_JA uses reserved fields\n"); 2412 return -EINVAL; 2413 } 2414 2415 insn_idx += insn->off + 1; 2416 continue; 2417 2418 } else if (opcode == BPF_EXIT) { 2419 if (BPF_SRC(insn->code) != BPF_K || 2420 insn->imm != 0 || 2421 insn->src_reg != BPF_REG_0 || 2422 insn->dst_reg != BPF_REG_0) { 2423 verbose("BPF_EXIT uses reserved fields\n"); 2424 return -EINVAL; 2425 } 2426 2427 /* eBPF calling convetion is such that R0 is used 2428 * to return the value from eBPF program. 2429 * Make sure that it's readable at this time 2430 * of bpf_exit, which means that program wrote 2431 * something into it earlier 2432 */ 2433 err = check_reg_arg(regs, BPF_REG_0, SRC_OP); 2434 if (err) 2435 return err; 2436 2437 if (is_pointer_value(env, BPF_REG_0)) { 2438 verbose("R0 leaks addr as return value\n"); 2439 return -EACCES; 2440 } 2441 2442 process_bpf_exit: 2443 insn_idx = pop_stack(env, &prev_insn_idx); 2444 if (insn_idx < 0) { 2445 break; 2446 } else { 2447 do_print_state = true; 2448 continue; 2449 } 2450 } else { 2451 err = check_cond_jmp_op(env, insn, &insn_idx); 2452 if (err) 2453 return err; 2454 } 2455 } else if (class == BPF_LD) { 2456 u8 mode = BPF_MODE(insn->code); 2457 2458 if (mode == BPF_ABS || mode == BPF_IND) { 2459 err = check_ld_abs(env, insn); 2460 if (err) 2461 return err; 2462 2463 } else if (mode == BPF_IMM) { 2464 err = check_ld_imm(env, insn); 2465 if (err) 2466 return err; 2467 2468 insn_idx++; 2469 } else { 2470 verbose("invalid BPF_LD mode\n"); 2471 return -EINVAL; 2472 } 2473 } else { 2474 verbose("unknown insn class %d\n", class); 2475 return -EINVAL; 2476 } 2477 2478 insn_idx++; 2479 } 2480 2481 verbose("processed %d insns\n", insn_processed); 2482 return 0; 2483 } 2484 2485 /* look for pseudo eBPF instructions that access map FDs and 2486 * replace them with actual map pointers 2487 */ 2488 static int replace_map_fd_with_map_ptr(struct verifier_env *env) 2489 { 2490 struct bpf_insn *insn = env->prog->insnsi; 2491 int insn_cnt = env->prog->len; 2492 int i, j; 2493 2494 for (i = 0; i < insn_cnt; i++, insn++) { 2495 if (BPF_CLASS(insn->code) == BPF_LDX && 2496 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 2497 verbose("BPF_LDX uses reserved fields\n"); 2498 return -EINVAL; 2499 } 2500 2501 if (BPF_CLASS(insn->code) == BPF_STX && 2502 ((BPF_MODE(insn->code) != BPF_MEM && 2503 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 2504 verbose("BPF_STX uses reserved fields\n"); 2505 return -EINVAL; 2506 } 2507 2508 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 2509 struct bpf_map *map; 2510 struct fd f; 2511 2512 if (i == insn_cnt - 1 || insn[1].code != 0 || 2513 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 2514 insn[1].off != 0) { 2515 verbose("invalid bpf_ld_imm64 insn\n"); 2516 return -EINVAL; 2517 } 2518 2519 if (insn->src_reg == 0) 2520 /* valid generic load 64-bit imm */ 2521 goto next_insn; 2522 2523 if (insn->src_reg != BPF_PSEUDO_MAP_FD) { 2524 verbose("unrecognized bpf_ld_imm64 insn\n"); 2525 return -EINVAL; 2526 } 2527 2528 f = fdget(insn->imm); 2529 map = __bpf_map_get(f); 2530 if (IS_ERR(map)) { 2531 verbose("fd %d is not pointing to valid bpf_map\n", 2532 insn->imm); 2533 return PTR_ERR(map); 2534 } 2535 2536 /* store map pointer inside BPF_LD_IMM64 instruction */ 2537 insn[0].imm = (u32) (unsigned long) map; 2538 insn[1].imm = ((u64) (unsigned long) map) >> 32; 2539 2540 /* check whether we recorded this map already */ 2541 for (j = 0; j < env->used_map_cnt; j++) 2542 if (env->used_maps[j] == map) { 2543 fdput(f); 2544 goto next_insn; 2545 } 2546 2547 if (env->used_map_cnt >= MAX_USED_MAPS) { 2548 fdput(f); 2549 return -E2BIG; 2550 } 2551 2552 /* hold the map. If the program is rejected by verifier, 2553 * the map will be released by release_maps() or it 2554 * will be used by the valid program until it's unloaded 2555 * and all maps are released in free_bpf_prog_info() 2556 */ 2557 map = bpf_map_inc(map, false); 2558 if (IS_ERR(map)) { 2559 fdput(f); 2560 return PTR_ERR(map); 2561 } 2562 env->used_maps[env->used_map_cnt++] = map; 2563 2564 fdput(f); 2565 next_insn: 2566 insn++; 2567 i++; 2568 } 2569 } 2570 2571 /* now all pseudo BPF_LD_IMM64 instructions load valid 2572 * 'struct bpf_map *' into a register instead of user map_fd. 2573 * These pointers will be used later by verifier to validate map access. 2574 */ 2575 return 0; 2576 } 2577 2578 /* drop refcnt of maps used by the rejected program */ 2579 static void release_maps(struct verifier_env *env) 2580 { 2581 int i; 2582 2583 for (i = 0; i < env->used_map_cnt; i++) 2584 bpf_map_put(env->used_maps[i]); 2585 } 2586 2587 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 2588 static void convert_pseudo_ld_imm64(struct verifier_env *env) 2589 { 2590 struct bpf_insn *insn = env->prog->insnsi; 2591 int insn_cnt = env->prog->len; 2592 int i; 2593 2594 for (i = 0; i < insn_cnt; i++, insn++) 2595 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 2596 insn->src_reg = 0; 2597 } 2598 2599 /* convert load instructions that access fields of 'struct __sk_buff' 2600 * into sequence of instructions that access fields of 'struct sk_buff' 2601 */ 2602 static int convert_ctx_accesses(struct verifier_env *env) 2603 { 2604 struct bpf_insn *insn = env->prog->insnsi; 2605 int insn_cnt = env->prog->len; 2606 struct bpf_insn insn_buf[16]; 2607 struct bpf_prog *new_prog; 2608 enum bpf_access_type type; 2609 int i; 2610 2611 if (!env->prog->aux->ops->convert_ctx_access) 2612 return 0; 2613 2614 for (i = 0; i < insn_cnt; i++, insn++) { 2615 u32 insn_delta, cnt; 2616 2617 if (insn->code == (BPF_LDX | BPF_MEM | BPF_W)) 2618 type = BPF_READ; 2619 else if (insn->code == (BPF_STX | BPF_MEM | BPF_W)) 2620 type = BPF_WRITE; 2621 else 2622 continue; 2623 2624 if (insn->imm != PTR_TO_CTX) { 2625 /* clear internal mark */ 2626 insn->imm = 0; 2627 continue; 2628 } 2629 2630 cnt = env->prog->aux->ops-> 2631 convert_ctx_access(type, insn->dst_reg, insn->src_reg, 2632 insn->off, insn_buf, env->prog); 2633 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 2634 verbose("bpf verifier is misconfigured\n"); 2635 return -EINVAL; 2636 } 2637 2638 new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt); 2639 if (!new_prog) 2640 return -ENOMEM; 2641 2642 insn_delta = cnt - 1; 2643 2644 /* keep walking new program and skip insns we just inserted */ 2645 env->prog = new_prog; 2646 insn = new_prog->insnsi + i + insn_delta; 2647 2648 insn_cnt += insn_delta; 2649 i += insn_delta; 2650 } 2651 2652 return 0; 2653 } 2654 2655 static void free_states(struct verifier_env *env) 2656 { 2657 struct verifier_state_list *sl, *sln; 2658 int i; 2659 2660 if (!env->explored_states) 2661 return; 2662 2663 for (i = 0; i < env->prog->len; i++) { 2664 sl = env->explored_states[i]; 2665 2666 if (sl) 2667 while (sl != STATE_LIST_MARK) { 2668 sln = sl->next; 2669 kfree(sl); 2670 sl = sln; 2671 } 2672 } 2673 2674 kfree(env->explored_states); 2675 } 2676 2677 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) 2678 { 2679 char __user *log_ubuf = NULL; 2680 struct verifier_env *env; 2681 int ret = -EINVAL; 2682 2683 if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS) 2684 return -E2BIG; 2685 2686 /* 'struct verifier_env' can be global, but since it's not small, 2687 * allocate/free it every time bpf_check() is called 2688 */ 2689 env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL); 2690 if (!env) 2691 return -ENOMEM; 2692 2693 env->prog = *prog; 2694 2695 /* grab the mutex to protect few globals used by verifier */ 2696 mutex_lock(&bpf_verifier_lock); 2697 2698 if (attr->log_level || attr->log_buf || attr->log_size) { 2699 /* user requested verbose verifier output 2700 * and supplied buffer to store the verification trace 2701 */ 2702 log_level = attr->log_level; 2703 log_ubuf = (char __user *) (unsigned long) attr->log_buf; 2704 log_size = attr->log_size; 2705 log_len = 0; 2706 2707 ret = -EINVAL; 2708 /* log_* values have to be sane */ 2709 if (log_size < 128 || log_size > UINT_MAX >> 8 || 2710 log_level == 0 || log_ubuf == NULL) 2711 goto free_env; 2712 2713 ret = -ENOMEM; 2714 log_buf = vmalloc(log_size); 2715 if (!log_buf) 2716 goto free_env; 2717 } else { 2718 log_level = 0; 2719 } 2720 2721 ret = replace_map_fd_with_map_ptr(env); 2722 if (ret < 0) 2723 goto skip_full_check; 2724 2725 env->explored_states = kcalloc(env->prog->len, 2726 sizeof(struct verifier_state_list *), 2727 GFP_USER); 2728 ret = -ENOMEM; 2729 if (!env->explored_states) 2730 goto skip_full_check; 2731 2732 ret = check_cfg(env); 2733 if (ret < 0) 2734 goto skip_full_check; 2735 2736 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 2737 2738 ret = do_check(env); 2739 2740 skip_full_check: 2741 while (pop_stack(env, NULL) >= 0); 2742 free_states(env); 2743 2744 if (ret == 0) 2745 /* program is valid, convert *(u32*)(ctx + off) accesses */ 2746 ret = convert_ctx_accesses(env); 2747 2748 if (log_level && log_len >= log_size - 1) { 2749 BUG_ON(log_len >= log_size); 2750 /* verifier log exceeded user supplied buffer */ 2751 ret = -ENOSPC; 2752 /* fall through to return what was recorded */ 2753 } 2754 2755 /* copy verifier log back to user space including trailing zero */ 2756 if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) { 2757 ret = -EFAULT; 2758 goto free_log_buf; 2759 } 2760 2761 if (ret == 0 && env->used_map_cnt) { 2762 /* if program passed verifier, update used_maps in bpf_prog_info */ 2763 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 2764 sizeof(env->used_maps[0]), 2765 GFP_KERNEL); 2766 2767 if (!env->prog->aux->used_maps) { 2768 ret = -ENOMEM; 2769 goto free_log_buf; 2770 } 2771 2772 memcpy(env->prog->aux->used_maps, env->used_maps, 2773 sizeof(env->used_maps[0]) * env->used_map_cnt); 2774 env->prog->aux->used_map_cnt = env->used_map_cnt; 2775 2776 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 2777 * bpf_ld_imm64 instructions 2778 */ 2779 convert_pseudo_ld_imm64(env); 2780 } 2781 2782 free_log_buf: 2783 if (log_level) 2784 vfree(log_buf); 2785 free_env: 2786 if (!env->prog->aux->used_maps) 2787 /* if we didn't copy map pointers into bpf_prog_info, release 2788 * them now. Otherwise free_bpf_prog_info() will release them. 2789 */ 2790 release_maps(env); 2791 *prog = env->prog; 2792 kfree(env); 2793 mutex_unlock(&bpf_verifier_lock); 2794 return ret; 2795 } 2796