/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 32k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
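 *
 * As an illustrative sketch (not taken from a real program), the usual
 * null check a program must perform before dereferencing the lookup
 * result could be encoded as:
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), // R0 is PTR_TO_MAP_VALUE_OR_NULL
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),   // fall-through: R0 is PTR_TO_MAP_VALUE
 *    BPF_EXIT_INSN(),                       // jump target: R0 is CONST_IMM 0
 * A program that skips the BPF_JEQ check and dereferences R0 directly is
 * rejected, since PTR_TO_MAP_VALUE_OR_NULL is not a valid base register
 * type for check_mem_access().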
 */

struct reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
		s64 imm;

		/* valid when type == PTR_TO_PACKET* */
		struct {
			u32 id;
			u16 off;
			u16 range;
		};

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC	  /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

/* state of the program:
 * type of all registers and stack info
 */
struct verifier_state {
	struct reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

/* linked list of verifier states used to prune search */
struct verifier_state_list {
	struct verifier_state state;
	struct verifier_state_list *next;
};

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct verifier_stack_elem *next;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	struct verifier_state cur_state; /* current verifier state */
	struct verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	bool allow_ptr_leaks;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	65536
#define BPF_COMPLEXITY_LIMIT_STACK	1024

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	int regno;
	int access_size;
};

/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[UNKNOWN_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[FRAME_PTR]		= "fp",
	[PTR_TO_STACK]		= "fp",
	[CONST_IMM]		= "imm",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_END]	= "pkt_end",
};

static void print_verifier_state(struct verifier_state *state)
{
	struct reg_state *reg;
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(" R%d=%s", i, reg_type_str[t]);
		if (t == CONST_IMM || t == PTR_TO_STACK)
			verbose("%lld", reg->imm);
		else if (t == PTR_TO_PACKET)
			verbose("(id=%d,off=%d,r=%d)",
				reg->id, reg->off, reg->range);
		else if (t == UNKNOWN_VALUE && reg->imm)
			verbose("%lld", reg->imm);
		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
			 t == PTR_TO_MAP_VALUE_OR_NULL)
			verbose("(ks=%d,vs=%d)",
				reg->map_ptr->key_size,
				reg->map_ptr->value_size);
	}
	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (state->stack_slot_type[i] == STACK_SPILL)
			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
				reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
	}
	verbose("\n");
}
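
/* A state line produced by print_verifier_state() might look like (an
 * illustrative sketch, not captured from a real run):
 *   R1=ctx R2=fp-4 R6=pkt(id=0,off=0,r=14) R10=fp fp-8=map_value
 * i.e. R2 points 4 bytes below the frame pointer and the 8-byte stack
 * slot at fp-8 holds a spilled PTR_TO_MAP_VALUE register.
 */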
"(u32) " : "", 348 insn->imm); 349 } else if (class == BPF_STX) { 350 if (BPF_MODE(insn->code) == BPF_MEM) 351 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", 352 insn->code, 353 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 354 insn->dst_reg, 355 insn->off, insn->src_reg); 356 else if (BPF_MODE(insn->code) == BPF_XADD) 357 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", 358 insn->code, 359 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 360 insn->dst_reg, insn->off, 361 insn->src_reg); 362 else 363 verbose("BUG_%02x\n", insn->code); 364 } else if (class == BPF_ST) { 365 if (BPF_MODE(insn->code) != BPF_MEM) { 366 verbose("BUG_st_%02x\n", insn->code); 367 return; 368 } 369 verbose("(%02x) *(%s *)(r%d %+d) = %d\n", 370 insn->code, 371 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 372 insn->dst_reg, 373 insn->off, insn->imm); 374 } else if (class == BPF_LDX) { 375 if (BPF_MODE(insn->code) != BPF_MEM) { 376 verbose("BUG_ldx_%02x\n", insn->code); 377 return; 378 } 379 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", 380 insn->code, insn->dst_reg, 381 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 382 insn->src_reg, insn->off); 383 } else if (class == BPF_LD) { 384 if (BPF_MODE(insn->code) == BPF_ABS) { 385 verbose("(%02x) r0 = *(%s *)skb[%d]\n", 386 insn->code, 387 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 388 insn->imm); 389 } else if (BPF_MODE(insn->code) == BPF_IND) { 390 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", 391 insn->code, 392 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 393 insn->src_reg, insn->imm); 394 } else if (BPF_MODE(insn->code) == BPF_IMM) { 395 verbose("(%02x) r%d = 0x%x\n", 396 insn->code, insn->dst_reg, insn->imm); 397 } else { 398 verbose("BUG_ld_%02x\n", insn->code); 399 return; 400 } 401 } else if (class == BPF_JMP) { 402 u8 opcode = BPF_OP(insn->code); 403 404 if (opcode == BPF_CALL) { 405 verbose("(%02x) call %d\n", insn->code, insn->imm); 406 } else if (insn->code == (BPF_JMP | BPF_JA)) { 407 verbose("(%02x) goto pc%+d\n", 408 insn->code, insn->off); 409 } else if (insn->code == (BPF_JMP | BPF_EXIT)) { 410 verbose("(%02x) exit\n", insn->code); 411 } else if (BPF_SRC(insn->code) == BPF_X) { 412 verbose("(%02x) if r%d %s r%d goto pc%+d\n", 413 insn->code, insn->dst_reg, 414 bpf_jmp_string[BPF_OP(insn->code) >> 4], 415 insn->src_reg, insn->off); 416 } else { 417 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", 418 insn->code, insn->dst_reg, 419 bpf_jmp_string[BPF_OP(insn->code) >> 4], 420 insn->imm, insn->off); 421 } 422 } else { 423 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); 424 } 425 } 426 427 static int pop_stack(struct verifier_env *env, int *prev_insn_idx) 428 { 429 struct verifier_stack_elem *elem; 430 int insn_idx; 431 432 if (env->head == NULL) 433 return -1; 434 435 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); 436 insn_idx = env->head->insn_idx; 437 if (prev_insn_idx) 438 *prev_insn_idx = env->head->prev_insn_idx; 439 elem = env->head->next; 440 kfree(env->head); 441 env->head = elem; 442 env->stack_size--; 443 return insn_idx; 444 } 445 446 static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx, 447 int prev_insn_idx) 448 { 449 struct verifier_stack_elem *elem; 450 451 elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL); 452 if (!elem) 453 goto err; 454 455 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); 456 elem->insn_idx = insn_idx; 457 elem->prev_insn_idx = prev_insn_idx; 458 elem->next = env->head; 459 env->head = elem; 460 env->stack_size++; 461 if (env->stack_size > 
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
		verbose("BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	/* pop all elements and return */
	while (pop_stack(env, NULL) >= 0);
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void init_reg_state(struct reg_state *regs)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		regs[i].type = NOT_INIT;
		regs[i].imm = 0;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = FRAME_PTR;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
}

static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
{
	BUG_ON(regno >= MAX_BPF_REG);
	regs[regno].type = UNKNOWN_VALUE;
	regs[regno].imm = 0;
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int check_reg_arg(struct reg_state *regs, u32 regno,
			 enum reg_arg_type t)
{
	if (regno >= MAX_BPF_REG) {
		verbose("R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose("R%d !read_ok\n", regno);
			return -EACCES;
		}
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose("frame pointer is read only\n");
			return -EACCES;
		}
		if (t == DST_OP)
			mark_reg_unknown_value(regs, regno);
	}
	return 0;
}

static int bpf_size_to_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 8;
	else
		return -EINVAL;
}

static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_END:
	case FRAME_PTR:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}
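
/* As an illustrative sketch of the spill/fill tracking below: a program
 * that saves its context pointer with
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8)
 * marks all eight stack_slot_type bytes of the fp-8 slot as STACK_SPILL
 * and preserves R1's full state in spilled_regs[]; a later
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8)
 * restores that state, so R1 is PTR_TO_CTX again rather than an
 * UNKNOWN_VALUE read of STACK_MISC bytes.
 */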

/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct verifier_state *state, int off, int size,
			     int value_regno)
{
	int i;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */

	if (value_regno >= 0 &&
	    is_spillable_regtype(state->regs[value_regno].type)) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}

		/* save register state */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			state->regs[value_regno];

		for (i = 0; i < BPF_REG_SIZE; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
	} else {
		/* regular write of data into stack */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			(struct reg_state) {};

		for (i = 0; i < size; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
	}
	return 0;
}

static int check_stack_read(struct verifier_state *state, int off, int size,
			    int value_regno)
{
	u8 *slot_type;
	int i;

	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];

	if (slot_type[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (slot_type[i] != STACK_SPILL) {
				verbose("corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0)
			/* restore register state from stack */
			state->regs[value_regno] =
				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
		return 0;
	} else {
		for (i = 0; i < size; i++) {
			if (slot_type[i] != STACK_MISC) {
				verbose("invalid read from stack off %d+%d size %d\n",
					off, i, size);
				return -EACCES;
			}
		}
		if (value_regno >= 0)
			/* have read misc data from the stack */
			mark_reg_unknown_value(state->regs, value_regno);
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
			    int size)
{
	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

	if (off < 0 || off + size > map->value_size) {
		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

#define MAX_PACKET_OFF 0xffff

static int check_packet_access(struct verifier_env *env, u32 regno, int off,
			       int size)
{
	struct reg_state *regs = env->cur_state.regs;
	struct reg_state *reg = &regs[regno];

	off += reg->off;
	if (off < 0 || off + size > reg->range) {
		verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, reg->off, reg->range);
		return -EACCES;
	}
	return 0;
}

/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
	if (env->prog->aux->ops->is_valid_access &&
	    env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
		/* remember the offset of last byte accessed in ctx */
		if (env->prog->aux->max_ctx_offset < off + size)
			env->prog->aux->max_ctx_offset = off + size;
		return 0;
	}

	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static bool is_pointer_value(struct verifier_env *env, int regno)
{
	if (env->allow_ptr_leaks)
		return false;

	switch (env->cur_state.regs[regno].type) {
	case UNKNOWN_VALUE:
	case CONST_IMM:
		return false;
	default:
		return true;
	}
}

static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
			       int off, int size)
{
	if (reg->type != PTR_TO_PACKET) {
		if (off % size != 0) {
			verbose("misaligned access off %d size %d\n",
				off, size);
			return -EACCES;
		} else {
			return 0;
		}
	}

	switch (env->prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		break;
	default:
		verbose("verifier is misconfigured\n");
		return -EACCES;
	}

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		/* misaligned access to packet is ok on x86,arm,arm64 */
		return 0;

	if (reg->id && size != 1) {
		verbose("Unknown packet alignment. Only byte-sized access allowed\n");
		return -EACCES;
	}

	/* skb->data is NET_IP_ALIGN-ed */
	if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
		verbose("misaligned packet access off %d+%d+%d size %d\n",
			NET_IP_ALIGN, reg->off, off, size);
		return -EACCES;
	}
	return 0;
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *reg = &state->regs[regno];
	int size, err = 0;

	if (reg->type == PTR_TO_STACK)
		off += reg->imm;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	err = check_ptr_alignment(env, reg, off, size);
	if (err)
		return err;

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}
		err = check_map_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (reg->type == PTR_TO_CTX) {
		enum bpf_reg_type reg_type = UNKNOWN_VALUE;

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		err = check_ctx_access(env, off, size, t, &reg_type);
		if (!err && t == BPF_READ && value_regno >= 0) {
			mark_reg_unknown_value(state->regs, value_regno);
			if (env->allow_ptr_leaks)
				/* note that reg.[id|off|range] == 0 */
				state->regs[value_regno].type = reg_type;
		}

	} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose("invalid stack off=%d size=%d\n", off, size);
			return -EACCES;
		}
		if (t == BPF_WRITE) {
			if (!env->allow_ptr_leaks &&
			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
			    size != BPF_REG_SIZE) {
				verbose("attempt to corrupt spilled pointer on stack\n");
				return -EACCES;
			}
			err = check_stack_write(state, off, size, value_regno);
		} else {
			err = check_stack_read(state, off, size, value_regno);
		}
	} else if (state->regs[regno].type == PTR_TO_PACKET) {
		if (t == BPF_WRITE) {
			verbose("cannot write into packet\n");
			return -EACCES;
		}
		err = check_packet_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);
	} else {
		verbose("R%d invalid mem access '%s'\n",
			regno, reg_type_str[reg->type]);
		return -EACCES;
	}

	if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks &&
	    state->regs[value_regno].type == UNKNOWN_VALUE) {
		/* 1 or 2 byte load zero-extends, determine the number of
		 * zero upper bits. Not doing it for 4 byte load, since
		 * such values cannot be added to ptr_to_packet anyway.
		 */
		state->regs[value_regno].imm = 64 - size * 8;
	}
	return err;
}

static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose("BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(regs, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}

/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of stack are initialized
 */
static int check_stack_boundary(struct verifier_env *env, int regno,
				int access_size, bool zero_size_allowed,
				struct bpf_call_arg_meta *meta)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs;
	int off, i;

	if (regs[regno].type != PTR_TO_STACK) {
		if (zero_size_allowed && access_size == 0 &&
		    regs[regno].type == CONST_IMM &&
		    regs[regno].imm == 0)
			return 0;

		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[regs[regno].type],
			reg_type_str[PTR_TO_STACK]);
		return -EACCES;
	}

	off = regs[regno].imm;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size <= 0) {
		verbose("invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	if (meta && meta->raw_mode) {
		meta->access_size = access_size;
		meta->regno = regno;
		return 0;
	}

	for (i = 0; i < access_size; i++) {
		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
			verbose("invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}

static int check_func_arg(struct verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type,
			  struct bpf_call_arg_meta *meta)
{
	struct reg_state *reg = env->cur_state.regs + regno;
	enum bpf_reg_type expected_type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	if (reg->type == NOT_INIT) {
		verbose("R%d !read_ok\n", regno);
		return -EACCES;
	}

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose("R%d leaks addr into helper function\n", regno);
			return -EACCES;
		}
		return 0;
	}

	if (arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
	} else if (arg_type == ARG_CONST_STACK_SIZE ||
		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
		expected_type = CONST_IMM;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
	} else if (arg_type == ARG_PTR_TO_STACK ||
		   arg_type == ARG_PTR_TO_RAW_STACK) {
		expected_type = PTR_TO_STACK;
		/* One exception here. In case function allows for NULL to be
		 * passed in as argument, it's a CONST_IMM type. Final test
		 * happens during stack boundary checking.
		 */
		if (reg->type == CONST_IMM && reg->imm == 0)
			expected_type = CONST_IMM;
		meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK;
	} else {
		verbose("unsupported arg_type %d\n", arg_type);
		return -EFAULT;
	}

	if (reg->type != expected_type) {
		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[reg->type], reg_type_str[expected_type]);
		return -EACCES;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		meta->map_ptr = reg->map_ptr;
	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!meta->map_ptr) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose("invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, meta->map_ptr->key_size,
					   false, NULL);
	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!meta->map_ptr) {
			/* kernel subsystem misconfigured verifier */
			verbose("invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno,
					   meta->map_ptr->value_size,
					   false, NULL);
	} else if (arg_type == ARG_CONST_STACK_SIZE ||
		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
		bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);

		/* bpf_xxx(..., buf, len) call will access 'len' bytes
		 * from stack pointer 'buf'. Check it
		 * note: regno == len, regno - 1 == buf
		 */
		if (regno == 0) {
			/* kernel subsystem misconfigured verifier */
			verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno - 1, reg->imm,
					   zero_size_allowed, meta);
	}

	return err;
}

static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
	if (!map)
		return 0;

	/* We need a two way check, first is from map perspective ... */
	switch (map->map_type) {
	case BPF_MAP_TYPE_PROG_ARRAY:
		if (func_id != BPF_FUNC_tail_call)
			goto error;
		break;
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		if (func_id != BPF_FUNC_perf_event_read &&
		    func_id != BPF_FUNC_perf_event_output)
			goto error;
		break;
	case BPF_MAP_TYPE_STACK_TRACE:
		if (func_id != BPF_FUNC_get_stackid)
			goto error;
		break;
	default:
		break;
	}

	/* ... and second from the function itself. */
	switch (func_id) {
	case BPF_FUNC_tail_call:
		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			goto error;
		break;
	case BPF_FUNC_perf_event_read:
	case BPF_FUNC_perf_event_output:
		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			goto error;
		break;
	case BPF_FUNC_get_stackid:
		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
			goto error;
		break;
	default:
		break;
	}

	return 0;
error:
	verbose("cannot pass map_type %d into func %d\n",
		map->map_type, func_id);
	return -EINVAL;
}

static int check_raw_mode(const struct bpf_func_proto *fn)
{
	int count = 0;

	if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg2_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg3_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg4_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg5_type == ARG_PTR_TO_RAW_STACK)
		count++;

	return count > 1 ? -EINVAL : 0;
}

static void clear_all_pkt_pointers(struct verifier_env *env)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].type == PTR_TO_PACKET ||
		    regs[i].type == PTR_TO_PACKET_END)
			mark_reg_unknown_value(regs, i);

	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (state->stack_slot_type[i] != STACK_SPILL)
			continue;
		reg = &state->spilled_regs[i / BPF_REG_SIZE];
		if (reg->type != PTR_TO_PACKET &&
		    reg->type != PTR_TO_PACKET_END)
			continue;
		reg->type = UNKNOWN_VALUE;
		reg->imm = 0;
	}
}

static int check_call(struct verifier_env *env, int func_id)
{
	struct verifier_state *state = &env->cur_state;
	const struct bpf_func_proto *fn = NULL;
	struct reg_state *regs = state->regs;
	struct reg_state *reg;
	struct bpf_call_arg_meta meta;
	bool changes_data;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose("invalid func %d\n", func_id);
		return -EINVAL;
	}

	if (env->prog->aux->ops->get_func_proto)
		fn = env->prog->aux->ops->get_func_proto(func_id);

	if (!fn) {
		verbose("unknown func %d\n", func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose("cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	changes_data = bpf_helper_changes_skb_data(fn->func);

	memset(&meta, 0, sizeof(meta));

	/* We only support one arg being in raw mode at the moment, which
	 * is sufficient for the helper functions we have right now.
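	 * (As an illustration: a helper such as bpf_probe_read(), whose
	 * proto pairs an ARG_PTR_TO_RAW_STACK buffer with an
	 * ARG_CONST_STACK_SIZE length, is the intended user; the helper
	 * fills the buffer itself, so the verifier does not require the
	 * stack slots to be initialized before the call.)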
	 */
	err = check_raw_mode(fn);
	if (err) {
		verbose("kernel subsystem misconfigured func %d\n", func_id);
		return err;
	}

	/* check args */
	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
	if (err)
		return err;

	/* Mark slots with STACK_MISC in case of raw mode, stack offset
	 * is inferred from register state.
	 */
	for (i = 0; i < meta.access_size; i++) {
		err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
		if (err)
			return err;
	}

	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* update return register */
	if (fn->ret_type == RET_INTEGER) {
		regs[BPF_REG_0].type = UNKNOWN_VALUE;
	} else if (fn->ret_type == RET_VOID) {
		regs[BPF_REG_0].type = NOT_INIT;
	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (meta.map_ptr == NULL) {
			verbose("kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = meta.map_ptr;
	} else {
		verbose("unknown return type %d of func %d\n",
			fn->ret_type, func_id);
		return -EINVAL;
	}

	err = check_map_func_compatibility(meta.map_ptr, func_id);
	if (err)
		return err;

	if (changes_data)
		clear_all_pkt_pointers(env);
	return 0;
}

static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	struct reg_state *dst_reg = &regs[insn->dst_reg];
	struct reg_state *src_reg = &regs[insn->src_reg];
	struct reg_state tmp_reg;
	s32 imm;

	if (BPF_SRC(insn->code) == BPF_K) {
		/* pkt_ptr += imm */
		imm = insn->imm;

add_imm:
		if (imm <= 0) {
			verbose("addition of negative constant to packet pointer is not allowed\n");
			return -EACCES;
		}
		if (imm >= MAX_PACKET_OFF ||
		    imm + dst_reg->off >= MAX_PACKET_OFF) {
			verbose("constant %d is too large to add to packet pointer\n",
				imm);
			return -EACCES;
		}
		/* a constant was added to pkt_ptr.
		 * Remember it while keeping the same 'id'
		 */
		dst_reg->off += imm;
	} else {
		if (src_reg->type == PTR_TO_PACKET) {
			/* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */
			tmp_reg = *dst_reg;  /* save r7 state */
			*dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */
			src_reg = &tmp_reg;  /* pretend it's src_reg state */
			/* if the checks below reject it, the copy won't matter,
			 * since we're rejecting the whole program. If all ok,
			 * then imm22 state will be added to r7
			 * and r7 will be pkt(id=0,off=22,r=62) while
			 * r6 will stay as pkt(id=0,off=0,r=62)
			 */
		}

		if (src_reg->type == CONST_IMM) {
			/* pkt_ptr += reg where reg is known constant */
			imm = src_reg->imm;
			goto add_imm;
		}
		/* disallow pkt_ptr += reg
		 * if reg is not unknown_value with guaranteed zero upper bits
		 * otherwise pkt_ptr may overflow and addition will become
		 * subtraction which is not allowed
		 */
		if (src_reg->type != UNKNOWN_VALUE) {
			verbose("cannot add '%s' to ptr_to_packet\n",
				reg_type_str[src_reg->type]);
			return -EACCES;
		}
		if (src_reg->imm < 48) {
			verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n",
				src_reg->imm);
			return -EACCES;
		}
		/* dst_reg stays as pkt_ptr type and since some positive
		 * integer value was added to the pointer, increment its 'id'
		 */
		dst_reg->id++;

		/* something was added to pkt_ptr, set range and off to zero */
		dst_reg->off = 0;
		dst_reg->range = 0;
	}
	return 0;
}
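
/* Illustrative sketch of the two cases above: with
 * R6=pkt(id=0,off=0,r=62), "r6 += 14" keeps id 0 and yields
 * pkt(id=0,off=14,r=62), so up to 48 bytes past r6 stay provably in
 * bounds. Adding a register with only its upper bits known zero instead
 * bumps 'id' and clears off/range, so the old bounds knowledge is
 * dropped and a fresh pkt_end comparison is needed before any
 * dereference.
 */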

static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	struct reg_state *dst_reg = &regs[insn->dst_reg];
	u8 opcode = BPF_OP(insn->code);
	s64 imm_log2;

	/* for type == UNKNOWN_VALUE:
	 * imm > 0 -> number of zero upper bits
	 * imm == 0 -> don't track which is the same as all bits can be non-zero
	 */

	if (BPF_SRC(insn->code) == BPF_X) {
		struct reg_state *src_reg = &regs[insn->src_reg];

		if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
		    dst_reg->imm && opcode == BPF_ADD) {
			/* dreg += sreg
			 * where both have zero upper bits. Adding them
			 * can only result in making one more bit non-zero
			 * in the larger value.
			 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
			 *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
			 */
			dst_reg->imm = min(dst_reg->imm, src_reg->imm);
			dst_reg->imm--;
			return 0;
		}
		if (src_reg->type == CONST_IMM && src_reg->imm > 0 &&
		    dst_reg->imm && opcode == BPF_ADD) {
			/* dreg += sreg
			 * where dreg has zero upper bits and sreg is const.
			 * Adding them can only result in making one more bit
			 * non-zero in the larger value.
			 */
			imm_log2 = __ilog2_u64((long long)src_reg->imm);
			dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
			dst_reg->imm--;
			return 0;
		}
		/* all other cases are not supported yet, just mark dst_reg */
		dst_reg->imm = 0;
		return 0;
	}

	/* sign extend 32-bit imm into 64-bit to make sure that
	 * negative values occupy bit 63. Note ilog2() would have
	 * been incorrect, since sizeof(insn->imm) == 4
	 */
	imm_log2 = __ilog2_u64((long long)insn->imm);

	if (dst_reg->imm && opcode == BPF_LSH) {
		/* reg <<= imm
		 * if reg was a result of 2 byte load, then its imm == 48
		 * which means that upper 48 bits are zero and shifting this reg
		 * left by 4 would mean that upper 44 bits are still zero
		 */
		dst_reg->imm -= insn->imm;
	} else if (dst_reg->imm && opcode == BPF_MUL) {
		/* reg *= imm
		 * if multiplying by 14 subtract 4
		 * This is conservative calculation of upper zero bits.
		 * It's not trying to special case insn->imm == 1 or 0 cases
		 */
		dst_reg->imm -= imm_log2 + 1;
	} else if (opcode == BPF_AND) {
		/* reg &= imm */
		dst_reg->imm = 63 - imm_log2;
	} else if (dst_reg->imm && opcode == BPF_ADD) {
		/* reg += imm */
		dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
		dst_reg->imm--;
	} else if (opcode == BPF_RSH) {
		/* reg >>= imm
		 * which means that after right shift, upper bits will be zero
		 * note that verifier already checked that
		 * 0 <= imm < 64 for shift insn
		 */
		dst_reg->imm += insn->imm;
		if (unlikely(dst_reg->imm > 64))
			/* some dumb code did:
			 * r2 = *(u32 *)mem;
			 * r2 >>= 32;
			 * and all bits are zero now */
			dst_reg->imm = 64;
	} else {
		/* all other alu ops, means that we don't know what will
		 * happen to the value, mark it with unknown number of zero bits
		 */
		dst_reg->imm = 0;
	}

	if (dst_reg->imm < 0) {
		/* all 64 bits of the register can contain non-zero bits
		 * and such value cannot be added to ptr_to_packet, since it
		 * may overflow, mark it as unknown to avoid further eval
		 */
		dst_reg->imm = 0;
	}
	return 0;
}

static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	struct reg_state *dst_reg = &regs[insn->dst_reg];
	struct reg_state *src_reg = &regs[insn->src_reg];
	u8 opcode = BPF_OP(insn->code);

	/* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
	 * Don't care about overflow or negative values, just add them
	 */
	if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
		dst_reg->imm += insn->imm;
	else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
		 src_reg->type == CONST_IMM)
		dst_reg->imm += src_reg->imm;
	else
		mark_reg_unknown_value(regs, insn->dst_reg);
	return 0;
}
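
/* Illustrative walk-through of the zero-upper-bits tracking above:
 *   r2 = *(u16 *)(r1 + 0)   -> r2 imm=48 (upper 48 bits known zero)
 *   r2 <<= 2                -> imm=46
 *   r2 += 1                 -> imm=45 (conservatively one bit wider)
 * Since check_packet_ptr_add() demands at least 48 upper zero bits,
 * "pkt_ptr += r2" would be rejected after these ops, while adding the
 * unshifted 2-byte load (imm=48) would be accepted.
 */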
verbose("BPF_MOV uses reserved fields\n"); 1465 return -EINVAL; 1466 } 1467 } 1468 1469 /* check dest operand */ 1470 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1471 if (err) 1472 return err; 1473 1474 if (BPF_SRC(insn->code) == BPF_X) { 1475 if (BPF_CLASS(insn->code) == BPF_ALU64) { 1476 /* case: R1 = R2 1477 * copy register state to dest reg 1478 */ 1479 regs[insn->dst_reg] = regs[insn->src_reg]; 1480 } else { 1481 if (is_pointer_value(env, insn->src_reg)) { 1482 verbose("R%d partial copy of pointer\n", 1483 insn->src_reg); 1484 return -EACCES; 1485 } 1486 regs[insn->dst_reg].type = UNKNOWN_VALUE; 1487 regs[insn->dst_reg].map_ptr = NULL; 1488 } 1489 } else { 1490 /* case: R = imm 1491 * remember the value we stored into this reg 1492 */ 1493 regs[insn->dst_reg].type = CONST_IMM; 1494 regs[insn->dst_reg].imm = insn->imm; 1495 } 1496 1497 } else if (opcode > BPF_END) { 1498 verbose("invalid BPF_ALU opcode %x\n", opcode); 1499 return -EINVAL; 1500 1501 } else { /* all other ALU ops: and, sub, xor, add, ... */ 1502 1503 if (BPF_SRC(insn->code) == BPF_X) { 1504 if (insn->imm != 0 || insn->off != 0) { 1505 verbose("BPF_ALU uses reserved fields\n"); 1506 return -EINVAL; 1507 } 1508 /* check src1 operand */ 1509 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1510 if (err) 1511 return err; 1512 } else { 1513 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1514 verbose("BPF_ALU uses reserved fields\n"); 1515 return -EINVAL; 1516 } 1517 } 1518 1519 /* check src2 operand */ 1520 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1521 if (err) 1522 return err; 1523 1524 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 1525 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 1526 verbose("div by zero\n"); 1527 return -EINVAL; 1528 } 1529 1530 if ((opcode == BPF_LSH || opcode == BPF_RSH || 1531 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 1532 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 

			if (insn->imm < 0 || insn->imm >= size) {
				verbose("invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
		if (err)
			return err;

		dst_reg = &regs[insn->dst_reg];

		/* pattern match 'bpf_add Rx, imm' instruction */
		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
		    dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) {
			dst_reg->type = PTR_TO_STACK;
			dst_reg->imm = insn->imm;
			return 0;
		} else if (opcode == BPF_ADD &&
			   BPF_CLASS(insn->code) == BPF_ALU64 &&
			   (dst_reg->type == PTR_TO_PACKET ||
			    (BPF_SRC(insn->code) == BPF_X &&
			     regs[insn->src_reg].type == PTR_TO_PACKET))) {
			/* ptr_to_packet += K|X */
			return check_packet_ptr_add(env, insn);
		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
			   dst_reg->type == UNKNOWN_VALUE &&
			   env->allow_ptr_leaks) {
			/* unknown += K|X */
			return evaluate_reg_alu(env, insn);
		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
			   dst_reg->type == CONST_IMM &&
			   env->allow_ptr_leaks) {
			/* reg_imm += K|X */
			return evaluate_reg_imm_alu(env, insn);
		} else if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		} else if (BPF_SRC(insn->code) == BPF_X &&
			   is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->src_reg);
			return -EACCES;
		}

		/* mark dest operand */
		mark_reg_unknown_value(regs, insn->dst_reg);
	}

	return 0;
}

static void find_good_pkt_pointers(struct verifier_env *env,
				   struct reg_state *dst_reg)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs, *reg;
	int i;
	/* r2 = r3;
	 * r2 += 8
	 * if (r2 > pkt_end) goto somewhere
	 * r2 == dst_reg, pkt_end == src_reg,
	 * r2=pkt(id=n,off=8,r=0)
	 * r3=pkt(id=n,off=0,r=0)
	 * find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
	 * so that range of bytes [r3, r3 + 8) is safe to access
	 */
	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
			regs[i].range = dst_reg->off;

	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (state->stack_slot_type[i] != STACK_SPILL)
			continue;
		reg = &state->spilled_regs[i / BPF_REG_SIZE];
		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
			reg->range = dst_reg->off;
	}
}

static int check_cond_jmp_op(struct verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct reg_state *regs = env->cur_state.regs, *dst_reg;
	struct verifier_state *other_branch;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_EXIT) {
		verbose("invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
verbose("BPF_JMP uses reserved fields\n"); 1648 return -EINVAL; 1649 } 1650 } 1651 1652 /* check src2 operand */ 1653 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1654 if (err) 1655 return err; 1656 1657 dst_reg = ®s[insn->dst_reg]; 1658 1659 /* detect if R == 0 where R was initialized to zero earlier */ 1660 if (BPF_SRC(insn->code) == BPF_K && 1661 (opcode == BPF_JEQ || opcode == BPF_JNE) && 1662 dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) { 1663 if (opcode == BPF_JEQ) { 1664 /* if (imm == imm) goto pc+off; 1665 * only follow the goto, ignore fall-through 1666 */ 1667 *insn_idx += insn->off; 1668 return 0; 1669 } else { 1670 /* if (imm != imm) goto pc+off; 1671 * only follow fall-through branch, since 1672 * that's where the program will go 1673 */ 1674 return 0; 1675 } 1676 } 1677 1678 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); 1679 if (!other_branch) 1680 return -EFAULT; 1681 1682 /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */ 1683 if (BPF_SRC(insn->code) == BPF_K && 1684 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 1685 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 1686 if (opcode == BPF_JEQ) { 1687 /* next fallthrough insn can access memory via 1688 * this register 1689 */ 1690 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; 1691 /* branch targer cannot access it, since reg == 0 */ 1692 mark_reg_unknown_value(other_branch->regs, 1693 insn->dst_reg); 1694 } else { 1695 other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; 1696 mark_reg_unknown_value(regs, insn->dst_reg); 1697 } 1698 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 1699 dst_reg->type == PTR_TO_PACKET && 1700 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 1701 find_good_pkt_pointers(env, dst_reg); 1702 } else if (is_pointer_value(env, insn->dst_reg)) { 1703 verbose("R%d pointer comparison prohibited\n", insn->dst_reg); 1704 return -EACCES; 1705 } 1706 if (log_level) 1707 print_verifier_state(&env->cur_state); 1708 return 0; 1709 } 1710 1711 /* return the map pointer stored inside BPF_LD_IMM64 instruction */ 1712 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) 1713 { 1714 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; 1715 1716 return (struct bpf_map *) (unsigned long) imm64; 1717 } 1718 1719 /* verify BPF_LD_IMM64 instruction */ 1720 static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn) 1721 { 1722 struct reg_state *regs = env->cur_state.regs; 1723 int err; 1724 1725 if (BPF_SIZE(insn->code) != BPF_DW) { 1726 verbose("invalid BPF_LD_IMM insn\n"); 1727 return -EINVAL; 1728 } 1729 if (insn->off != 0) { 1730 verbose("BPF_LD_IMM64 uses reserved fields\n"); 1731 return -EINVAL; 1732 } 1733 1734 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1735 if (err) 1736 return err; 1737 1738 if (insn->src_reg == 0) 1739 /* generic move 64-bit immediate into a register */ 1740 return 0; 1741 1742 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ 1743 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); 1744 1745 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 1746 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); 1747 return 0; 1748 } 1749 1750 static bool may_access_skb(enum bpf_prog_type type) 1751 { 1752 switch (type) { 1753 case BPF_PROG_TYPE_SOCKET_FILTER: 1754 case BPF_PROG_TYPE_SCHED_CLS: 1755 case BPF_PROG_TYPE_SCHED_ACT: 1756 return true; 1757 default: 1758 return false; 1759 } 1760 } 1761 1762 /* verify safety of LD_ABS|LD_IND instructions: 

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 mode = BPF_MODE(insn->code);
	struct reg_state *reg;
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet
	 */
	regs[BPF_REG_0].type = UNKNOWN_VALUE;
	return 0;
}

/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.pop()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue at 5
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
	DISCOVERED = 0x10,
	EXPLORED = 0x20,
	FALLTHROUGH = 1,
	BRANCH = 2,
};

#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)

static int *insn_stack;	/* stack of insns to process */
static int cur_stack;	/* current stack index */
static int *insn_state;

/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6          t <- S.pop()
 * 7          if t is what we're looking for:
 * 8              return t
 * 9          for all edges e in G.adjacentEdges(t) do
 * 10             if edge e is already labelled
 * 11                 continue with the next edge
 * 12             w <- G.adjacentVertex(t,e)
 * 13             if vertex w is not discovered and not explored
 * 14                 label e as tree-edge
 * 15                 label w as discovered
 * 16                 S.push(w)
 * 17                 continue at 5
 * 18             else if vertex w is discovered
 * 19                 label e as back-edge
 * 20             else
 * 21                 // vertex w is explored
 * 22                 label e as forward- or cross-edge
 * 23         label t as explored
 * 24         S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
        DISCOVERED = 0x10,
        EXPLORED = 0x20,
        FALLTHROUGH = 1,
        BRANCH = 2,
};

#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)

static int *insn_stack; /* stack of insns to process */
static int cur_stack;   /* current stack index */
static int *insn_state;

/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
        if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
                return 0;

        if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
                return 0;

        if (w < 0 || w >= env->prog->len) {
                verbose("jump out of range from insn %d to %d\n", t, w);
                return -EINVAL;
        }

        if (e == BRANCH)
                /* mark branch target for state pruning */
                env->explored_states[w] = STATE_LIST_MARK;

        if (insn_state[w] == 0) {
                /* tree-edge */
                insn_state[t] = DISCOVERED | e;
                insn_state[w] = DISCOVERED;
                if (cur_stack >= env->prog->len)
                        return -E2BIG;
                insn_stack[cur_stack++] = w;
                return 1;
        } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
                verbose("back-edge from insn %d to %d\n", t, w);
                return -EINVAL;
        } else if (insn_state[w] == EXPLORED) {
                /* forward- or cross-edge */
                insn_state[t] = DISCOVERED | e;
        } else {
                verbose("insn state internal bug\n");
                return -EFAULT;
        }
        return 0;
}

/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct verifier_env *env)
{
        struct bpf_insn *insns = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        int ret = 0;
        int i, t;

        insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
        if (!insn_state)
                return -ENOMEM;

        insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
        if (!insn_stack) {
                kfree(insn_state);
                return -ENOMEM;
        }

        insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
        insn_stack[0] = 0; /* 0 is the first instruction */
        cur_stack = 1;

peek_stack:
        if (cur_stack == 0)
                goto check_state;
        t = insn_stack[cur_stack - 1];

        if (BPF_CLASS(insns[t].code) == BPF_JMP) {
                u8 opcode = BPF_OP(insns[t].code);

                if (opcode == BPF_EXIT) {
                        goto mark_explored;
                } else if (opcode == BPF_CALL) {
                        ret = push_insn(t, t + 1, FALLTHROUGH, env);
                        if (ret == 1)
                                goto peek_stack;
                        else if (ret < 0)
                                goto err_free;
                        if (t + 1 < insn_cnt)
                                env->explored_states[t + 1] = STATE_LIST_MARK;
                } else if (opcode == BPF_JA) {
                        if (BPF_SRC(insns[t].code) != BPF_K) {
                                ret = -EINVAL;
                                goto err_free;
                        }
                        /* unconditional jump with single edge */
                        ret = push_insn(t, t + insns[t].off + 1,
                                        FALLTHROUGH, env);
                        if (ret == 1)
                                goto peek_stack;
                        else if (ret < 0)
                                goto err_free;
                        /* tell verifier to check for equivalent states
                         * after every call and jump
                         */
                        if (t + 1 < insn_cnt)
                                env->explored_states[t + 1] = STATE_LIST_MARK;
                } else {
                        /* conditional jump with two edges */
                        ret = push_insn(t, t + 1, FALLTHROUGH, env);
                        if (ret == 1)
                                goto peek_stack;
                        else if (ret < 0)
                                goto err_free;

                        ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
                        if (ret == 1)
                                goto peek_stack;
                        else if (ret < 0)
                                goto err_free;
                }
        } else {
                /* all other non-branch instructions with single
                 * fall-through edge
                 */
                ret = push_insn(t, t + 1, FALLTHROUGH, env);
                if (ret == 1)
                        goto peek_stack;
                else if (ret < 0)
                        goto err_free;
        }

mark_explored:
        insn_state[t] = EXPLORED;
        if (cur_stack-- <= 0) {
                verbose("pop stack internal bug\n");
                ret = -EFAULT;
                goto err_free;
        }
        goto peek_stack;

check_state:
        for (i = 0; i < insn_cnt; i++) {
                if (insn_state[i] != EXPLORED) {
                        verbose("unreachable insn %d\n", i);
                        ret = -EINVAL;
                        goto err_free;
                }
        }
        ret = 0; /* cfg looks good */

err_free:
        kfree(insn_state);
        kfree(insn_stack);
        return ret;
}
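/* As an illustration, check_cfg() rejects a minimal looping program such
 * as this sketch:
 *
 *   0: BPF_MOV64_IMM(BPF_REG_0, 0),
 *   1: BPF_JMP_IMM(BPF_JA, 0, 0, -2),  // target = 1 + (-2) + 1 = insn 0
 *   2: BPF_EXIT_INSN(),
 *
 * While processing insn 1, push_insn(1, 0, FALLTHROUGH, env) finds insn 0
 * still in the DISCOVERED state, labels the edge as a back-edge and fails
 * with "back-edge from insn 1 to 0". Insn 2 would additionally trip the
 * unreachable-insn check, had verification gotten that far.
 */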
verbose("unreachable insn %d\n", i); 2013 ret = -EINVAL; 2014 goto err_free; 2015 } 2016 } 2017 ret = 0; /* cfg looks good */ 2018 2019 err_free: 2020 kfree(insn_state); 2021 kfree(insn_stack); 2022 return ret; 2023 } 2024 2025 /* the following conditions reduce the number of explored insns 2026 * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet 2027 */ 2028 static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur) 2029 { 2030 if (old->id != cur->id) 2031 return false; 2032 2033 /* old ptr_to_packet is more conservative, since it allows smaller 2034 * range. Ex: 2035 * old(off=0,r=10) is equal to cur(off=0,r=20), because 2036 * old(off=0,r=10) means that with range=10 the verifier proceeded 2037 * further and found no issues with the program. Now we're in the same 2038 * spot with cur(off=0,r=20), so we're safe too, since anything further 2039 * will only be looking at most 10 bytes after this pointer. 2040 */ 2041 if (old->off == cur->off && old->range < cur->range) 2042 return true; 2043 2044 /* old(off=20,r=10) is equal to cur(off=22,re=22 or 5 or 0) 2045 * since both cannot be used for packet access and safe(old) 2046 * pointer has smaller off that could be used for further 2047 * 'if (ptr > data_end)' check 2048 * Ex: 2049 * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean 2050 * that we cannot access the packet. 2051 * The safe range is: 2052 * [ptr, ptr + range - off) 2053 * so whenever off >=range, it means no safe bytes from this pointer. 2054 * When comparing old->off <= cur->off, it means that older code 2055 * went with smaller offset and that offset was later 2056 * used to figure out the safe range after 'if (ptr > data_end)' check 2057 * Say, 'old' state was explored like: 2058 * ... R3(off=0, r=0) 2059 * R4 = R3 + 20 2060 * ... now R4(off=20,r=0) <-- here 2061 * if (R4 > data_end) 2062 * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access. 2063 * ... the code further went all the way to bpf_exit. 2064 * Now the 'cur' state at the mark 'here' has R4(off=30,r=0). 2065 * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier 2066 * goes further, such cur_R4 will give larger safe packet range after 2067 * 'if (R4 > data_end)' and all further insn were already good with r=20, 2068 * so they will be good with r=30 and we can prune the search. 2069 */ 2070 if (old->off <= cur->off && 2071 old->off >= old->range && cur->off >= cur->range) 2072 return true; 2073 2074 return false; 2075 } 2076 2077 /* compare two verifier states 2078 * 2079 * all states stored in state_list are known to be valid, since 2080 * verifier reached 'bpf_exit' instruction through them 2081 * 2082 * this function is called when verifier exploring different branches of 2083 * execution popped from the state stack. If it sees an old state that has 2084 * more strict register state and more strict stack state then this execution 2085 * branch doesn't need to be explored further, since verifier already 2086 * concluded that more strict state leads to valid finish. 2087 * 2088 * Therefore two states are equivalent if register state is more conservative 2089 * and explored stack state is more conservative than the current one. 
/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when verifier exploring different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 * explored                  current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
        struct reg_state *rold, *rcur;
        int i;

        for (i = 0; i < MAX_BPF_REG; i++) {
                rold = &old->regs[i];
                rcur = &cur->regs[i];

                if (memcmp(rold, rcur, sizeof(*rold)) == 0)
                        continue;

                if (rold->type == NOT_INIT ||
                    (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT))
                        continue;

                if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
                    compare_ptrs_to_packet(rold, rcur))
                        continue;

                return false;
        }

        for (i = 0; i < MAX_BPF_STACK; i++) {
                if (old->stack_slot_type[i] == STACK_INVALID)
                        continue;
                if (old->stack_slot_type[i] != cur->stack_slot_type[i])
                        /* Ex: old explored (safe) state has STACK_SPILL in
                         * this stack slot, but current has STACK_MISC ->
                         * these verifier states are not equivalent,
                         * return false to continue verification of this path
                         */
                        return false;
                if (i % BPF_REG_SIZE)
                        continue;
                if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
                           &cur->spilled_regs[i / BPF_REG_SIZE],
                           sizeof(old->spilled_regs[0])))
                        /* when explored and current stack slot types are
                         * the same, check that stored pointer types
                         * are the same as well.
                         * Ex: explored safe path could have stored
                         * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
                         * but current path has stored:
                         * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
                         * such verifier states are not equivalent.
                         * return false to continue verification of this path
                         */
                        return false;
                else
                        continue;
        }
        return true;
}
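/* The register comparison above, illustrated on a few cases (a sketch):
 *
 *   explored rold         current rcur       equivalent?
 *   NOT_INIT              anything           yes (old path never read it)
 *   UNKNOWN_VALUE         CONST_IMM          yes (old path proved safety
 *                                                 with less information)
 *   CONST_IMM imm=5       CONST_IMM imm=7    no (memcmp differs and no
 *                                                other rule applies)
 */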
static int is_state_visited(struct verifier_env *env, int insn_idx)
{
        struct verifier_state_list *new_sl;
        struct verifier_state_list *sl;

        sl = env->explored_states[insn_idx];
        if (!sl)
                /* this 'insn_idx' instruction wasn't marked, so we will not
                 * be doing state search here
                 */
                return 0;

        while (sl != STATE_LIST_MARK) {
                if (states_equal(&sl->state, &env->cur_state))
                        /* reached equivalent register/stack state,
                         * prune the search
                         */
                        return 1;
                sl = sl->next;
        }

        /* there were no equivalent states, remember current one.
         * technically the current state is not proven to be safe yet,
         * but it will either reach bpf_exit (which means it's safe) or
         * it will be rejected. Since there are no loops, we won't be
         * seeing this 'insn_idx' instruction again on the way to bpf_exit
         */
        new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
        if (!new_sl)
                return -ENOMEM;

        /* add new state to the head of linked list */
        memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
        new_sl->next = env->explored_states[insn_idx];
        env->explored_states[insn_idx] = new_sl;
        return 0;
}
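/* do_check() below also enforces that a single load/store insn is used
 * with one pointer type on all paths. A sketch of a program that trips
 * the "same insn cannot be used with different pointers" check
 * (pseudo-asm, register choices illustrative):
 *
 *   if r7 == 0 goto L         // two paths join at the load below
 *   r2 = r1                   // r2 = ctx on this path
 *   goto M
 *   L: r2 = r10
 *      r2 += -8               // r2 = stack pointer on this path
 *   M: r0 = *(u32 *)(r2 + 0)  // one insn, two pointer types -> rejected
 *
 * The rewrite done later by convert_ctx_accesses() is only valid for ctx
 * pointers, hence the restriction.
 */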
static int do_check(struct verifier_env *env)
{
        struct verifier_state *state = &env->cur_state;
        struct bpf_insn *insns = env->prog->insnsi;
        struct reg_state *regs = state->regs;
        int insn_cnt = env->prog->len;
        int insn_idx, prev_insn_idx = 0;
        int insn_processed = 0;
        bool do_print_state = false;

        init_reg_state(regs);
        insn_idx = 0;
        for (;;) {
                struct bpf_insn *insn;
                u8 class;
                int err;

                if (insn_idx >= insn_cnt) {
                        verbose("invalid insn idx %d insn_cnt %d\n",
                                insn_idx, insn_cnt);
                        return -EFAULT;
                }

                insn = &insns[insn_idx];
                class = BPF_CLASS(insn->code);

                if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
                        verbose("BPF program is too large. Processed %d insn\n",
                                insn_processed);
                        return -E2BIG;
                }

                err = is_state_visited(env, insn_idx);
                if (err < 0)
                        return err;
                if (err == 1) {
                        /* found equivalent state, can prune the search */
                        if (log_level) {
                                if (do_print_state)
                                        verbose("\nfrom %d to %d: safe\n",
                                                prev_insn_idx, insn_idx);
                                else
                                        verbose("%d: safe\n", insn_idx);
                        }
                        goto process_bpf_exit;
                }

                if (log_level && do_print_state) {
                        verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
                        print_verifier_state(&env->cur_state);
                        do_print_state = false;
                }

                if (log_level) {
                        verbose("%d: ", insn_idx);
                        print_bpf_insn(insn);
                }

                if (class == BPF_ALU || class == BPF_ALU64) {
                        err = check_alu_op(env, insn);
                        if (err)
                                return err;

                } else if (class == BPF_LDX) {
                        enum bpf_reg_type src_reg_type;

                        /* check for reserved fields is already done */

                        /* check src operand */
                        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                        if (err)
                                return err;

                        err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
                        if (err)
                                return err;

                        src_reg_type = regs[insn->src_reg].type;

                        /* check that memory (src_reg + off) is readable,
                         * the state of dst_reg will be updated by this func
                         */
                        err = check_mem_access(env, insn->src_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_READ,
                                               insn->dst_reg);
                        if (err)
                                return err;

                        if (BPF_SIZE(insn->code) != BPF_W) {
                                insn_idx++;
                                continue;
                        }

                        if (insn->imm == 0) {
                                /* saw a valid insn
                                 * dst_reg = *(u32 *)(src_reg + off)
                                 * use reserved 'imm' field to mark this insn
                                 */
                                insn->imm = src_reg_type;

                        } else if (src_reg_type != insn->imm &&
                                   (src_reg_type == PTR_TO_CTX ||
                                    insn->imm == PTR_TO_CTX)) {
                                /* A buggy or malicious program is trying to
                                 * use the same insn
                                 * dst_reg = *(u32 *)(src_reg + off)
                                 * with different pointer types:
                                 * src_reg == ctx in one branch and
                                 * src_reg == stack|map in some other branch.
                                 * Reject it.
                                 */
                                verbose("same insn cannot be used with different pointers\n");
                                return -EINVAL;
                        }

                } else if (class == BPF_STX) {
                        enum bpf_reg_type dst_reg_type;

                        if (BPF_MODE(insn->code) == BPF_XADD) {
                                err = check_xadd(env, insn);
                                if (err)
                                        return err;
                                insn_idx++;
                                continue;
                        }

                        /* check src1 operand */
                        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                        if (err)
                                return err;
                        /* check src2 operand */
                        err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
                        if (err)
                                return err;

                        dst_reg_type = regs[insn->dst_reg].type;

                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
                                               insn->src_reg);
                        if (err)
                                return err;

                        if (insn->imm == 0) {
                                insn->imm = dst_reg_type;
                        } else if (dst_reg_type != insn->imm &&
                                   (dst_reg_type == PTR_TO_CTX ||
                                    insn->imm == PTR_TO_CTX)) {
                                verbose("same insn cannot be used with different pointers\n");
                                return -EINVAL;
                        }

                } else if (class == BPF_ST) {
                        if (BPF_MODE(insn->code) != BPF_MEM ||
                            insn->src_reg != BPF_REG_0) {
                                verbose("BPF_ST uses reserved fields\n");
                                return -EINVAL;
                        }
                        /* check src operand */
                        err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
                        if (err)
                                return err;

                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
                                               -1);
                        if (err)
                                return err;

                } else if (class == BPF_JMP) {
                        u8 opcode = BPF_OP(insn->code);

                        if (opcode == BPF_CALL) {
                                if (BPF_SRC(insn->code) != BPF_K ||
                                    insn->off != 0 ||
                                    insn->src_reg != BPF_REG_0 ||
                                    insn->dst_reg != BPF_REG_0) {
                                        verbose("BPF_CALL uses reserved fields\n");
                                        return -EINVAL;
                                }

                                err = check_call(env, insn->imm);
                                if (err)
                                        return err;

                        } else if (opcode == BPF_JA) {
                                if (BPF_SRC(insn->code) != BPF_K ||
                                    insn->imm != 0 ||
                                    insn->src_reg != BPF_REG_0 ||
                                    insn->dst_reg != BPF_REG_0) {
                                        verbose("BPF_JA uses reserved fields\n");
                                        return -EINVAL;
                                }

                                insn_idx += insn->off + 1;
                                continue;

                        } else if (opcode == BPF_EXIT) {
                                if (BPF_SRC(insn->code) != BPF_K ||
                                    insn->imm != 0 ||
                                    insn->src_reg != BPF_REG_0 ||
                                    insn->dst_reg != BPF_REG_0) {
                                        verbose("BPF_EXIT uses reserved fields\n");
                                        return -EINVAL;
                                }

                                /* eBPF calling convention is such that R0 is
                                 * used to return the value from eBPF program.
                                 * Make sure that it's readable at this time
                                 * of bpf_exit, which means that program wrote
                                 * something into it earlier
                                 */
                                err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
                                if (err)
                                        return err;

                                if (is_pointer_value(env, BPF_REG_0)) {
                                        verbose("R0 leaks addr as return value\n");
                                        return -EACCES;
                                }

process_bpf_exit:
                                insn_idx = pop_stack(env, &prev_insn_idx);
                                if (insn_idx < 0) {
                                        break;
                                } else {
                                        do_print_state = true;
                                        continue;
                                }
                        } else {
                                err = check_cond_jmp_op(env, insn, &insn_idx);
                                if (err)
                                        return err;
                        }
                } else if (class == BPF_LD) {
                        u8 mode = BPF_MODE(insn->code);

                        if (mode == BPF_ABS || mode == BPF_IND) {
                                err = check_ld_abs(env, insn);
                                if (err)
                                        return err;

                        } else if (mode == BPF_IMM) {
                                err = check_ld_imm(env, insn);
                                if (err)
                                        return err;

                                insn_idx++;
                        } else {
                                verbose("invalid BPF_LD mode\n");
                                return -EINVAL;
                        }
                } else {
                        verbose("unknown insn class %d\n", class);
                        return -EINVAL;
                }

                insn_idx++;
        }

        verbose("processed %d insns\n", insn_processed);
        return 0;
}
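/* The pseudo instruction pair handled by replace_map_fd_with_map_ptr()
 * below is emitted by the BPF_LD_MAP_FD() macro; schematically:
 *
 *   insn[0]: code = BPF_LD | BPF_DW | BPF_IMM,
 *            src_reg = BPF_PSEUDO_MAP_FD, imm = map_fd
 *   insn[1]: code = 0, dst_reg = 0, src_reg = 0, off = 0,
 *            imm = 0 (upper 32 bits of the 64-bit immediate)
 *
 * After the rewrite, insn[0].imm and insn[1].imm hold the low and high
 * halves of the 'struct bpf_map *' pointer instead of the user-supplied fd.
 */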
/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
        struct bpf_insn *insn = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        int i, j;

        for (i = 0; i < insn_cnt; i++, insn++) {
                if (BPF_CLASS(insn->code) == BPF_LDX &&
                    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
                        verbose("BPF_LDX uses reserved fields\n");
                        return -EINVAL;
                }

                if (BPF_CLASS(insn->code) == BPF_STX &&
                    ((BPF_MODE(insn->code) != BPF_MEM &&
                      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
                        verbose("BPF_STX uses reserved fields\n");
                        return -EINVAL;
                }

                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
                        struct bpf_map *map;
                        struct fd f;

                        if (i == insn_cnt - 1 || insn[1].code != 0 ||
                            insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
                            insn[1].off != 0) {
                                verbose("invalid bpf_ld_imm64 insn\n");
                                return -EINVAL;
                        }

                        if (insn->src_reg == 0)
                                /* valid generic load 64-bit imm */
                                goto next_insn;

                        if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
                                verbose("unrecognized bpf_ld_imm64 insn\n");
                                return -EINVAL;
                        }

                        f = fdget(insn->imm);
                        map = __bpf_map_get(f);
                        if (IS_ERR(map)) {
                                verbose("fd %d is not pointing to valid bpf_map\n",
                                        insn->imm);
                                return PTR_ERR(map);
                        }

                        /* store map pointer inside BPF_LD_IMM64 instruction */
                        insn[0].imm = (u32) (unsigned long) map;
                        insn[1].imm = ((u64) (unsigned long) map) >> 32;

                        /* check whether we recorded this map already */
                        for (j = 0; j < env->used_map_cnt; j++)
                                if (env->used_maps[j] == map) {
                                        fdput(f);
                                        goto next_insn;
                                }

                        if (env->used_map_cnt >= MAX_USED_MAPS) {
                                fdput(f);
                                return -E2BIG;
                        }

                        /* hold the map. If the program is rejected by verifier,
                         * the map will be released by release_maps() or it
                         * will be used by the valid program until it's unloaded
                         * and all maps are released in free_bpf_prog_info()
                         */
                        map = bpf_map_inc(map, false);
                        if (IS_ERR(map)) {
                                fdput(f);
                                return PTR_ERR(map);
                        }
                        env->used_maps[env->used_map_cnt++] = map;

                        fdput(f);
next_insn:
                        insn++;
                        i++;
                }
        }

        /* now all pseudo BPF_LD_IMM64 instructions load valid
         * 'struct bpf_map *' into a register instead of user map_fd.
         * These pointers will be used later by verifier to validate map access.
         */
        return 0;
}

/* drop refcnt of maps used by the rejected program */
static void release_maps(struct verifier_env *env)
{
        int i;

        for (i = 0; i < env->used_map_cnt; i++)
                bpf_map_put(env->used_maps[i]);
}

/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct verifier_env *env)
{
        struct bpf_insn *insn = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        int i;

        for (i = 0; i < insn_cnt; i++, insn++)
                if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
                        insn->src_reg = 0;
}
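/* Schematically, convert_ctx_accesses() below turns a verified ctx load
 * such as
 *
 *   r0 = *(u32 *)(r6 + offsetof(struct __sk_buff, protocol))
 *
 * into an access to the real socket buffer, e.g. (a sketch; the actual
 * sequence is produced by the prog type's convert_ctx_access callback):
 *
 *   r0 = *(u16 *)(r6 + offsetof(struct sk_buff, protocol))
 */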
/* convert load instructions that access fields of 'struct __sk_buff'
 * into sequence of instructions that access fields of 'struct sk_buff'
 */
static int convert_ctx_accesses(struct verifier_env *env)
{
        struct bpf_insn *insn = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        struct bpf_insn insn_buf[16];
        struct bpf_prog *new_prog;
        enum bpf_access_type type;
        int i;

        if (!env->prog->aux->ops->convert_ctx_access)
                return 0;

        for (i = 0; i < insn_cnt; i++, insn++) {
                u32 insn_delta, cnt;

                if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
                        type = BPF_READ;
                else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
                        type = BPF_WRITE;
                else
                        continue;

                if (insn->imm != PTR_TO_CTX) {
                        /* clear internal mark */
                        insn->imm = 0;
                        continue;
                }

                cnt = env->prog->aux->ops->
                        convert_ctx_access(type, insn->dst_reg, insn->src_reg,
                                           insn->off, insn_buf, env->prog);
                if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
                        verbose("bpf verifier is misconfigured\n");
                        return -EINVAL;
                }

                new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
                if (!new_prog)
                        return -ENOMEM;

                insn_delta = cnt - 1;

                /* keep walking new program and skip insns we just inserted */
                env->prog = new_prog;
                insn = new_prog->insnsi + i + insn_delta;

                insn_cnt += insn_delta;
                i += insn_delta;
        }

        return 0;
}

static void free_states(struct verifier_env *env)
{
        struct verifier_state_list *sl, *sln;
        int i;

        if (!env->explored_states)
                return;

        for (i = 0; i < env->prog->len; i++) {
                sl = env->explored_states[i];

                if (sl)
                        while (sl != STATE_LIST_MARK) {
                                sln = sl->next;
                                kfree(sl);
                                sl = sln;
                        }
        }

        kfree(env->explored_states);
}
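/* bpf_check() is reached via the bpf(BPF_PROG_LOAD, ...) syscall. A rough
 * sketch of how userspace supplies the log buffer consumed below
 * (ptr_to_u64() is an assumed helper casting a pointer to __u64):
 *
 *   char log[65536];
 *   union bpf_attr attr = {
 *           .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *           .insns     = ptr_to_u64(insns),
 *           .insn_cnt  = cnt,
 *           .license   = ptr_to_u64("GPL"),
 *           .log_buf   = ptr_to_u64(log),
 *           .log_size  = sizeof(log),
 *           .log_level = 1,
 *   };
 *   int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */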
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
        char __user *log_ubuf = NULL;
        struct verifier_env *env;
        int ret = -EINVAL;

        if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
                return -E2BIG;

        /* 'struct verifier_env' can be global, but since it's not small,
         * allocate/free it every time bpf_check() is called
         */
        env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
        if (!env)
                return -ENOMEM;

        env->prog = *prog;

        /* grab the mutex to protect a few globals used by verifier */
        mutex_lock(&bpf_verifier_lock);

        if (attr->log_level || attr->log_buf || attr->log_size) {
                /* user requested verbose verifier output
                 * and supplied buffer to store the verification trace
                 */
                log_level = attr->log_level;
                log_ubuf = (char __user *) (unsigned long) attr->log_buf;
                log_size = attr->log_size;
                log_len = 0;

                ret = -EINVAL;
                /* log_* values have to be sane */
                if (log_size < 128 || log_size > UINT_MAX >> 8 ||
                    log_level == 0 || log_ubuf == NULL)
                        goto free_env;

                ret = -ENOMEM;
                log_buf = vmalloc(log_size);
                if (!log_buf)
                        goto free_env;
        } else {
                log_level = 0;
        }

        ret = replace_map_fd_with_map_ptr(env);
        if (ret < 0)
                goto skip_full_check;

        env->explored_states = kcalloc(env->prog->len,
                                       sizeof(struct verifier_state_list *),
                                       GFP_USER);
        ret = -ENOMEM;
        if (!env->explored_states)
                goto skip_full_check;

        ret = check_cfg(env);
        if (ret < 0)
                goto skip_full_check;

        env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

        ret = do_check(env);

skip_full_check:
        while (pop_stack(env, NULL) >= 0);
        free_states(env);

        if (ret == 0)
                /* program is valid, convert *(u32 *)(ctx + off) accesses */
                ret = convert_ctx_accesses(env);

        if (log_level && log_len >= log_size - 1) {
                BUG_ON(log_len >= log_size);
                /* verifier log exceeded user supplied buffer */
                ret = -ENOSPC;
                /* fall through to return what was recorded */
        }

        /* copy verifier log back to user space including trailing zero */
        if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
                ret = -EFAULT;
                goto free_log_buf;
        }

        if (ret == 0 && env->used_map_cnt) {
                /* if program passed verifier, update used_maps in bpf_prog_info */
                env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
                                                          sizeof(env->used_maps[0]),
                                                          GFP_KERNEL);

                if (!env->prog->aux->used_maps) {
                        ret = -ENOMEM;
                        goto free_log_buf;
                }

                memcpy(env->prog->aux->used_maps, env->used_maps,
                       sizeof(env->used_maps[0]) * env->used_map_cnt);
                env->prog->aux->used_map_cnt = env->used_map_cnt;

                /* program is valid. Convert pseudo bpf_ld_imm64 into generic
                 * bpf_ld_imm64 instructions
                 */
                convert_pseudo_ld_imm64(env);
        }

free_log_buf:
        if (log_level)
                vfree(log_buf);
free_env:
        if (!env->prog->aux->used_maps)
                /* if we didn't copy map pointers into bpf_prog_info, release
                 * them now. Otherwise free_bpf_prog_info() will release them.
                 */
                release_maps(env);
        *prog = env->prog;
        kfree(env);
        mutex_unlock(&bpf_verifier_lock);
        return ret;
}