/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm
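
/* With these shorthands, an instruction such as BPF_ALU64 | BPF_ADD | BPF_X
 * executes in the interpreter below simply as DST = DST + SRC, that is,
 * regs[insn->dst_reg] += regs[insn->src_reg]; IMM likewise stands for the
 * 32-bit immediate of the current instruction.
 */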

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
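
/* A rough lifecycle sketch for the allocator above, assuming 'insns' and
 * 'len' come from an already validated source:
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(len), 0);
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->len = len;
 *	memcpy(fp->insnsi, insns, bpf_prog_insn_size(fp));
 *	...
 *	__bpf_prog_free(fp);
 *
 * bpf_prog_realloc() is only used to grow such an image, e.g. from
 * bpf_patch_insn_single() further below.
 */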

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since map fds are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 > pos)
		imm += delta;
	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 > pos)
		off += delta;
	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
			    const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i += delta + 1;
			insn++;
		}
		code = insn->code;
		if (BPF_CLASS(code) != BPF_JMP ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
						   probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
						   probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}
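
/* A small worked example of the adjustment above (sketch): if one insn at
 * position 5 is replaced by three (delta == 2), then a jump at position 3
 * with off == 4 originally targeted position 3 + 4 + 1 == 8; that target now
 * lives at position 10, so off is bumped by delta to 6. Jumps that stay
 * entirely on one side of the patchlet are left untouched.
 */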

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    bpf_adj_branches(prog, off, insn_delta, true))
		return NULL;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise the ship
	 * has sailed and we cannot revert to the original state anymore.
	 * An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));

	return prog_adj;
}

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)

/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}
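
/* The symbol synthesized below has the form bpf_prog_<tag>[_<name>], e.g.
 * (sketch) "bpf_prog_5a2f8b0d9c431e77_my_filter" for a program with that
 * 8-byte tag and the name "my_filter"; unnamed programs only carry the tag.
 * This is the name exposed through kallsyms (and thus to tools like perf).
 */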

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

static atomic_long_t bpf_jit_current;

#if defined(MODULES_VADDR)
static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
					    PAGE_SIZE), INT_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
#endif

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}
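
/* Rough usage sketch from an arch JIT's point of view (simplified, details
 * vary per architecture):
 *
 *	header = bpf_jit_binary_alloc(proglen, &image, align, jit_fill_hole);
 *	if (!header)
 *		return prog;	// charge or alloc failed, keep interpreter
 *	// ... emit native instructions into 'image' ...
 *	bpf_jit_binary_lock_ro(header);
 *	prog->bpf_func = (void *)image;
 *	prog->jited = 1;
 *
 * On a later error the JIT releases the region with bpf_jit_binary_free().
 */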

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = module_alloc(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	module_memfree(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}
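
/* Constant blinding below rewrites immediates so that no attacker-chosen
 * constant ends up verbatim in the JITed image. A sketch of the rewrite for
 * a single insn, with rnd = get_random_int():
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x12345678)
 *
 * becomes
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ 0x12345678)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * which computes the same result via the scratch register BPF_REG_AX.
 */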

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Account for the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */
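
/* Helper calls are encoded relative to the base function that follows: for a
 * helper such as bpf_map_lookup_elem(), the verifier stores
 * insn->imm = helper_address - __bpf_call_base, and the interpreter later
 * dispatches via (__bpf_call_base + insn->imm)(r1, ..., r5) in its JMP_CALL
 * handler (a rough sketch of the convention; see the verifier's call fixup).
 */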

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
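
/* The BPF_INSN_MAP() X-macro below lists every UAPI opcode once and is
 * instantiated twice: with BPF_INSN_2_TBL/BPF_INSN_3_TBL to build the
 * validity table in bpf_opcode_in_insntable(), and with
 * BPF_INSN_2_LBL/BPF_INSN_3_LBL to build the interpreter's jump table.
 * For example, INSN_3(ALU64, ADD, X) expands to
 * [BPF_ALU64 | BPF_ADD | BPF_X] = true in the former and to
 * [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X in the latter.
 */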

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)	\
	/* 32 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU, ADD, X),		\
	INSN_3(ALU, SUB, X),		\
	INSN_3(ALU, AND, X),		\
	INSN_3(ALU, OR, X),		\
	INSN_3(ALU, LSH, X),		\
	INSN_3(ALU, RSH, X),		\
	INSN_3(ALU, XOR, X),		\
	INSN_3(ALU, MUL, X),		\
	INSN_3(ALU, MOV, X),		\
	INSN_3(ALU, DIV, X),		\
	INSN_3(ALU, MOD, X),		\
	INSN_2(ALU, NEG),		\
	INSN_3(ALU, END, TO_BE),	\
	INSN_3(ALU, END, TO_LE),	\
	/* Immediate based. */		\
	INSN_3(ALU, ADD, K),		\
	INSN_3(ALU, SUB, K),		\
	INSN_3(ALU, AND, K),		\
	INSN_3(ALU, OR, K),		\
	INSN_3(ALU, LSH, K),		\
	INSN_3(ALU, RSH, K),		\
	INSN_3(ALU, XOR, K),		\
	INSN_3(ALU, MUL, K),		\
	INSN_3(ALU, MOV, K),		\
	INSN_3(ALU, DIV, K),		\
	INSN_3(ALU, MOD, K),		\
	/* 64 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU64, ADD, X),		\
	INSN_3(ALU64, SUB, X),		\
	INSN_3(ALU64, AND, X),		\
	INSN_3(ALU64, OR, X),		\
	INSN_3(ALU64, LSH, X),		\
	INSN_3(ALU64, RSH, X),		\
	INSN_3(ALU64, XOR, X),		\
	INSN_3(ALU64, MUL, X),		\
	INSN_3(ALU64, MOV, X),		\
	INSN_3(ALU64, ARSH, X),		\
	INSN_3(ALU64, DIV, X),		\
	INSN_3(ALU64, MOD, X),		\
	INSN_2(ALU64, NEG),		\
	/* Immediate based. */		\
	INSN_3(ALU64, ADD, K),		\
	INSN_3(ALU64, SUB, K),		\
	INSN_3(ALU64, AND, K),		\
	INSN_3(ALU64, OR, K),		\
	INSN_3(ALU64, LSH, K),		\
	INSN_3(ALU64, RSH, K),		\
	INSN_3(ALU64, XOR, K),		\
	INSN_3(ALU64, MUL, K),		\
	INSN_3(ALU64, MOV, K),		\
	INSN_3(ALU64, ARSH, K),		\
	INSN_3(ALU64, DIV, K),		\
	INSN_3(ALU64, MOD, K),		\
	/* Call instruction. */		\
	INSN_2(JMP, CALL),		\
	/* Exit instruction. */		\
	INSN_2(JMP, EXIT),		\
	/* Jump instructions. */	\
	/* Register based. */		\
	INSN_3(JMP, JEQ, X),		\
	INSN_3(JMP, JNE, X),		\
	INSN_3(JMP, JGT, X),		\
	INSN_3(JMP, JLT, X),		\
	INSN_3(JMP, JGE, X),		\
	INSN_3(JMP, JLE, X),		\
	INSN_3(JMP, JSGT, X),		\
	INSN_3(JMP, JSLT, X),		\
	INSN_3(JMP, JSGE, X),		\
	INSN_3(JMP, JSLE, X),		\
	INSN_3(JMP, JSET, X),		\
	/* Immediate based. */		\
	INSN_3(JMP, JEQ, K),		\
	INSN_3(JMP, JNE, K),		\
	INSN_3(JMP, JGT, K),		\
	INSN_3(JMP, JLT, K),		\
	INSN_3(JMP, JGE, K),		\
	INSN_3(JMP, JLE, K),		\
	INSN_3(JMP, JSGT, K),		\
	INSN_3(JMP, JSLT, K),		\
	INSN_3(JMP, JSGE, K),		\
	INSN_3(JMP, JSLE, K),		\
	INSN_3(JMP, JSET, K),		\
	INSN_2(JMP, JA),		\
	/* Store instructions. */	\
	/* Register based. */		\
	INSN_3(STX, MEM, B),		\
	INSN_3(STX, MEM, H),		\
	INSN_3(STX, MEM, W),		\
	INSN_3(STX, MEM, DW),		\
	INSN_3(STX, XADD, W),		\
	INSN_3(STX, XADD, DW),		\
	/* Immediate based. */		\
	INSN_3(ST, MEM, B),		\
	INSN_3(ST, MEM, H),		\
	INSN_3(ST, MEM, W),		\
	INSN_3(ST, MEM, DW),		\
	/* Load instructions. */	\
	/* Register based. */		\
	INSN_3(LDX, MEM, B),		\
	INSN_3(LDX, MEM, H),		\
	INSN_3(LDX, MEM, W),		\
	INSN_3(LDX, MEM, DW),		\
	/* Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}
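
/* For example, bpf_opcode_in_insntable(BPF_ALU64 | BPF_ADD | BPF_X) is true,
 * whereas an undefined encoding such as BPF_ALU64 | BPF_END | BPF_TO_BE is
 * not; the interpreter's default_label below relies on the verifier having
 * rejected every opcode outside of this table.
 */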

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *	@stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
	u64 tmp;
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_X:
		if (DST < SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_K:
		if (DST < IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_X:
		if (DST <= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_K:
		if (DST <= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_X:
		if (((s64) DST) < ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_K:
		if (((s64) DST) < ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_X:
		if (((s64) DST) <= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_K:
		if (((s64) DST) <= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
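
/* The interpreter entry is picked by rounding the stack depth up to the next
 * multiple of 32, e.g. (sketch) a program with aux->stack_depth == 140 rounds
 * up to 160 and runs via __bpf_prog_run160(), i.e. interpreters[4]; see
 * bpf_prog_select_func() below.
 */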

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT the eBPF program; if no JIT is available, use the interpreter.
 * The BPF program will be executed via the BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	if (fp->bpf_func)
		goto finalize;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		fp = bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
		if (!fp->jited) {
			*err = -ENOTSUPP;
			return fp;
		}
#endif
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine if we deal
	 * with JITed or non JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use one global 'empty_prog_array'. It will not be
 * modified by the caller of bpf_prog_array_alloc() (since the caller
 * requested prog_cnt == 0), and that pointer should be 'freed' by
 * bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
{
	if (!progs ||
	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}
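
/* A prog_array is laid out as prog_cnt items plus one NULL entry, so a
 * (sketch) bpf_prog_array_alloc(2, GFP_KERNEL) yields zeroed space for
 * items[0], items[1] and a trailing items[2].prog == NULL that the iterators
 * below use as the end marker; bpf_prog_array_free() defers the release via
 * kfree_rcu() so concurrent RCU readers can still walk the old array.
 */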

int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	rcu_read_lock();
	item = rcu_dereference(array)->items;
	for (; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	rcu_read_unlock();
	return cnt;
}

static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	item = rcu_dereference_check(array, 1)->items;
	for (; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *	bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check, but
	 * bpf_prog_array_length() releases rcu lock and
	 * prog array could have been swapped with empty or larger array,
	 * so always copy 'cnt' prog_ids to the user.
	 * In a rare race the user will see zero prog_ids
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	rcu_read_lock();
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	rcu_read_unlock();
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item = array->items;

	for (; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								      : 0;
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);