/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm
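
/* The macros above index into the interpreter's register file. Per the eBPF
 * calling convention (also reflected in the JMP_CALL comment further below):
 * R0 holds return values, R1-R5 carry call arguments and are scratched across
 * calls, R6-R9 are callee saved, and R10 (FP) is the read-only frame pointer
 * into the per-invocation stack.
 */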

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
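
/* The program tag computed below is a (truncated) SHA-1 digest over the
 * instruction image. Map fd immediates of BPF_LD | BPF_IMM | BPF_DW pseudo
 * instructions are zeroed out first, so the tag stays stable across loads
 * even though the fd values differ between user space invocations.
 */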

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 > pos)
		imm += delta;
	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 > pos)
		off += delta;
	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
			    const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i += delta + 1;
			insn++;
		}
		code = insn->code;
		if (BPF_CLASS(code) != BPF_JMP ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
						   probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
						   probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}
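
/* bpf_patch_insn_single() below replaces the single instruction at @off with
 * @len new instructions, growing the image by (len - 1) slots. Any relative
 * jump or BPF-to-BPF call whose span crosses the patched region must then be
 * re-targeted by bpf_adj_branches() above; the probe pass checks for s16/s32
 * offset overflow before the image is reallocated and can no longer fail.
 */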

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    bpf_adj_branches(prog, off, insn_delta, true))
		return NULL;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed to reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));

	return prog_adj;
}

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden __read_mostly;
int bpf_jit_kallsyms __read_mostly;
long bpf_jit_limit __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end = addr + hdr->pages * PAGE_SIZE;
}
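
/* JITed programs are exposed through kallsyms as "bpf_prog_<tag>" or, when a
 * program name was supplied at load time, "bpf_prog_<tag>_<name>"; the
 * BUILD_BUG_ON below ensures the longest such string still fits into
 * KSYM_NAME_LEN.
 */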

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return 1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}
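
/* Updates above take bpf_lock, but the lookups below only rely on RCU plus
 * the latch tree, so address resolution (e.g. from the __bpf_address_lookup()
 * and bpf_get_kallsym() paths used when symbolizing stack traces) stays
 * lock-free.
 */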

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}
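
/* Every JIT image allocation below is charged in pages against bpf_jit_limit,
 * which bpf_jit_charge_init() derives as roughly a quarter of the module or
 * vmalloc address space. Once the limit is exceeded, only CAP_SYS_ADMIN
 * callers may keep allocating; everyone else gets -EPERM.
 */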

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = module_alloc(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	module_memfree(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}
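
/* Constant blinding: when bpf_jit_harden is active, bpf_jit_blind_insn()
 * rewrites every instruction that carries a user-controlled immediate so
 * that the constant never appears verbatim in the JITed image. The pattern
 * is roughly:
 *
 *	BPF_ALU64_IMM(BPF_ADD, R1, imm)
 *
 * becomes
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm ^ rnd)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd)
 *	BPF_ALU64_REG(BPF_ADD, R1, BPF_REG_AX)
 *
 * with a fresh random value per rewritten instruction, using the JIT-only
 * auxiliary register BPF_REG_AX as scratch.
 */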

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)	\
	/* 32 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU, ADD, X),		\
	INSN_3(ALU, SUB, X),		\
	INSN_3(ALU, AND, X),		\
	INSN_3(ALU, OR, X),		\
	INSN_3(ALU, LSH, X),		\
	INSN_3(ALU, RSH, X),		\
	INSN_3(ALU, XOR, X),		\
	INSN_3(ALU, MUL, X),		\
	INSN_3(ALU, MOV, X),		\
	INSN_3(ALU, DIV, X),		\
	INSN_3(ALU, MOD, X),		\
	INSN_2(ALU, NEG),		\
	INSN_3(ALU, END, TO_BE),	\
	INSN_3(ALU, END, TO_LE),	\
	/* Immediate based. */		\
	INSN_3(ALU, ADD, K),		\
	INSN_3(ALU, SUB, K),		\
	INSN_3(ALU, AND, K),		\
	INSN_3(ALU, OR, K),		\
	INSN_3(ALU, LSH, K),		\
	INSN_3(ALU, RSH, K),		\
	INSN_3(ALU, XOR, K),		\
	INSN_3(ALU, MUL, K),		\
	INSN_3(ALU, MOV, K),		\
	INSN_3(ALU, DIV, K),		\
	INSN_3(ALU, MOD, K),		\
	/* 64 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU64, ADD, X),		\
	INSN_3(ALU64, SUB, X),		\
	INSN_3(ALU64, AND, X),		\
	INSN_3(ALU64, OR, X),		\
	INSN_3(ALU64, LSH, X),		\
	INSN_3(ALU64, RSH, X),		\
	INSN_3(ALU64, XOR, X),		\
	INSN_3(ALU64, MUL, X),		\
	INSN_3(ALU64, MOV, X),		\
	INSN_3(ALU64, ARSH, X),		\
	INSN_3(ALU64, DIV, X),		\
	INSN_3(ALU64, MOD, X),		\
	INSN_2(ALU64, NEG),		\
	/* Immediate based. */		\
	INSN_3(ALU64, ADD, K),		\
	INSN_3(ALU64, SUB, K),		\
	INSN_3(ALU64, AND, K),		\
	INSN_3(ALU64, OR, K),		\
	INSN_3(ALU64, LSH, K),		\
	INSN_3(ALU64, RSH, K),		\
	INSN_3(ALU64, XOR, K),		\
	INSN_3(ALU64, MUL, K),		\
	INSN_3(ALU64, MOV, K),		\
	INSN_3(ALU64, ARSH, K),		\
	INSN_3(ALU64, DIV, K),		\
	INSN_3(ALU64, MOD, K),		\
	/* Call instruction. */		\
	INSN_2(JMP, CALL),		\
	/* Exit instruction. */		\
	INSN_2(JMP, EXIT),		\
	/* Jump instructions. */	\
	/* Register based. */		\
	INSN_3(JMP, JEQ, X),		\
	INSN_3(JMP, JNE, X),		\
	INSN_3(JMP, JGT, X),		\
	INSN_3(JMP, JLT, X),		\
	INSN_3(JMP, JGE, X),		\
	INSN_3(JMP, JLE, X),		\
	INSN_3(JMP, JSGT, X),		\
	INSN_3(JMP, JSLT, X),		\
	INSN_3(JMP, JSGE, X),		\
	INSN_3(JMP, JSLE, X),		\
	INSN_3(JMP, JSET, X),		\
	/* Immediate based. */		\
	INSN_3(JMP, JEQ, K),		\
	INSN_3(JMP, JNE, K),		\
	INSN_3(JMP, JGT, K),		\
	INSN_3(JMP, JLT, K),		\
	INSN_3(JMP, JGE, K),		\
	INSN_3(JMP, JLE, K),		\
	INSN_3(JMP, JSGT, K),		\
	INSN_3(JMP, JSLT, K),		\
	INSN_3(JMP, JSGE, K),		\
	INSN_3(JMP, JSLE, K),		\
	INSN_3(JMP, JSET, K),		\
	INSN_2(JMP, JA),		\
	/* Store instructions. */	\
	/* Register based. */		\
	INSN_3(STX, MEM, B),		\
	INSN_3(STX, MEM, H),		\
	INSN_3(STX, MEM, W),		\
	INSN_3(STX, MEM, DW),		\
	INSN_3(STX, XADD, W),		\
	INSN_3(STX, XADD, DW),		\
	/* Immediate based. */		\
	INSN_3(ST, MEM, B),		\
	INSN_3(ST, MEM, H),		\
	INSN_3(ST, MEM, W),		\
	INSN_3(ST, MEM, DW),		\
	/* Load instructions. */	\
	/* Register based. */		\
	INSN_3(LDX, MEM, B),		\
	INSN_3(LDX, MEM, H),		\
	INSN_3(LDX, MEM, W),		\
	INSN_3(LDX, MEM, DW),		\
	/* Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}
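
/* The interpreter below uses the same opcode map to build a computed-goto
 * jump table: each handler label's address is stored at the index formed by
 * the opcode byte, select_insn dispatches through it, and the CONT macros
 * advance to the next instruction. Unknown opcodes fall through to
 * default_label.
 */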

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *	@stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
	u64 tmp;
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;
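
	/* The JMP_CALL handler below resolves a helper by adding insn->imm to
	 * __bpf_call_base, i.e. the immediate encodes the helper's offset
	 * relative to that anchor function. JMP_TAIL_CALL re-enters
	 * select_insn on the target program's insnsi without unwinding,
	 * bounded by MAX_TAIL_CALL_CNT.
	 */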

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_X:
		if (DST < SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_K:
		if (DST < IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_X:
		if (DST <= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_K:
		if (DST <= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_X:
		if (((s64) DST) < ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_K:
		if (((s64) DST) < ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_X:
		if (((s64) DST) <= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_K:
		if (((s64) DST) <= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;
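
	/* The LDST() macro below stamps out the three memory access handlers
	 * (register store STX_MEM, immediate store ST_MEM and load LDX_MEM)
	 * once per access size, while the XADD handlers further down map to
	 * atomic{,64}_add on the target address.
	 */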

	/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}
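
/* The EVALn() helpers below expand DEFINE_BPF_PROG_RUN(_ARGS) once per
 * supported stack size, yielding __bpf_prog_run32() ... __bpf_prog_run512()
 * and the _args variants. bpf_prog_select_func() and bpf_patch_call_args()
 * later pick the smallest variant whose on-stack scratch area covers the
 * program's verified stack depth, via round_up(stack_depth, 32) / 32 - 1.
 */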

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	if (fp->bpf_func)
		goto finalize;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		fp = bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
		if (!fp->jited) {
			*err = -ENOTSUPP;
			return fp;
		}
#endif
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine, if we deal
	 * with JITed or non JITed program concatenations and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use one global 'empty_prog_array'. It will not be
 * modified by the caller of bpf_prog_array_alloc() (since the caller
 * requested prog_cnt == 0); that pointer should still be 'freed' via
 * bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
{
	if (!progs ||
	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	rcu_read_lock();
	item = rcu_dereference(array)->items;
	for (; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	rcu_read_unlock();
	return cnt;
}

static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	item = rcu_dereference_check(array, 1)->items;
	for (; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}
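
/* A bpf_prog_array is a NULL-terminated array of items; entries removed via
 * bpf_prog_array_delete_safe() are overwritten with dummy_bpf_prog rather
 * than shifted out, so concurrent RCU readers keep seeing a consistent,
 * NULL-terminated list. The length and copy helpers above and below
 * therefore skip the dummy entries.
 */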

int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *     bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check, but
	 * bpf_prog_array_length() releases rcu lock and
	 * prog array could have been swapped with empty or larger array,
	 * so always copy 'cnt' prog_ids to the user.
	 * In a rare race the user will see zero prog_ids
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	rcu_read_lock();
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	rcu_read_unlock();
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item = array->items;

	for (; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
									: 0;
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);