/* Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
					 sizeof(*prog->aux->jited_linfo),
					 GFP_KERNEL | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
	kfree(prog->aux->jited_linfo);
	prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
		bpf_prog_free_jited_linfo(prog);
}

/* The JIT engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog. The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
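/* Illustrative sketch, not part of the original file: how the comment
 * above translates into an address lookup for a single line info entry.
 * ex_linfo_addr() is a hypothetical helper that only restates the
 * formula applied by bpf_prog_fill_jited_linfo().
 */
static void * __maybe_unused
ex_linfo_addr(const struct bpf_prog *prog, const u32 *insn_to_jit_off, u32 i)
{
	const struct bpf_line_info *linfo =
		&prog->aux->linfo[prog->aux->linfo_idx];
	u32 insn_start = linfo[0].insn_off;

	if (i == 0)	/* first entry is the prog's own entry point */
		return prog->bpf_func;
	/* jited_off points at the last byte of the jited insn, hence
	 * the -1 when indexing with the prog-relative insn_off.
	 */
	return prog->bpf_func +
	       insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}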
void bpf_prog_free_linfo(struct bpf_prog *prog)
{
	bpf_prog_free_jited_linfo(prog);
	kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since map fds are unstable from the user-space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 > pos)
		imm += delta;
	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 > pos)
		off += delta;
	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}
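/* Illustrative sketch, not part of the original file: the effect of
 * bpf_adj_delta_to_off() on a forward jump that spans the patch site.
 * Hypothetical worked case: a jump at curr = 3 with off = 4 targets
 * insn 8; inserting delta = 2 insns at pos = 5 lands inside the jump
 * span (curr < pos && curr + off + 1 > pos), so off becomes 6.
 */
static void __maybe_unused ex_adj_off_demo(void)
{
	struct bpf_insn insn = BPF_JMP_IMM(BPF_JA, 0, 0, 4);

	/* probe_pass == false, so insn.off is rewritten in place */
	WARN_ON(bpf_adj_delta_to_off(&insn, 5, 2, 3, false) != 0 ||
		insn.off != 6);
}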
static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
			    const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i += delta + 1;
			insn++;
		}
		code = insn->code;
		if (BPF_CLASS(code) != BPF_JMP ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
						   probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
						   probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all entries with insn_off > off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    bpf_adj_branches(prog, off, insn_delta, true))
		return NULL;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed and we cannot revert to the original
	 * state. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}
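/* Illustrative sketch, not part of the original file: the calling
 * pattern for bpf_patch_insn_single() when expanding one insn into a
 * two-insn patchlet, in the spirit of the verifier's rewrites. On
 * success the old prog pointer must not be used anymore, since the
 * realloc may have moved the program.
 */
static struct bpf_prog * __maybe_unused
ex_patch_demo(struct bpf_prog *prog, u32 off)
{
	struct bpf_insn patch[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
	};

	/* On failure NULL is returned and prog itself stays valid. */
	return bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
}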
#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
long bpf_jit_limit   __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}
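/* Illustrative sketch, not part of the original file: the symbol layout
 * bpf_get_prog_name() produces is "bpf_prog_" + 16 hex chars of the tag,
 * optionally followed by "_<btf func name>" or "_<prog->aux->name>",
 * e.g. a hypothetical "bpf_prog_5a8e1f37c4b9d2e0_my_prog". Callers such
 * as __bpf_address_lookup() pass in a KSYM_NAME_LEN sized buffer:
 */
static void __maybe_unused ex_prog_name_demo(const struct bpf_prog *prog)
{
	char sym[KSYM_NAME_LEN];

	bpf_get_prog_name(prog, sym);
}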
static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}
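/* Illustrative sketch, not part of the original file: the limit derived
 * by bpf_jit_charge_init() below is a quarter of the executable region,
 * rounded up to pages. E.g. a hypothetical 1 GiB module area would give
 * a 256 MiB JIT allocation limit. ex_default_limit() restates that
 * arithmetic.
 */
static u64 __maybe_unused ex_default_limit(void)
{
	return min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
				   PAGE_SIZE), LONG_MAX);
}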
static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}
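/* Illustrative sketch (hypothetical arch-side code, not part of this
 * file): the expected calling pattern for bpf_jit_binary_alloc(). The
 * arch supplies a fill callback writing trapping instructions; the
 * made-up ex_fill_with_traps() just zero-fills here, a real arch would
 * emit its trap opcode. A 4-byte insn alignment is assumed.
 */
static void ex_fill_with_traps(void *area, unsigned int size)
{
	memset(area, 0, size);	/* e.g. trap opcodes on a real arch */
}

static struct bpf_binary_header * __maybe_unused
ex_jit_image_alloc(unsigned int proglen, u8 **image)
{
	return bpf_jit_binary_alloc(proglen, image, 4, ex_fill_with_traps);
}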
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address until the last pass has collected
		 * all addresses for JITed subprograms, in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}
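/* Illustrative sketch, not part of the original file: how an arch JIT
 * would consume bpf_jit_get_func_addr() while emitting a call insn.
 * ex_emit_call() is a hypothetical stand-in for the arch's emitter.
 */
static int __maybe_unused
ex_jit_call(const struct bpf_prog *prog, const struct bpf_insn *insn,
	    bool extra_pass)
{
	bool func_addr_fixed;
	u64 func_addr;
	int ret;

	ret = bpf_jit_get_func_addr(prog, insn, extra_pass, &func_addr,
				    &func_addr_fixed);
	if (ret)
		return ret;
	/* ex_emit_call(ctx, func_addr, func_addr_fixed); */
	return 0;
}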
static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX on some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}
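/* Illustrative sketch, not part of the original file: the rewrite
 * bpf_jit_blind_insn() performs for a simple BPF_ALU64 | BPF_ADD | BPF_K
 * insn. With imm_rnd chosen at random, "r1 += 42" becomes the
 * equivalent three-insn sequence below; only the XOR of the two
 * immediates ever appears in the JITed image.
 */
static int __maybe_unused ex_blind_demo(struct bpf_insn *to, u32 imm_rnd)
{
	const struct bpf_insn from = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 42);

	*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from.imm);
	*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
	*to++ = BPF_ALU64_REG(BPF_ADD, from.dst_reg, BPF_REG_AX);
	return 3;	/* number of insns emitted */
}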
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
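/* Illustrative sketch, not part of the original file: the insn->imm of a
 * BPF_CALL insn is the helper's offset from __bpf_call_base, so
 * converting a helper function pointer back to an imm is plain pointer
 * arithmetic (a GNU C extension on function pointers). ex_helper_to_imm()
 * is hypothetical; the verifier does the equivalent when fixing up calls,
 * and the interpreter below computes (__bpf_call_base + insn->imm)().
 */
static int __maybe_unused ex_helper_to_imm(u64 (*fn)(u64, u64, u64, u64, u64))
{
	return fn - __bpf_call_base;
}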
/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU, ADD,  X),			\
	INSN_3(ALU, SUB,  X),			\
	INSN_3(ALU, AND,  X),			\
	INSN_3(ALU, OR,   X),			\
	INSN_3(ALU, LSH,  X),			\
	INSN_3(ALU, RSH,  X),			\
	INSN_3(ALU, XOR,  X),			\
	INSN_3(ALU, MUL,  X),			\
	INSN_3(ALU, MOV,  X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV,  X),			\
	INSN_3(ALU, MOD,  X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/*   Immediate based. */		\
	INSN_3(ALU, ADD,  K),			\
	INSN_3(ALU, SUB,  K),			\
	INSN_3(ALU, AND,  K),			\
	INSN_3(ALU, OR,   K),			\
	INSN_3(ALU, LSH,  K),			\
	INSN_3(ALU, RSH,  K),			\
	INSN_3(ALU, XOR,  K),			\
	INSN_3(ALU, MUL,  K),			\
	INSN_3(ALU, MOV,  K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV,  K),			\
	INSN_3(ALU, MOD,  K),			\
	/* 64 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU64, ADD,  X),			\
	INSN_3(ALU64, SUB,  X),			\
	INSN_3(ALU64, AND,  X),			\
	INSN_3(ALU64, OR,   X),			\
	INSN_3(ALU64, LSH,  X),			\
	INSN_3(ALU64, RSH,  X),			\
	INSN_3(ALU64, XOR,  X),			\
	INSN_3(ALU64, MUL,  X),			\
	INSN_3(ALU64, MOV,  X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV,  X),			\
	INSN_3(ALU64, MOD,  X),			\
	INSN_2(ALU64, NEG),			\
	/*   Immediate based. */		\
	INSN_3(ALU64, ADD,  K),			\
	INSN_3(ALU64, SUB,  K),			\
	INSN_3(ALU64, AND,  K),			\
	INSN_3(ALU64, OR,   K),			\
	INSN_3(ALU64, LSH,  K),			\
	INSN_3(ALU64, RSH,  K),			\
	INSN_3(ALU64, XOR,  K),			\
	INSN_3(ALU64, MUL,  K),			\
	INSN_3(ALU64, MOV,  K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV,  K),			\
	INSN_3(ALU64, MOD,  K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP, JEQ,  X),			\
	INSN_3(JMP, JNE,  X),			\
	INSN_3(JMP, JGT,  X),			\
	INSN_3(JMP, JLT,  X),			\
	INSN_3(JMP, JGE,  X),			\
	INSN_3(JMP, JLE,  X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP, JEQ,  K),			\
	INSN_3(JMP, JNE,  K),			\
	INSN_3(JMP, JGT,  K),			\
	INSN_3(JMP, JLT,  K),			\
	INSN_3(JMP, JGE,  K),			\
	INSN_3(JMP, JLE,  K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	/* Store instructions. */		\
	/*   Register based. */			\
	INSN_3(STX, MEM,  B),			\
	INSN_3(STX, MEM,  H),			\
	INSN_3(STX, MEM,  W),			\
	INSN_3(STX, MEM,  DW),			\
	INSN_3(STX, XADD, W),			\
	INSN_3(STX, XADD, DW),			\
	/*   Immediate based. */		\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/*   Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	/*   Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}
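/* Illustrative sketch, not part of the original file: the verifier-style
 * up-front opcode check that bpf_opcode_in_insntable() enables, e.g.
 * rejecting an insn before any further validation.
 */
static bool __maybe_unused ex_insn_known(const struct bpf_insn *insn)
{
	return bpf_opcode_in_insntable(insn->code);
}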
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *	@stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;
	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_X:
		if (DST < SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_K:
		if (DST < IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_X:
		if (DST <= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_K:
		if (DST <= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_X:
		if (((s64) DST) < ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_K:
		if (((s64) DST) < ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_X:
		if (((s64) DST) <= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_K:
		if (((s64) DST) <= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}
#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}
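/* Illustrative sketch, not part of the original file: interpreters[] is
 * indexed by stack depth in 32-byte steps, so depths 1..32 share e.g.
 * __bpf_prog_run32(), 33..64 __bpf_prog_run64(), and so on up to 512.
 * ex_interp_idx() restates the index computation used above.
 */
static u32 __maybe_unused ex_interp_idx(u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	return (round_up(stack_depth, 32) / 32) - 1;
}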
/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT the eBPF program; if JIT is not available, use the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN()
 * macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	if (fp->bpf_func)
		goto finalize;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		*err = bpf_prog_alloc_jited_linfo(fp);
		if (*err)
			return fp;

		fp = bpf_int_jit_compile(fp);
		if (!fp->jited) {
			bpf_prog_free_jited_linfo(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
			*err = -ENOTSUPP;
			return fp;
#endif
		} else {
			bpf_prog_free_unused_jited_linfo(fp);
		}
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating empty bpf_prog_array for cgroups that
 * don't have a bpf program attached, use one global 'empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0), and that pointer
 * should still be 'freed' via bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
{
	if (!progs ||
	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	rcu_read_lock();
	item = rcu_dereference(array)->items;
	for (; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	rcu_read_unlock();
	return cnt;
}
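/* Illustrative sketch, not part of the original file: read-side
 * traversal of a prog array, in the spirit of the BPF_PROG_RUN_ARRAY()
 * macro in linux/bpf.h. items[] is walked under RCU until the
 * terminating NULL prog; deleted slots hold dummy_bpf_prog, which
 * simply returns 1. ex_run_all() is hypothetical.
 */
static u32 __maybe_unused ex_run_all(struct bpf_prog_array __rcu *array,
				     const void *ctx)
{
	struct bpf_prog_array_item *item;
	u32 ret = 1;

	rcu_read_lock();
	item = rcu_dereference(array)->items;
	for (; item->prog; item++)
		ret &= BPF_PROG_RUN(item->prog, ctx);
	rcu_read_unlock();
	return ret;
}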
static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	item = rcu_dereference_check(array, 1)->items;
	for (; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* Users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *	bpf_prog_array_copy_to_user(..., cnt);
	 * so the kcalloc below doesn't need an extra cnt > 0 check, but
	 * bpf_prog_array_length() releases the rcu lock and the prog
	 * array could have been swapped with an empty or larger array,
	 * so always copy 'cnt' prog_ids to the user. In a rare race the
	 * user will see zero prog_ids.
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	rcu_read_lock();
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	rcu_read_unlock();
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item = array->items;

	for (; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}
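/* Illustrative sketch, not part of the original file: attach-side usage
 * of bpf_prog_array_copy(), similar to what kernel/trace/bpf_trace.c
 * does under bpf_event_mutex: build a new array with new_prog appended,
 * publish it with rcu_assign_pointer(), then free the old one.
 * ex_attach() is hypothetical; the caller is assumed to hold the lock
 * that serializes updates to *progs.
 */
static int __maybe_unused ex_attach(struct bpf_prog_array __rcu **progs,
				    struct bpf_prog *new_prog)
{
	struct bpf_prog_array __rcu *old_array = *progs;
	struct bpf_prog_array *new_array;
	int ret;

	ret = bpf_prog_array_copy(old_array, NULL, new_prog, &new_array);
	if (ret < 0)
		return ret;
	rcu_assign_pointer(*progs, new_array);
	bpf_prog_array_free(old_array);
	return 0;
}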
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);