// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
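 *
 * Illustration (added, not part of the original comment): cBPF programs can
 * address bytes relative to the network or link-layer header through the
 * special negative offsets SKF_NET_OFF and SKF_LL_OFF. A JIT slow path might
 * resolve such a load roughly as in the sketch below; everything except the
 * helper itself is a made-up name for the example.
 *
 *	u8 byte, *p;
 *
 *	// read the IPv4 protocol field: 9 bytes past the network header
 *	p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 9,
 *						 sizeof(byte));
 *	if (p)
 *		byte = *p;	// NULL means the access falls outside the skb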
62 */ 63 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) 64 { 65 u8 *ptr = NULL; 66 67 if (k >= SKF_NET_OFF) 68 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 69 else if (k >= SKF_LL_OFF) 70 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 71 72 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 73 return ptr; 74 75 return NULL; 76 } 77 78 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags) 79 { 80 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 81 struct bpf_prog_aux *aux; 82 struct bpf_prog *fp; 83 84 size = round_up(size, PAGE_SIZE); 85 fp = __vmalloc(size, gfp_flags); 86 if (fp == NULL) 87 return NULL; 88 89 aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags); 90 if (aux == NULL) { 91 vfree(fp); 92 return NULL; 93 } 94 95 fp->pages = size / PAGE_SIZE; 96 fp->aux = aux; 97 fp->aux->prog = fp; 98 fp->jit_requested = ebpf_jit_enabled(); 99 100 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); 101 mutex_init(&fp->aux->used_maps_mutex); 102 103 return fp; 104 } 105 106 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) 107 { 108 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 109 struct bpf_prog *prog; 110 int cpu; 111 112 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags); 113 if (!prog) 114 return NULL; 115 116 prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags); 117 if (!prog->aux->stats) { 118 kfree(prog->aux); 119 vfree(prog); 120 return NULL; 121 } 122 123 for_each_possible_cpu(cpu) { 124 struct bpf_prog_stats *pstats; 125 126 pstats = per_cpu_ptr(prog->aux->stats, cpu); 127 u64_stats_init(&pstats->syncp); 128 } 129 return prog; 130 } 131 EXPORT_SYMBOL_GPL(bpf_prog_alloc); 132 133 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog) 134 { 135 if (!prog->aux->nr_linfo || !prog->jit_requested) 136 return 0; 137 138 prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo, 139 sizeof(*prog->aux->jited_linfo), 140 GFP_KERNEL | __GFP_NOWARN); 141 if (!prog->aux->jited_linfo) 142 return -ENOMEM; 143 144 return 0; 145 } 146 147 void bpf_prog_free_jited_linfo(struct bpf_prog *prog) 148 { 149 kfree(prog->aux->jited_linfo); 150 prog->aux->jited_linfo = NULL; 151 } 152 153 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog) 154 { 155 if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0]) 156 bpf_prog_free_jited_linfo(prog); 157 } 158 159 /* The jit engine is responsible to provide an array 160 * for insn_off to the jited_off mapping (insn_to_jit_off). 161 * 162 * The idx to this array is the insn_off. Hence, the insn_off 163 * here is relative to the prog itself instead of the main prog. 164 * This array has one entry for each xlated bpf insn. 165 * 166 * jited_off is the byte off to the last byte of the jited insn. 167 * 168 * Hence, with 169 * insn_start: 170 * The first bpf insn off of the prog. The insn off 171 * here is relative to the main prog. 172 * e.g. 
if prog is a subprog, insn_start > 0 173 * linfo_idx: 174 * The prog's idx to prog->aux->linfo and jited_linfo 175 * 176 * jited_linfo[linfo_idx] = prog->bpf_func 177 * 178 * For i > linfo_idx, 179 * 180 * jited_linfo[i] = prog->bpf_func + 181 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1] 182 */ 183 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, 184 const u32 *insn_to_jit_off) 185 { 186 u32 linfo_idx, insn_start, insn_end, nr_linfo, i; 187 const struct bpf_line_info *linfo; 188 void **jited_linfo; 189 190 if (!prog->aux->jited_linfo) 191 /* Userspace did not provide linfo */ 192 return; 193 194 linfo_idx = prog->aux->linfo_idx; 195 linfo = &prog->aux->linfo[linfo_idx]; 196 insn_start = linfo[0].insn_off; 197 insn_end = insn_start + prog->len; 198 199 jited_linfo = &prog->aux->jited_linfo[linfo_idx]; 200 jited_linfo[0] = prog->bpf_func; 201 202 nr_linfo = prog->aux->nr_linfo - linfo_idx; 203 204 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++) 205 /* The verifier ensures that linfo[i].insn_off is 206 * strictly increasing 207 */ 208 jited_linfo[i] = prog->bpf_func + 209 insn_to_jit_off[linfo[i].insn_off - insn_start - 1]; 210 } 211 212 void bpf_prog_free_linfo(struct bpf_prog *prog) 213 { 214 bpf_prog_free_jited_linfo(prog); 215 kvfree(prog->aux->linfo); 216 } 217 218 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, 219 gfp_t gfp_extra_flags) 220 { 221 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 222 struct bpf_prog *fp; 223 u32 pages, delta; 224 int ret; 225 226 size = round_up(size, PAGE_SIZE); 227 pages = size / PAGE_SIZE; 228 if (pages <= fp_old->pages) 229 return fp_old; 230 231 delta = pages - fp_old->pages; 232 ret = __bpf_prog_charge(fp_old->aux->user, delta); 233 if (ret) 234 return NULL; 235 236 fp = __vmalloc(size, gfp_flags); 237 if (fp == NULL) { 238 __bpf_prog_uncharge(fp_old->aux->user, delta); 239 } else { 240 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); 241 fp->pages = pages; 242 fp->aux->prog = fp; 243 244 /* We keep fp->aux from fp_old around in the new 245 * reallocated structure. 246 */ 247 fp_old->aux = NULL; 248 __bpf_prog_free(fp_old); 249 } 250 251 return fp; 252 } 253 254 void __bpf_prog_free(struct bpf_prog *fp) 255 { 256 if (fp->aux) { 257 mutex_destroy(&fp->aux->used_maps_mutex); 258 free_percpu(fp->aux->stats); 259 kfree(fp->aux->poke_tab); 260 kfree(fp->aux); 261 } 262 vfree(fp); 263 } 264 265 int bpf_prog_calc_tag(struct bpf_prog *fp) 266 { 267 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64); 268 u32 raw_size = bpf_prog_tag_scratch_size(fp); 269 u32 digest[SHA1_DIGEST_WORDS]; 270 u32 ws[SHA1_WORKSPACE_WORDS]; 271 u32 i, bsize, psize, blocks; 272 struct bpf_insn *dst; 273 bool was_ld_map; 274 u8 *raw, *todo; 275 __be32 *result; 276 __be64 *bits; 277 278 raw = vmalloc(raw_size); 279 if (!raw) 280 return -ENOMEM; 281 282 sha1_init(digest); 283 memset(ws, 0, sizeof(ws)); 284 285 /* We need to take out the map fd for the digest calculation 286 * since they are unstable from user space side. 
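 *
 * Illustration (added, not from the original comment): a single ld_imm64
 * that refers to a map occupies two struct bpf_insn slots, and only their
 * 'imm' halves carry the unstable value, e.g.
 *
 *	insn[0] = (struct bpf_insn) {
 *		.code    = BPF_LD | BPF_IMM | BPF_DW,
 *		.dst_reg = BPF_REG_1,
 *		.src_reg = BPF_PSEUDO_MAP_FD,
 *		.imm     = 42,			// the map fd, differs per load
 *	};
 *	insn[1] = (struct bpf_insn) { .imm = 0 };	// upper 32 bits
 *
 * The loop below copies the program and clears exactly these two 'imm'
 * fields before hashing, so the tag stays identical no matter which fd
 * numbers the loader happened to get.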
287 */ 288 dst = (void *)raw; 289 for (i = 0, was_ld_map = false; i < fp->len; i++) { 290 dst[i] = fp->insnsi[i]; 291 if (!was_ld_map && 292 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && 293 (dst[i].src_reg == BPF_PSEUDO_MAP_FD || 294 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { 295 was_ld_map = true; 296 dst[i].imm = 0; 297 } else if (was_ld_map && 298 dst[i].code == 0 && 299 dst[i].dst_reg == 0 && 300 dst[i].src_reg == 0 && 301 dst[i].off == 0) { 302 was_ld_map = false; 303 dst[i].imm = 0; 304 } else { 305 was_ld_map = false; 306 } 307 } 308 309 psize = bpf_prog_insn_size(fp); 310 memset(&raw[psize], 0, raw_size - psize); 311 raw[psize++] = 0x80; 312 313 bsize = round_up(psize, SHA1_BLOCK_SIZE); 314 blocks = bsize / SHA1_BLOCK_SIZE; 315 todo = raw; 316 if (bsize - psize >= sizeof(__be64)) { 317 bits = (__be64 *)(todo + bsize - sizeof(__be64)); 318 } else { 319 bits = (__be64 *)(todo + bsize + bits_offset); 320 blocks++; 321 } 322 *bits = cpu_to_be64((psize - 1) << 3); 323 324 while (blocks--) { 325 sha1_transform(digest, todo, ws); 326 todo += SHA1_BLOCK_SIZE; 327 } 328 329 result = (__force __be32 *)digest; 330 for (i = 0; i < SHA1_DIGEST_WORDS; i++) 331 result[i] = cpu_to_be32(digest[i]); 332 memcpy(fp->tag, result, sizeof(fp->tag)); 333 334 vfree(raw); 335 return 0; 336 } 337 338 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, 339 s32 end_new, s32 curr, const bool probe_pass) 340 { 341 const s64 imm_min = S32_MIN, imm_max = S32_MAX; 342 s32 delta = end_new - end_old; 343 s64 imm = insn->imm; 344 345 if (curr < pos && curr + imm + 1 >= end_old) 346 imm += delta; 347 else if (curr >= end_new && curr + imm + 1 < end_new) 348 imm -= delta; 349 if (imm < imm_min || imm > imm_max) 350 return -ERANGE; 351 if (!probe_pass) 352 insn->imm = imm; 353 return 0; 354 } 355 356 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, 357 s32 end_new, s32 curr, const bool probe_pass) 358 { 359 const s32 off_min = S16_MIN, off_max = S16_MAX; 360 s32 delta = end_new - end_old; 361 s32 off = insn->off; 362 363 if (curr < pos && curr + off + 1 >= end_old) 364 off += delta; 365 else if (curr >= end_new && curr + off + 1 < end_new) 366 off -= delta; 367 if (off < off_min || off > off_max) 368 return -ERANGE; 369 if (!probe_pass) 370 insn->off = off; 371 return 0; 372 } 373 374 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, 375 s32 end_new, const bool probe_pass) 376 { 377 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); 378 struct bpf_insn *insn = prog->insnsi; 379 int ret = 0; 380 381 for (i = 0; i < insn_cnt; i++, insn++) { 382 u8 code; 383 384 /* In the probing pass we still operate on the original, 385 * unpatched image in order to check overflows before we 386 * do any other adjustments. Therefore skip the patchlet. 387 */ 388 if (probe_pass && i == pos) { 389 i = end_new; 390 insn = prog->insnsi + end_old; 391 } 392 code = insn->code; 393 if ((BPF_CLASS(code) != BPF_JMP && 394 BPF_CLASS(code) != BPF_JMP32) || 395 BPF_OP(code) == BPF_EXIT) 396 continue; 397 /* Adjust offset of jmps if we cross patch boundaries. 
*/ 398 if (BPF_OP(code) == BPF_CALL) { 399 if (insn->src_reg != BPF_PSEUDO_CALL) 400 continue; 401 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 402 end_new, i, probe_pass); 403 } else { 404 ret = bpf_adj_delta_to_off(insn, pos, end_old, 405 end_new, i, probe_pass); 406 } 407 if (ret) 408 break; 409 } 410 411 return ret; 412 } 413 414 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta) 415 { 416 struct bpf_line_info *linfo; 417 u32 i, nr_linfo; 418 419 nr_linfo = prog->aux->nr_linfo; 420 if (!nr_linfo || !delta) 421 return; 422 423 linfo = prog->aux->linfo; 424 425 for (i = 0; i < nr_linfo; i++) 426 if (off < linfo[i].insn_off) 427 break; 428 429 /* Push all off < linfo[i].insn_off by delta */ 430 for (; i < nr_linfo; i++) 431 linfo[i].insn_off += delta; 432 } 433 434 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 435 const struct bpf_insn *patch, u32 len) 436 { 437 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; 438 const u32 cnt_max = S16_MAX; 439 struct bpf_prog *prog_adj; 440 int err; 441 442 /* Since our patchlet doesn't expand the image, we're done. */ 443 if (insn_delta == 0) { 444 memcpy(prog->insnsi + off, patch, sizeof(*patch)); 445 return prog; 446 } 447 448 insn_adj_cnt = prog->len + insn_delta; 449 450 /* Reject anything that would potentially let the insn->off 451 * target overflow when we have excessive program expansions. 452 * We need to probe here before we do any reallocation where 453 * we afterwards may not fail anymore. 454 */ 455 if (insn_adj_cnt > cnt_max && 456 (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) 457 return ERR_PTR(err); 458 459 /* Several new instructions need to be inserted. Make room 460 * for them. Likely, there's no need for a new allocation as 461 * last page could have large enough tailroom. 462 */ 463 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), 464 GFP_USER); 465 if (!prog_adj) 466 return ERR_PTR(-ENOMEM); 467 468 prog_adj->len = insn_adj_cnt; 469 470 /* Patching happens in 3 steps: 471 * 472 * 1) Move over tail of insnsi from next instruction onwards, 473 * so we can patch the single target insn with one or more 474 * new ones (patching is always from 1 to n insns, n > 0). 475 * 2) Inject new instructions at the target location. 476 * 3) Adjust branch offsets if necessary. 477 */ 478 insn_rest = insn_adj_cnt - off - len; 479 480 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, 481 sizeof(*patch) * insn_rest); 482 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); 483 484 /* We are guaranteed to not fail at this point, otherwise 485 * the ship has sailed to reverse to the original state. An 486 * overflow cannot happen at this point. 
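 *
 * Worked example of the three steps (added for illustration): patching
 * insn 2 of a six insn program with a three insn patchlet gives
 * insn_adj_cnt = 8 and insn_rest = 3. The memmove above shifts the old
 * insns 3-5 into slots 5-7, the memcpy drops the patchlet into slots 2-4,
 * and the call below re-targets every jump or pseudo call whose target
 * lies behind the patched area (delta = 2 in this example), while jumps
 * that stay in front of the patchlet keep their offsets.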
487 */ 488 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); 489 490 bpf_adj_linfo(prog_adj, off, insn_delta); 491 492 return prog_adj; 493 } 494 495 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) 496 { 497 /* Branch offsets can't overflow when program is shrinking, no need 498 * to call bpf_adj_branches(..., true) here 499 */ 500 memmove(prog->insnsi + off, prog->insnsi + off + cnt, 501 sizeof(struct bpf_insn) * (prog->len - off - cnt)); 502 prog->len -= cnt; 503 504 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false)); 505 } 506 507 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) 508 { 509 int i; 510 511 for (i = 0; i < fp->aux->func_cnt; i++) 512 bpf_prog_kallsyms_del(fp->aux->func[i]); 513 } 514 515 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) 516 { 517 bpf_prog_kallsyms_del_subprogs(fp); 518 bpf_prog_kallsyms_del(fp); 519 } 520 521 #ifdef CONFIG_BPF_JIT 522 /* All BPF JIT sysctl knobs here. */ 523 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 524 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 525 int bpf_jit_harden __read_mostly; 526 long bpf_jit_limit __read_mostly; 527 528 static void 529 bpf_prog_ksym_set_addr(struct bpf_prog *prog) 530 { 531 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog); 532 unsigned long addr = (unsigned long)hdr; 533 534 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); 535 536 prog->aux->ksym.start = (unsigned long) prog->bpf_func; 537 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE; 538 } 539 540 static void 541 bpf_prog_ksym_set_name(struct bpf_prog *prog) 542 { 543 char *sym = prog->aux->ksym.name; 544 const char *end = sym + KSYM_NAME_LEN; 545 const struct btf_type *type; 546 const char *func_name; 547 548 BUILD_BUG_ON(sizeof("bpf_prog_") + 549 sizeof(prog->tag) * 2 + 550 /* name has been null terminated. 551 * We should need +1 for the '_' preceding 552 * the name. However, the null character 553 * is double counted between the name and the 554 * sizeof("bpf_prog_") above, so we omit 555 * the +1 here. 
556 */ 557 sizeof(prog->aux->name) > KSYM_NAME_LEN); 558 559 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); 560 sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); 561 562 /* prog->aux->name will be ignored if full btf name is available */ 563 if (prog->aux->func_info_cnt) { 564 type = btf_type_by_id(prog->aux->btf, 565 prog->aux->func_info[prog->aux->func_idx].type_id); 566 func_name = btf_name_by_offset(prog->aux->btf, type->name_off); 567 snprintf(sym, (size_t)(end - sym), "_%s", func_name); 568 return; 569 } 570 571 if (prog->aux->name[0]) 572 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); 573 else 574 *sym = 0; 575 } 576 577 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n) 578 { 579 return container_of(n, struct bpf_ksym, tnode)->start; 580 } 581 582 static __always_inline bool bpf_tree_less(struct latch_tree_node *a, 583 struct latch_tree_node *b) 584 { 585 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b); 586 } 587 588 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) 589 { 590 unsigned long val = (unsigned long)key; 591 const struct bpf_ksym *ksym; 592 593 ksym = container_of(n, struct bpf_ksym, tnode); 594 595 if (val < ksym->start) 596 return -1; 597 if (val >= ksym->end) 598 return 1; 599 600 return 0; 601 } 602 603 static const struct latch_tree_ops bpf_tree_ops = { 604 .less = bpf_tree_less, 605 .comp = bpf_tree_comp, 606 }; 607 608 static DEFINE_SPINLOCK(bpf_lock); 609 static LIST_HEAD(bpf_kallsyms); 610 static struct latch_tree_root bpf_tree __cacheline_aligned; 611 612 void bpf_ksym_add(struct bpf_ksym *ksym) 613 { 614 spin_lock_bh(&bpf_lock); 615 WARN_ON_ONCE(!list_empty(&ksym->lnode)); 616 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms); 617 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 618 spin_unlock_bh(&bpf_lock); 619 } 620 621 static void __bpf_ksym_del(struct bpf_ksym *ksym) 622 { 623 if (list_empty(&ksym->lnode)) 624 return; 625 626 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 627 list_del_rcu(&ksym->lnode); 628 } 629 630 void bpf_ksym_del(struct bpf_ksym *ksym) 631 { 632 spin_lock_bh(&bpf_lock); 633 __bpf_ksym_del(ksym); 634 spin_unlock_bh(&bpf_lock); 635 } 636 637 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) 638 { 639 return fp->jited && !bpf_prog_was_classic(fp); 640 } 641 642 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) 643 { 644 return list_empty(&fp->aux->ksym.lnode) || 645 fp->aux->ksym.lnode.prev == LIST_POISON2; 646 } 647 648 void bpf_prog_kallsyms_add(struct bpf_prog *fp) 649 { 650 if (!bpf_prog_kallsyms_candidate(fp) || 651 !bpf_capable()) 652 return; 653 654 bpf_prog_ksym_set_addr(fp); 655 bpf_prog_ksym_set_name(fp); 656 fp->aux->ksym.prog = true; 657 658 bpf_ksym_add(&fp->aux->ksym); 659 } 660 661 void bpf_prog_kallsyms_del(struct bpf_prog *fp) 662 { 663 if (!bpf_prog_kallsyms_candidate(fp)) 664 return; 665 666 bpf_ksym_del(&fp->aux->ksym); 667 } 668 669 static struct bpf_ksym *bpf_ksym_find(unsigned long addr) 670 { 671 struct latch_tree_node *n; 672 673 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); 674 return n ? 
container_of(n, struct bpf_ksym, tnode) : NULL; 675 } 676 677 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, 678 unsigned long *off, char *sym) 679 { 680 struct bpf_ksym *ksym; 681 char *ret = NULL; 682 683 rcu_read_lock(); 684 ksym = bpf_ksym_find(addr); 685 if (ksym) { 686 unsigned long symbol_start = ksym->start; 687 unsigned long symbol_end = ksym->end; 688 689 strncpy(sym, ksym->name, KSYM_NAME_LEN); 690 691 ret = sym; 692 if (size) 693 *size = symbol_end - symbol_start; 694 if (off) 695 *off = addr - symbol_start; 696 } 697 rcu_read_unlock(); 698 699 return ret; 700 } 701 702 bool is_bpf_text_address(unsigned long addr) 703 { 704 bool ret; 705 706 rcu_read_lock(); 707 ret = bpf_ksym_find(addr) != NULL; 708 rcu_read_unlock(); 709 710 return ret; 711 } 712 713 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) 714 { 715 struct bpf_ksym *ksym = bpf_ksym_find(addr); 716 717 return ksym && ksym->prog ? 718 container_of(ksym, struct bpf_prog_aux, ksym)->prog : 719 NULL; 720 } 721 722 const struct exception_table_entry *search_bpf_extables(unsigned long addr) 723 { 724 const struct exception_table_entry *e = NULL; 725 struct bpf_prog *prog; 726 727 rcu_read_lock(); 728 prog = bpf_prog_ksym_find(addr); 729 if (!prog) 730 goto out; 731 if (!prog->aux->num_exentries) 732 goto out; 733 734 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr); 735 out: 736 rcu_read_unlock(); 737 return e; 738 } 739 740 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 741 char *sym) 742 { 743 struct bpf_ksym *ksym; 744 unsigned int it = 0; 745 int ret = -ERANGE; 746 747 if (!bpf_jit_kallsyms_enabled()) 748 return ret; 749 750 rcu_read_lock(); 751 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) { 752 if (it++ != symnum) 753 continue; 754 755 strncpy(sym, ksym->name, KSYM_NAME_LEN); 756 757 *value = ksym->start; 758 *type = BPF_SYM_ELF_TYPE; 759 760 ret = 0; 761 break; 762 } 763 rcu_read_unlock(); 764 765 return ret; 766 } 767 768 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, 769 struct bpf_jit_poke_descriptor *poke) 770 { 771 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 772 static const u32 poke_tab_max = 1024; 773 u32 slot = prog->aux->size_poke_tab; 774 u32 size = slot + 1; 775 776 if (size > poke_tab_max) 777 return -ENOSPC; 778 if (poke->tailcall_target || poke->tailcall_target_stable || 779 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr) 780 return -EINVAL; 781 782 switch (poke->reason) { 783 case BPF_POKE_REASON_TAIL_CALL: 784 if (!poke->tail_call.map) 785 return -EINVAL; 786 break; 787 default: 788 return -EINVAL; 789 } 790 791 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL); 792 if (!tab) 793 return -ENOMEM; 794 795 memcpy(&tab[slot], poke, sizeof(*poke)); 796 prog->aux->size_poke_tab = size; 797 prog->aux->poke_tab = tab; 798 799 return slot; 800 } 801 802 static atomic_long_t bpf_jit_current; 803 804 /* Can be overridden by an arch's JIT compiler if it has a custom, 805 * dedicated BPF backend memory area, or if neither of the two 806 * below apply. 807 */ 808 u64 __weak bpf_jit_alloc_exec_limit(void) 809 { 810 #if defined(MODULES_VADDR) 811 return MODULES_END - MODULES_VADDR; 812 #else 813 return VMALLOC_END - VMALLOC_START; 814 #endif 815 } 816 817 static int __init bpf_jit_charge_init(void) 818 { 819 /* Only used as heuristic here to derive limit. 
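 *
 * Example with made-up numbers (added): on an arch whose module/exec area
 * spans 1 GiB, bpf_jit_alloc_exec_limit() returns 1 GiB and the assignment
 * below sets bpf_jit_limit to a quarter of that, 256 MiB, rounded up to a
 * page and capped at LONG_MAX. bpf_jit_charge_modmem() later compares the
 * number of charged pages against bpf_jit_limit >> PAGE_SHIFT.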
*/ 820 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2, 821 PAGE_SIZE), LONG_MAX); 822 return 0; 823 } 824 pure_initcall(bpf_jit_charge_init); 825 826 static int bpf_jit_charge_modmem(u32 pages) 827 { 828 if (atomic_long_add_return(pages, &bpf_jit_current) > 829 (bpf_jit_limit >> PAGE_SHIFT)) { 830 if (!capable(CAP_SYS_ADMIN)) { 831 atomic_long_sub(pages, &bpf_jit_current); 832 return -EPERM; 833 } 834 } 835 836 return 0; 837 } 838 839 static void bpf_jit_uncharge_modmem(u32 pages) 840 { 841 atomic_long_sub(pages, &bpf_jit_current); 842 } 843 844 void *__weak bpf_jit_alloc_exec(unsigned long size) 845 { 846 return module_alloc(size); 847 } 848 849 void __weak bpf_jit_free_exec(void *addr) 850 { 851 module_memfree(addr); 852 } 853 854 struct bpf_binary_header * 855 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, 856 unsigned int alignment, 857 bpf_jit_fill_hole_t bpf_fill_ill_insns) 858 { 859 struct bpf_binary_header *hdr; 860 u32 size, hole, start, pages; 861 862 WARN_ON_ONCE(!is_power_of_2(alignment) || 863 alignment > BPF_IMAGE_ALIGNMENT); 864 865 /* Most of BPF filters are really small, but if some of them 866 * fill a page, allow at least 128 extra bytes to insert a 867 * random section of illegal instructions. 868 */ 869 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); 870 pages = size / PAGE_SIZE; 871 872 if (bpf_jit_charge_modmem(pages)) 873 return NULL; 874 hdr = bpf_jit_alloc_exec(size); 875 if (!hdr) { 876 bpf_jit_uncharge_modmem(pages); 877 return NULL; 878 } 879 880 /* Fill space with illegal/arch-dep instructions. */ 881 bpf_fill_ill_insns(hdr, size); 882 883 hdr->pages = pages; 884 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), 885 PAGE_SIZE - sizeof(*hdr)); 886 start = (get_random_int() % hole) & ~(alignment - 1); 887 888 /* Leave a random number of instructions before BPF code. */ 889 *image_ptr = &hdr->image[start]; 890 891 return hdr; 892 } 893 894 void bpf_jit_binary_free(struct bpf_binary_header *hdr) 895 { 896 u32 pages = hdr->pages; 897 898 bpf_jit_free_exec(hdr); 899 bpf_jit_uncharge_modmem(pages); 900 } 901 902 /* This symbol is only overridden by archs that have different 903 * requirements than the usual eBPF JITs, f.e. when they only 904 * implement cBPF JIT, do not set images read-only, etc. 905 */ 906 void __weak bpf_jit_free(struct bpf_prog *fp) 907 { 908 if (fp->jited) { 909 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); 910 911 bpf_jit_binary_free(hdr); 912 913 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); 914 } 915 916 bpf_prog_unlock_free(fp); 917 } 918 919 int bpf_jit_get_func_addr(const struct bpf_prog *prog, 920 const struct bpf_insn *insn, bool extra_pass, 921 u64 *func_addr, bool *func_addr_fixed) 922 { 923 s16 off = insn->off; 924 s32 imm = insn->imm; 925 u8 *addr; 926 927 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL; 928 if (!*func_addr_fixed) { 929 /* Place-holder address till the last pass has collected 930 * all addresses for JITed subprograms in which case we 931 * can pick them up from prog->aux. 932 */ 933 if (!extra_pass) 934 addr = NULL; 935 else if (prog->aux->func && 936 off >= 0 && off < prog->aux->func_cnt) 937 addr = (u8 *)prog->aux->func[off]->bpf_func; 938 else 939 return -EINVAL; 940 } else { 941 /* Address of a BPF helper call. Since part of the core 942 * kernel, it's always at a fixed location. __bpf_call_base 943 * and the helper with imm relative to it are both in core 944 * kernel. 
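 *
 * Illustrative sketch (added; helper_fn stands for any built-in helper):
 * during fixup the verifier stores a delta along the lines of
 *
 *	insn->imm = helper_fn - __bpf_call_base;
 *
 * and the assignment below, like the interpreter's JMP_CALL case, adds
 * __bpf_call_base back to recover the absolute address.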
945 */ 946 addr = (u8 *)__bpf_call_base + imm; 947 } 948 949 *func_addr = (unsigned long)addr; 950 return 0; 951 } 952 953 static int bpf_jit_blind_insn(const struct bpf_insn *from, 954 const struct bpf_insn *aux, 955 struct bpf_insn *to_buff, 956 bool emit_zext) 957 { 958 struct bpf_insn *to = to_buff; 959 u32 imm_rnd = get_random_int(); 960 s16 off; 961 962 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); 963 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); 964 965 /* Constraints on AX register: 966 * 967 * AX register is inaccessible from user space. It is mapped in 968 * all JITs, and used here for constant blinding rewrites. It is 969 * typically "stateless" meaning its contents are only valid within 970 * the executed instruction, but not across several instructions. 971 * There are a few exceptions however which are further detailed 972 * below. 973 * 974 * Constant blinding is only used by JITs, not in the interpreter. 975 * The interpreter uses AX in some occasions as a local temporary 976 * register e.g. in DIV or MOD instructions. 977 * 978 * In restricted circumstances, the verifier can also use the AX 979 * register for rewrites as long as they do not interfere with 980 * the above cases! 981 */ 982 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) 983 goto out; 984 985 if (from->imm == 0 && 986 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || 987 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { 988 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); 989 goto out; 990 } 991 992 switch (from->code) { 993 case BPF_ALU | BPF_ADD | BPF_K: 994 case BPF_ALU | BPF_SUB | BPF_K: 995 case BPF_ALU | BPF_AND | BPF_K: 996 case BPF_ALU | BPF_OR | BPF_K: 997 case BPF_ALU | BPF_XOR | BPF_K: 998 case BPF_ALU | BPF_MUL | BPF_K: 999 case BPF_ALU | BPF_MOV | BPF_K: 1000 case BPF_ALU | BPF_DIV | BPF_K: 1001 case BPF_ALU | BPF_MOD | BPF_K: 1002 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1003 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1004 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX); 1005 break; 1006 1007 case BPF_ALU64 | BPF_ADD | BPF_K: 1008 case BPF_ALU64 | BPF_SUB | BPF_K: 1009 case BPF_ALU64 | BPF_AND | BPF_K: 1010 case BPF_ALU64 | BPF_OR | BPF_K: 1011 case BPF_ALU64 | BPF_XOR | BPF_K: 1012 case BPF_ALU64 | BPF_MUL | BPF_K: 1013 case BPF_ALU64 | BPF_MOV | BPF_K: 1014 case BPF_ALU64 | BPF_DIV | BPF_K: 1015 case BPF_ALU64 | BPF_MOD | BPF_K: 1016 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1017 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1018 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX); 1019 break; 1020 1021 case BPF_JMP | BPF_JEQ | BPF_K: 1022 case BPF_JMP | BPF_JNE | BPF_K: 1023 case BPF_JMP | BPF_JGT | BPF_K: 1024 case BPF_JMP | BPF_JLT | BPF_K: 1025 case BPF_JMP | BPF_JGE | BPF_K: 1026 case BPF_JMP | BPF_JLE | BPF_K: 1027 case BPF_JMP | BPF_JSGT | BPF_K: 1028 case BPF_JMP | BPF_JSLT | BPF_K: 1029 case BPF_JMP | BPF_JSGE | BPF_K: 1030 case BPF_JMP | BPF_JSLE | BPF_K: 1031 case BPF_JMP | BPF_JSET | BPF_K: 1032 /* Accommodate for extra offset in case of a backjump. 
*/ 1033 off = from->off; 1034 if (off < 0) 1035 off -= 2; 1036 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1037 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1038 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); 1039 break; 1040 1041 case BPF_JMP32 | BPF_JEQ | BPF_K: 1042 case BPF_JMP32 | BPF_JNE | BPF_K: 1043 case BPF_JMP32 | BPF_JGT | BPF_K: 1044 case BPF_JMP32 | BPF_JLT | BPF_K: 1045 case BPF_JMP32 | BPF_JGE | BPF_K: 1046 case BPF_JMP32 | BPF_JLE | BPF_K: 1047 case BPF_JMP32 | BPF_JSGT | BPF_K: 1048 case BPF_JMP32 | BPF_JSLT | BPF_K: 1049 case BPF_JMP32 | BPF_JSGE | BPF_K: 1050 case BPF_JMP32 | BPF_JSLE | BPF_K: 1051 case BPF_JMP32 | BPF_JSET | BPF_K: 1052 /* Accommodate for extra offset in case of a backjump. */ 1053 off = from->off; 1054 if (off < 0) 1055 off -= 2; 1056 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1057 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1058 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, 1059 off); 1060 break; 1061 1062 case BPF_LD | BPF_IMM | BPF_DW: 1063 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); 1064 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1065 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 1066 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); 1067 break; 1068 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ 1069 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1070 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1071 if (emit_zext) 1072 *to++ = BPF_ZEXT_REG(BPF_REG_AX); 1073 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1074 break; 1075 1076 case BPF_ST | BPF_MEM | BPF_DW: 1077 case BPF_ST | BPF_MEM | BPF_W: 1078 case BPF_ST | BPF_MEM | BPF_H: 1079 case BPF_ST | BPF_MEM | BPF_B: 1080 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1081 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1082 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1083 break; 1084 } 1085 out: 1086 return to - to_buff; 1087 } 1088 1089 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, 1090 gfp_t gfp_extra_flags) 1091 { 1092 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 1093 struct bpf_prog *fp; 1094 1095 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); 1096 if (fp != NULL) { 1097 /* aux->prog still points to the fp_other one, so 1098 * when promoting the clone to the real program, 1099 * this still needs to be adapted. 1100 */ 1101 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); 1102 } 1103 1104 return fp; 1105 } 1106 1107 static void bpf_prog_clone_free(struct bpf_prog *fp) 1108 { 1109 /* aux was stolen by the other clone, so we cannot free 1110 * it from this path! It will be freed eventually by the 1111 * other program on release. 1112 * 1113 * At this point, we don't need a deferred release since 1114 * clone is guaranteed to not be locked. 1115 */ 1116 fp->aux = NULL; 1117 __bpf_prog_free(fp); 1118 } 1119 1120 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) 1121 { 1122 /* We have to repoint aux->prog to self, as we don't 1123 * know whether fp here is the clone or the original. 
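 *
 * Typical caller pattern (added sketch of what an arch JIT does around
 * constant blinding; the variable names are examples, not code from this
 * file):
 *
 *	tmp = bpf_jit_blind_constants(prog);
 *	if (IS_ERR(tmp))
 *		return orig_prog;	// blinding failed, fall back
 *	if (tmp != prog) {
 *		blinded = true;
 *		prog = tmp;		// JIT the blinded clone instead
 *	}
 *	...
 *	if (blinded)
 *		bpf_jit_prog_release_other(prog, prog == orig_prog ?
 *					   tmp : orig_prog);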
1124 */ 1125 fp->aux->prog = fp; 1126 bpf_prog_clone_free(fp_other); 1127 } 1128 1129 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) 1130 { 1131 struct bpf_insn insn_buff[16], aux[2]; 1132 struct bpf_prog *clone, *tmp; 1133 int insn_delta, insn_cnt; 1134 struct bpf_insn *insn; 1135 int i, rewritten; 1136 1137 if (!bpf_jit_blinding_enabled(prog) || prog->blinded) 1138 return prog; 1139 1140 clone = bpf_prog_clone_create(prog, GFP_USER); 1141 if (!clone) 1142 return ERR_PTR(-ENOMEM); 1143 1144 insn_cnt = clone->len; 1145 insn = clone->insnsi; 1146 1147 for (i = 0; i < insn_cnt; i++, insn++) { 1148 /* We temporarily need to hold the original ld64 insn 1149 * so that we can still access the first part in the 1150 * second blinding run. 1151 */ 1152 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && 1153 insn[1].code == 0) 1154 memcpy(aux, insn, sizeof(aux)); 1155 1156 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, 1157 clone->aux->verifier_zext); 1158 if (!rewritten) 1159 continue; 1160 1161 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); 1162 if (IS_ERR(tmp)) { 1163 /* Patching may have repointed aux->prog during 1164 * realloc from the original one, so we need to 1165 * fix it up here on error. 1166 */ 1167 bpf_jit_prog_release_other(prog, clone); 1168 return tmp; 1169 } 1170 1171 clone = tmp; 1172 insn_delta = rewritten - 1; 1173 1174 /* Walk new program and skip insns we just inserted. */ 1175 insn = clone->insnsi + i + insn_delta; 1176 insn_cnt += insn_delta; 1177 i += insn_delta; 1178 } 1179 1180 clone->blinded = 1; 1181 return clone; 1182 } 1183 #endif /* CONFIG_BPF_JIT */ 1184 1185 /* Base function for offset calculation. Needs to go into .text section, 1186 * therefore keeping it non-static as well; will also be used by JITs 1187 * anyway later on, so do not let the compiler omit it. This also needs 1188 * to go into kallsyms for correlation from e.g. bpftool, so naming 1189 * must not change. 1190 */ 1191 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1192 { 1193 return 0; 1194 } 1195 EXPORT_SYMBOL_GPL(__bpf_call_base); 1196 1197 /* All UAPI available opcodes. */ 1198 #define BPF_INSN_MAP(INSN_2, INSN_3) \ 1199 /* 32 bit ALU operations. */ \ 1200 /* Register based. */ \ 1201 INSN_3(ALU, ADD, X), \ 1202 INSN_3(ALU, SUB, X), \ 1203 INSN_3(ALU, AND, X), \ 1204 INSN_3(ALU, OR, X), \ 1205 INSN_3(ALU, LSH, X), \ 1206 INSN_3(ALU, RSH, X), \ 1207 INSN_3(ALU, XOR, X), \ 1208 INSN_3(ALU, MUL, X), \ 1209 INSN_3(ALU, MOV, X), \ 1210 INSN_3(ALU, ARSH, X), \ 1211 INSN_3(ALU, DIV, X), \ 1212 INSN_3(ALU, MOD, X), \ 1213 INSN_2(ALU, NEG), \ 1214 INSN_3(ALU, END, TO_BE), \ 1215 INSN_3(ALU, END, TO_LE), \ 1216 /* Immediate based. */ \ 1217 INSN_3(ALU, ADD, K), \ 1218 INSN_3(ALU, SUB, K), \ 1219 INSN_3(ALU, AND, K), \ 1220 INSN_3(ALU, OR, K), \ 1221 INSN_3(ALU, LSH, K), \ 1222 INSN_3(ALU, RSH, K), \ 1223 INSN_3(ALU, XOR, K), \ 1224 INSN_3(ALU, MUL, K), \ 1225 INSN_3(ALU, MOV, K), \ 1226 INSN_3(ALU, ARSH, K), \ 1227 INSN_3(ALU, DIV, K), \ 1228 INSN_3(ALU, MOD, K), \ 1229 /* 64 bit ALU operations. */ \ 1230 /* Register based. */ \ 1231 INSN_3(ALU64, ADD, X), \ 1232 INSN_3(ALU64, SUB, X), \ 1233 INSN_3(ALU64, AND, X), \ 1234 INSN_3(ALU64, OR, X), \ 1235 INSN_3(ALU64, LSH, X), \ 1236 INSN_3(ALU64, RSH, X), \ 1237 INSN_3(ALU64, XOR, X), \ 1238 INSN_3(ALU64, MUL, X), \ 1239 INSN_3(ALU64, MOV, X), \ 1240 INSN_3(ALU64, ARSH, X), \ 1241 INSN_3(ALU64, DIV, X), \ 1242 INSN_3(ALU64, MOD, X), \ 1243 INSN_2(ALU64, NEG), \ 1244 /* Immediate based. 
*/ \ 1245 INSN_3(ALU64, ADD, K), \ 1246 INSN_3(ALU64, SUB, K), \ 1247 INSN_3(ALU64, AND, K), \ 1248 INSN_3(ALU64, OR, K), \ 1249 INSN_3(ALU64, LSH, K), \ 1250 INSN_3(ALU64, RSH, K), \ 1251 INSN_3(ALU64, XOR, K), \ 1252 INSN_3(ALU64, MUL, K), \ 1253 INSN_3(ALU64, MOV, K), \ 1254 INSN_3(ALU64, ARSH, K), \ 1255 INSN_3(ALU64, DIV, K), \ 1256 INSN_3(ALU64, MOD, K), \ 1257 /* Call instruction. */ \ 1258 INSN_2(JMP, CALL), \ 1259 /* Exit instruction. */ \ 1260 INSN_2(JMP, EXIT), \ 1261 /* 32-bit Jump instructions. */ \ 1262 /* Register based. */ \ 1263 INSN_3(JMP32, JEQ, X), \ 1264 INSN_3(JMP32, JNE, X), \ 1265 INSN_3(JMP32, JGT, X), \ 1266 INSN_3(JMP32, JLT, X), \ 1267 INSN_3(JMP32, JGE, X), \ 1268 INSN_3(JMP32, JLE, X), \ 1269 INSN_3(JMP32, JSGT, X), \ 1270 INSN_3(JMP32, JSLT, X), \ 1271 INSN_3(JMP32, JSGE, X), \ 1272 INSN_3(JMP32, JSLE, X), \ 1273 INSN_3(JMP32, JSET, X), \ 1274 /* Immediate based. */ \ 1275 INSN_3(JMP32, JEQ, K), \ 1276 INSN_3(JMP32, JNE, K), \ 1277 INSN_3(JMP32, JGT, K), \ 1278 INSN_3(JMP32, JLT, K), \ 1279 INSN_3(JMP32, JGE, K), \ 1280 INSN_3(JMP32, JLE, K), \ 1281 INSN_3(JMP32, JSGT, K), \ 1282 INSN_3(JMP32, JSLT, K), \ 1283 INSN_3(JMP32, JSGE, K), \ 1284 INSN_3(JMP32, JSLE, K), \ 1285 INSN_3(JMP32, JSET, K), \ 1286 /* Jump instructions. */ \ 1287 /* Register based. */ \ 1288 INSN_3(JMP, JEQ, X), \ 1289 INSN_3(JMP, JNE, X), \ 1290 INSN_3(JMP, JGT, X), \ 1291 INSN_3(JMP, JLT, X), \ 1292 INSN_3(JMP, JGE, X), \ 1293 INSN_3(JMP, JLE, X), \ 1294 INSN_3(JMP, JSGT, X), \ 1295 INSN_3(JMP, JSLT, X), \ 1296 INSN_3(JMP, JSGE, X), \ 1297 INSN_3(JMP, JSLE, X), \ 1298 INSN_3(JMP, JSET, X), \ 1299 /* Immediate based. */ \ 1300 INSN_3(JMP, JEQ, K), \ 1301 INSN_3(JMP, JNE, K), \ 1302 INSN_3(JMP, JGT, K), \ 1303 INSN_3(JMP, JLT, K), \ 1304 INSN_3(JMP, JGE, K), \ 1305 INSN_3(JMP, JLE, K), \ 1306 INSN_3(JMP, JSGT, K), \ 1307 INSN_3(JMP, JSLT, K), \ 1308 INSN_3(JMP, JSGE, K), \ 1309 INSN_3(JMP, JSLE, K), \ 1310 INSN_3(JMP, JSET, K), \ 1311 INSN_2(JMP, JA), \ 1312 /* Store instructions. */ \ 1313 /* Register based. */ \ 1314 INSN_3(STX, MEM, B), \ 1315 INSN_3(STX, MEM, H), \ 1316 INSN_3(STX, MEM, W), \ 1317 INSN_3(STX, MEM, DW), \ 1318 INSN_3(STX, XADD, W), \ 1319 INSN_3(STX, XADD, DW), \ 1320 /* Immediate based. */ \ 1321 INSN_3(ST, MEM, B), \ 1322 INSN_3(ST, MEM, H), \ 1323 INSN_3(ST, MEM, W), \ 1324 INSN_3(ST, MEM, DW), \ 1325 /* Load instructions. */ \ 1326 /* Register based. */ \ 1327 INSN_3(LDX, MEM, B), \ 1328 INSN_3(LDX, MEM, H), \ 1329 INSN_3(LDX, MEM, W), \ 1330 INSN_3(LDX, MEM, DW), \ 1331 /* Immediate based. */ \ 1332 INSN_3(LD, IMM, DW) 1333 1334 bool bpf_opcode_in_insntable(u8 code) 1335 { 1336 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true 1337 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true 1338 static const bool public_insntable[256] = { 1339 [0 ... 255] = false, 1340 /* Now overwrite non-defaults ... */ 1341 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL), 1342 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. 
*/ 1343 [BPF_LD | BPF_ABS | BPF_B] = true, 1344 [BPF_LD | BPF_ABS | BPF_H] = true, 1345 [BPF_LD | BPF_ABS | BPF_W] = true, 1346 [BPF_LD | BPF_IND | BPF_B] = true, 1347 [BPF_LD | BPF_IND | BPF_H] = true, 1348 [BPF_LD | BPF_IND | BPF_W] = true, 1349 }; 1350 #undef BPF_INSN_3_TBL 1351 #undef BPF_INSN_2_TBL 1352 return public_insntable[code]; 1353 } 1354 1355 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1356 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) 1357 { 1358 memset(dst, 0, size); 1359 return -EFAULT; 1360 } 1361 1362 /** 1363 * __bpf_prog_run - run eBPF program on a given context 1364 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers 1365 * @insn: is the array of eBPF instructions 1366 * @stack: is the eBPF storage stack 1367 * 1368 * Decode and execute eBPF instructions. 1369 */ 1370 static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) 1371 { 1372 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y 1373 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z 1374 static const void * const jumptable[256] __annotate_jump_table = { 1375 [0 ... 255] = &&default_label, 1376 /* Now overwrite non-defaults ... */ 1377 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), 1378 /* Non-UAPI available opcodes. */ 1379 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, 1380 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, 1381 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, 1382 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, 1383 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, 1384 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW, 1385 }; 1386 #undef BPF_INSN_3_LBL 1387 #undef BPF_INSN_2_LBL 1388 u32 tail_call_cnt = 0; 1389 1390 #define CONT ({ insn++; goto select_insn; }) 1391 #define CONT_JMP ({ insn++; goto select_insn; }) 1392 1393 select_insn: 1394 goto *jumptable[insn->code]; 1395 1396 /* ALU */ 1397 #define ALU(OPCODE, OP) \ 1398 ALU64_##OPCODE##_X: \ 1399 DST = DST OP SRC; \ 1400 CONT; \ 1401 ALU_##OPCODE##_X: \ 1402 DST = (u32) DST OP (u32) SRC; \ 1403 CONT; \ 1404 ALU64_##OPCODE##_K: \ 1405 DST = DST OP IMM; \ 1406 CONT; \ 1407 ALU_##OPCODE##_K: \ 1408 DST = (u32) DST OP (u32) IMM; \ 1409 CONT; 1410 1411 ALU(ADD, +) 1412 ALU(SUB, -) 1413 ALU(AND, &) 1414 ALU(OR, |) 1415 ALU(LSH, <<) 1416 ALU(RSH, >>) 1417 ALU(XOR, ^) 1418 ALU(MUL, *) 1419 #undef ALU 1420 ALU_NEG: 1421 DST = (u32) -DST; 1422 CONT; 1423 ALU64_NEG: 1424 DST = -DST; 1425 CONT; 1426 ALU_MOV_X: 1427 DST = (u32) SRC; 1428 CONT; 1429 ALU_MOV_K: 1430 DST = (u32) IMM; 1431 CONT; 1432 ALU64_MOV_X: 1433 DST = SRC; 1434 CONT; 1435 ALU64_MOV_K: 1436 DST = IMM; 1437 CONT; 1438 LD_IMM_DW: 1439 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; 1440 insn++; 1441 CONT; 1442 ALU_ARSH_X: 1443 DST = (u64) (u32) (((s32) DST) >> SRC); 1444 CONT; 1445 ALU_ARSH_K: 1446 DST = (u64) (u32) (((s32) DST) >> IMM); 1447 CONT; 1448 ALU64_ARSH_X: 1449 (*(s64 *) &DST) >>= SRC; 1450 CONT; 1451 ALU64_ARSH_K: 1452 (*(s64 *) &DST) >>= IMM; 1453 CONT; 1454 ALU64_MOD_X: 1455 div64_u64_rem(DST, SRC, &AX); 1456 DST = AX; 1457 CONT; 1458 ALU_MOD_X: 1459 AX = (u32) DST; 1460 DST = do_div(AX, (u32) SRC); 1461 CONT; 1462 ALU64_MOD_K: 1463 div64_u64_rem(DST, IMM, &AX); 1464 DST = AX; 1465 CONT; 1466 ALU_MOD_K: 1467 AX = (u32) DST; 1468 DST = do_div(AX, (u32) IMM); 1469 CONT; 1470 ALU64_DIV_X: 1471 DST = div64_u64(DST, SRC); 1472 CONT; 1473 ALU_DIV_X: 1474 AX = (u32) DST; 1475 do_div(AX, (u32) SRC); 1476 DST = (u32) AX; 1477 CONT; 1478 
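	/* Added illustration, not part of the original file: the ALU() macro
	 * above expands into four handlers per opcode. ALU(ADD, +), for
	 * instance, becomes
	 *
	 *	ALU64_ADD_X: DST = DST + SRC; CONT;
	 *	ALU_ADD_X:   DST = (u32) DST + (u32) SRC; CONT;
	 *	ALU64_ADD_K: DST = DST + IMM; CONT;
	 *	ALU_ADD_K:   DST = (u32) DST + (u32) IMM; CONT;
	 *
	 * The DIV and MOD handlers around this point cannot use that macro
	 * since they go through the 64-bit division helpers and use AX as
	 * scratch space.
	 */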
ALU64_DIV_K: 1479 DST = div64_u64(DST, IMM); 1480 CONT; 1481 ALU_DIV_K: 1482 AX = (u32) DST; 1483 do_div(AX, (u32) IMM); 1484 DST = (u32) AX; 1485 CONT; 1486 ALU_END_TO_BE: 1487 switch (IMM) { 1488 case 16: 1489 DST = (__force u16) cpu_to_be16(DST); 1490 break; 1491 case 32: 1492 DST = (__force u32) cpu_to_be32(DST); 1493 break; 1494 case 64: 1495 DST = (__force u64) cpu_to_be64(DST); 1496 break; 1497 } 1498 CONT; 1499 ALU_END_TO_LE: 1500 switch (IMM) { 1501 case 16: 1502 DST = (__force u16) cpu_to_le16(DST); 1503 break; 1504 case 32: 1505 DST = (__force u32) cpu_to_le32(DST); 1506 break; 1507 case 64: 1508 DST = (__force u64) cpu_to_le64(DST); 1509 break; 1510 } 1511 CONT; 1512 1513 /* CALL */ 1514 JMP_CALL: 1515 /* Function call scratches BPF_R1-BPF_R5 registers, 1516 * preserves BPF_R6-BPF_R9, and stores return value 1517 * into BPF_R0. 1518 */ 1519 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, 1520 BPF_R4, BPF_R5); 1521 CONT; 1522 1523 JMP_CALL_ARGS: 1524 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, 1525 BPF_R3, BPF_R4, 1526 BPF_R5, 1527 insn + insn->off + 1); 1528 CONT; 1529 1530 JMP_TAIL_CALL: { 1531 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 1532 struct bpf_array *array = container_of(map, struct bpf_array, map); 1533 struct bpf_prog *prog; 1534 u32 index = BPF_R3; 1535 1536 if (unlikely(index >= array->map.max_entries)) 1537 goto out; 1538 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT)) 1539 goto out; 1540 1541 tail_call_cnt++; 1542 1543 prog = READ_ONCE(array->ptrs[index]); 1544 if (!prog) 1545 goto out; 1546 1547 /* ARG1 at this point is guaranteed to point to CTX from 1548 * the verifier side due to the fact that the tail call is 1549 * handled like a helper, that is, bpf_tail_call_proto, 1550 * where arg1_type is ARG_PTR_TO_CTX. 
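 *
 * From the BPF program's side this looks like (added sketch; the map and
 * index names are made up):
 *
 *	bpf_tail_call(ctx, &jmp_table, slot);
 *	// only reached if the slot is empty, out of range, or the
 *	// MAX_TAIL_CALL_CNT limit has been exceeded
 *
 * On success the callee runs on the same ctx and never returns to the
 * caller, which is what the jump back to select_insn below implements.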
1551 */ 1552 insn = prog->insnsi; 1553 goto select_insn; 1554 out: 1555 CONT; 1556 } 1557 JMP_JA: 1558 insn += insn->off; 1559 CONT; 1560 JMP_EXIT: 1561 return BPF_R0; 1562 /* JMP */ 1563 #define COND_JMP(SIGN, OPCODE, CMP_OP) \ 1564 JMP_##OPCODE##_X: \ 1565 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ 1566 insn += insn->off; \ 1567 CONT_JMP; \ 1568 } \ 1569 CONT; \ 1570 JMP32_##OPCODE##_X: \ 1571 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ 1572 insn += insn->off; \ 1573 CONT_JMP; \ 1574 } \ 1575 CONT; \ 1576 JMP_##OPCODE##_K: \ 1577 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ 1578 insn += insn->off; \ 1579 CONT_JMP; \ 1580 } \ 1581 CONT; \ 1582 JMP32_##OPCODE##_K: \ 1583 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ 1584 insn += insn->off; \ 1585 CONT_JMP; \ 1586 } \ 1587 CONT; 1588 COND_JMP(u, JEQ, ==) 1589 COND_JMP(u, JNE, !=) 1590 COND_JMP(u, JGT, >) 1591 COND_JMP(u, JLT, <) 1592 COND_JMP(u, JGE, >=) 1593 COND_JMP(u, JLE, <=) 1594 COND_JMP(u, JSET, &) 1595 COND_JMP(s, JSGT, >) 1596 COND_JMP(s, JSLT, <) 1597 COND_JMP(s, JSGE, >=) 1598 COND_JMP(s, JSLE, <=) 1599 #undef COND_JMP 1600 /* STX and ST and LDX*/ 1601 #define LDST(SIZEOP, SIZE) \ 1602 STX_MEM_##SIZEOP: \ 1603 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ 1604 CONT; \ 1605 ST_MEM_##SIZEOP: \ 1606 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ 1607 CONT; \ 1608 LDX_MEM_##SIZEOP: \ 1609 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 1610 CONT; 1611 1612 LDST(B, u8) 1613 LDST(H, u16) 1614 LDST(W, u32) 1615 LDST(DW, u64) 1616 #undef LDST 1617 #define LDX_PROBE(SIZEOP, SIZE) \ 1618 LDX_PROBE_MEM_##SIZEOP: \ 1619 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \ 1620 CONT; 1621 LDX_PROBE(B, 1) 1622 LDX_PROBE(H, 2) 1623 LDX_PROBE(W, 4) 1624 LDX_PROBE(DW, 8) 1625 #undef LDX_PROBE 1626 1627 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */ 1628 atomic_add((u32) SRC, (atomic_t *)(unsigned long) 1629 (DST + insn->off)); 1630 CONT; 1631 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */ 1632 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long) 1633 (DST + insn->off)); 1634 CONT; 1635 1636 default_label: 1637 /* If we ever reach this, we have a bug somewhere. Die hard here 1638 * instead of just returning 0; we could be somewhere in a subprog, 1639 * so execution could continue otherwise which we do /not/ want. 1640 * 1641 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable(). 
1642 */ 1643 pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code); 1644 BUG_ON(1); 1645 return 0; 1646 } 1647 1648 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size 1649 #define DEFINE_BPF_PROG_RUN(stack_size) \ 1650 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ 1651 { \ 1652 u64 stack[stack_size / sizeof(u64)]; \ 1653 u64 regs[MAX_BPF_EXT_REG]; \ 1654 \ 1655 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 1656 ARG1 = (u64) (unsigned long) ctx; \ 1657 return ___bpf_prog_run(regs, insn, stack); \ 1658 } 1659 1660 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size 1661 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \ 1662 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ 1663 const struct bpf_insn *insn) \ 1664 { \ 1665 u64 stack[stack_size / sizeof(u64)]; \ 1666 u64 regs[MAX_BPF_EXT_REG]; \ 1667 \ 1668 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 1669 BPF_R1 = r1; \ 1670 BPF_R2 = r2; \ 1671 BPF_R3 = r3; \ 1672 BPF_R4 = r4; \ 1673 BPF_R5 = r5; \ 1674 return ___bpf_prog_run(regs, insn, stack); \ 1675 } 1676 1677 #define EVAL1(FN, X) FN(X) 1678 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y) 1679 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y) 1680 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y) 1681 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y) 1682 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y) 1683 1684 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192); 1685 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384); 1686 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512); 1687 1688 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192); 1689 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384); 1690 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512); 1691 1692 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size), 1693 1694 static unsigned int (*interpreters[])(const void *ctx, 1695 const struct bpf_insn *insn) = { 1696 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 1697 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 1698 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 1699 }; 1700 #undef PROG_NAME_LIST 1701 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size), 1702 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, 1703 const struct bpf_insn *insn) = { 1704 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 1705 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 1706 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 1707 }; 1708 #undef PROG_NAME_LIST 1709 1710 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) 1711 { 1712 stack_depth = max_t(u32, stack_depth, 1); 1713 insn->off = (s16) insn->imm; 1714 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - 1715 __bpf_call_base_args; 1716 insn->code = BPF_JMP | BPF_CALL_ARGS; 1717 } 1718 1719 #else 1720 static unsigned int __bpf_prog_ret0_warn(const void *ctx, 1721 const struct bpf_insn *insn) 1722 { 1723 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON 1724 * is not working properly, so warn about it! 1725 */ 1726 WARN_ON_ONCE(1); 1727 return 0; 1728 } 1729 #endif 1730 1731 bool bpf_prog_array_compatible(struct bpf_array *array, 1732 const struct bpf_prog *fp) 1733 { 1734 if (fp->kprobe_override) 1735 return false; 1736 1737 if (!array->aux->type) { 1738 /* There's no owner yet where we could check for 1739 * compatibility. 
1740 */ 1741 array->aux->type = fp->type; 1742 array->aux->jited = fp->jited; 1743 return true; 1744 } 1745 1746 return array->aux->type == fp->type && 1747 array->aux->jited == fp->jited; 1748 } 1749 1750 static int bpf_check_tail_call(const struct bpf_prog *fp) 1751 { 1752 struct bpf_prog_aux *aux = fp->aux; 1753 int i, ret = 0; 1754 1755 mutex_lock(&aux->used_maps_mutex); 1756 for (i = 0; i < aux->used_map_cnt; i++) { 1757 struct bpf_map *map = aux->used_maps[i]; 1758 struct bpf_array *array; 1759 1760 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 1761 continue; 1762 1763 array = container_of(map, struct bpf_array, map); 1764 if (!bpf_prog_array_compatible(array, fp)) { 1765 ret = -EINVAL; 1766 goto out; 1767 } 1768 } 1769 1770 out: 1771 mutex_unlock(&aux->used_maps_mutex); 1772 return ret; 1773 } 1774 1775 static void bpf_prog_select_func(struct bpf_prog *fp) 1776 { 1777 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1778 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 1779 1780 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 1781 #else 1782 fp->bpf_func = __bpf_prog_ret0_warn; 1783 #endif 1784 } 1785 1786 /** 1787 * bpf_prog_select_runtime - select exec runtime for BPF program 1788 * @fp: bpf_prog populated with internal BPF program 1789 * @err: pointer to error variable 1790 * 1791 * Try to JIT eBPF program, if JIT is not available, use interpreter. 1792 * The BPF program will be executed via BPF_PROG_RUN() macro. 1793 */ 1794 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 1795 { 1796 /* In case of BPF to BPF calls, verifier did all the prep 1797 * work with regards to JITing, etc. 1798 */ 1799 if (fp->bpf_func) 1800 goto finalize; 1801 1802 bpf_prog_select_func(fp); 1803 1804 /* eBPF JITs can rewrite the program in case constant 1805 * blinding is active. However, in case of error during 1806 * blinding, bpf_int_jit_compile() must always return a 1807 * valid program, which in this case would simply not 1808 * be JITed, but falls back to the interpreter. 1809 */ 1810 if (!bpf_prog_is_dev_bound(fp->aux)) { 1811 *err = bpf_prog_alloc_jited_linfo(fp); 1812 if (*err) 1813 return fp; 1814 1815 fp = bpf_int_jit_compile(fp); 1816 if (!fp->jited) { 1817 bpf_prog_free_jited_linfo(fp); 1818 #ifdef CONFIG_BPF_JIT_ALWAYS_ON 1819 *err = -ENOTSUPP; 1820 return fp; 1821 #endif 1822 } else { 1823 bpf_prog_free_unused_jited_linfo(fp); 1824 } 1825 } else { 1826 *err = bpf_prog_offload_compile(fp); 1827 if (*err) 1828 return fp; 1829 } 1830 1831 finalize: 1832 bpf_prog_lock_ro(fp); 1833 1834 /* The tail call compatibility check can only be done at 1835 * this late stage as we need to determine, if we deal 1836 * with JITed or non JITed program concatenations and not 1837 * all eBPF JITs might immediately support all features. 
1838 */ 1839 *err = bpf_check_tail_call(fp); 1840 1841 return fp; 1842 } 1843 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 1844 1845 static unsigned int __bpf_prog_ret1(const void *ctx, 1846 const struct bpf_insn *insn) 1847 { 1848 return 1; 1849 } 1850 1851 static struct bpf_prog_dummy { 1852 struct bpf_prog prog; 1853 } dummy_bpf_prog = { 1854 .prog = { 1855 .bpf_func = __bpf_prog_ret1, 1856 }, 1857 }; 1858 1859 /* to avoid allocating empty bpf_prog_array for cgroups that 1860 * don't have bpf program attached use one global 'empty_prog_array' 1861 * It will not be modified the caller of bpf_prog_array_alloc() 1862 * (since caller requested prog_cnt == 0) 1863 * that pointer should be 'freed' by bpf_prog_array_free() 1864 */ 1865 static struct { 1866 struct bpf_prog_array hdr; 1867 struct bpf_prog *null_prog; 1868 } empty_prog_array = { 1869 .null_prog = NULL, 1870 }; 1871 1872 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) 1873 { 1874 if (prog_cnt) 1875 return kzalloc(sizeof(struct bpf_prog_array) + 1876 sizeof(struct bpf_prog_array_item) * 1877 (prog_cnt + 1), 1878 flags); 1879 1880 return &empty_prog_array.hdr; 1881 } 1882 1883 void bpf_prog_array_free(struct bpf_prog_array *progs) 1884 { 1885 if (!progs || progs == &empty_prog_array.hdr) 1886 return; 1887 kfree_rcu(progs, rcu); 1888 } 1889 1890 int bpf_prog_array_length(struct bpf_prog_array *array) 1891 { 1892 struct bpf_prog_array_item *item; 1893 u32 cnt = 0; 1894 1895 for (item = array->items; item->prog; item++) 1896 if (item->prog != &dummy_bpf_prog.prog) 1897 cnt++; 1898 return cnt; 1899 } 1900 1901 bool bpf_prog_array_is_empty(struct bpf_prog_array *array) 1902 { 1903 struct bpf_prog_array_item *item; 1904 1905 for (item = array->items; item->prog; item++) 1906 if (item->prog != &dummy_bpf_prog.prog) 1907 return false; 1908 return true; 1909 } 1910 1911 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, 1912 u32 *prog_ids, 1913 u32 request_cnt) 1914 { 1915 struct bpf_prog_array_item *item; 1916 int i = 0; 1917 1918 for (item = array->items; item->prog; item++) { 1919 if (item->prog == &dummy_bpf_prog.prog) 1920 continue; 1921 prog_ids[i] = item->prog->aux->id; 1922 if (++i == request_cnt) { 1923 item++; 1924 break; 1925 } 1926 } 1927 1928 return !!(item->prog); 1929 } 1930 1931 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, 1932 __u32 __user *prog_ids, u32 cnt) 1933 { 1934 unsigned long err = 0; 1935 bool nospc; 1936 u32 *ids; 1937 1938 /* users of this function are doing: 1939 * cnt = bpf_prog_array_length(); 1940 * if (cnt > 0) 1941 * bpf_prog_array_copy_to_user(..., cnt); 1942 * so below kcalloc doesn't need extra cnt > 0 check. 1943 */ 1944 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); 1945 if (!ids) 1946 return -ENOMEM; 1947 nospc = bpf_prog_array_copy_core(array, ids, cnt); 1948 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 1949 kfree(ids); 1950 if (err) 1951 return -EFAULT; 1952 if (nospc) 1953 return -ENOSPC; 1954 return 0; 1955 } 1956 1957 void bpf_prog_array_delete_safe(struct bpf_prog_array *array, 1958 struct bpf_prog *old_prog) 1959 { 1960 struct bpf_prog_array_item *item; 1961 1962 for (item = array->items; item->prog; item++) 1963 if (item->prog == old_prog) { 1964 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); 1965 break; 1966 } 1967 } 1968 1969 /** 1970 * bpf_prog_array_delete_safe_at() - Replaces the program at the given 1971 * index into the program array with 1972 * a dummy no-op program. 
1973 * @array: a bpf_prog_array 1974 * @index: the index of the program to replace 1975 * 1976 * Skips over dummy programs, by not counting them, when calculating 1977 * the position of the program to replace. 1978 * 1979 * Return: 1980 * * 0 - Success 1981 * * -EINVAL - Invalid index value. Must be a non-negative integer. 1982 * * -ENOENT - Index out of range 1983 */ 1984 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index) 1985 { 1986 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog); 1987 } 1988 1989 /** 1990 * bpf_prog_array_update_at() - Updates the program at the given index 1991 * into the program array. 1992 * @array: a bpf_prog_array 1993 * @index: the index of the program to update 1994 * @prog: the program to insert into the array 1995 * 1996 * Skips over dummy programs, by not counting them, when calculating 1997 * the position of the program to update. 1998 * 1999 * Return: 2000 * * 0 - Success 2001 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2002 * * -ENOENT - Index out of range 2003 */ 2004 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 2005 struct bpf_prog *prog) 2006 { 2007 struct bpf_prog_array_item *item; 2008 2009 if (unlikely(index < 0)) 2010 return -EINVAL; 2011 2012 for (item = array->items; item->prog; item++) { 2013 if (item->prog == &dummy_bpf_prog.prog) 2014 continue; 2015 if (!index) { 2016 WRITE_ONCE(item->prog, prog); 2017 return 0; 2018 } 2019 index--; 2020 } 2021 return -ENOENT; 2022 } 2023 2024 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 2025 struct bpf_prog *exclude_prog, 2026 struct bpf_prog *include_prog, 2027 struct bpf_prog_array **new_array) 2028 { 2029 int new_prog_cnt, carry_prog_cnt = 0; 2030 struct bpf_prog_array_item *existing; 2031 struct bpf_prog_array *array; 2032 bool found_exclude = false; 2033 int new_prog_idx = 0; 2034 2035 /* Figure out how many existing progs we need to carry over to 2036 * the new array. 2037 */ 2038 if (old_array) { 2039 existing = old_array->items; 2040 for (; existing->prog; existing++) { 2041 if (existing->prog == exclude_prog) { 2042 found_exclude = true; 2043 continue; 2044 } 2045 if (existing->prog != &dummy_bpf_prog.prog) 2046 carry_prog_cnt++; 2047 if (existing->prog == include_prog) 2048 return -EEXIST; 2049 } 2050 } 2051 2052 if (exclude_prog && !found_exclude) 2053 return -ENOENT; 2054 2055 /* How many progs (not NULL) will be in the new array? */ 2056 new_prog_cnt = carry_prog_cnt; 2057 if (include_prog) 2058 new_prog_cnt += 1; 2059 2060 /* Do we have any prog (not NULL) in the new array? 
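 *
 * Callers typically run this copy under a lock and publish the result via
 * RCU, roughly (added sketch; 'owner->prog_array' is a placeholder for the
 * attach point):
 *
 *	ret = bpf_prog_array_copy(old_array, NULL, new_prog, &new_array);
 *	if (!ret) {
 *		rcu_assign_pointer(owner->prog_array, new_array);
 *		bpf_prog_array_free(old_array);
 *	}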
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								       : 0;
}
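/* Illustrative sketch of a typical bpf_prog_array_copy() caller; the owner
 * structure, its lock and its 'progs' pointer are assumptions for the
 * example, not code from this file. An attach path builds a new array from
 * the active one, swaps it in, and frees the old copy:
 *
 *	struct bpf_prog_array *old_array, *new_array;
 *	int err;
 *
 *	old_array = rcu_dereference_protected(owner->progs,
 *					      lockdep_is_held(&owner->lock));
 *	err = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 *	if (err < 0)
 *		return err;
 *	rcu_assign_pointer(owner->progs, new_array);
 *	bpf_prog_array_free(old_array);
 */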
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	u32 i;

	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_free_used_maps(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	bpf_trampoline_put(aux->trampoline);
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->linked_prog)
		bpf_prog_put(aux->linked_prog);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space, with state separate from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, e.g. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
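/* Illustrative sketch (hypothetical proto, not defined in this file): a
 * BPF_CALL_0() body such as bpf_get_raw_cpu_id() above is exposed to
 * programs by wrapping it in a bpf_func_proto, like bpf_tail_call_proto
 * above, so the verifier knows its return and argument types:
 *
 *	static const struct bpf_func_proto bpf_example_raw_cpu_id_proto = {
 *		.func		= bpf_get_raw_cpu_id,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *	};
 */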
/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
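/* Illustrative sketch (an assumption about the consumer of the key; the
 * run-path code is not part of this file): bpf_stats_enabled_key above is
 * meant to be checked with a static branch around program invocation, so
 * run-time stats accounting costs nothing while the key is disabled:
 *
 *	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
 *		u64 start = sched_clock();
 *
 *		ret = prog->bpf_func(ctx, prog->insnsi);
 *		... accumulate sched_clock() - start into prog->aux->stats ...
 *	} else {
 *		ret = prog->bpf_func(ctx, prog->insnsi);
 *	}
 */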