// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm
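
/* Illustrative only: for an insn like BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
 * BPF_REG_2), the interpreter's ALU64_ADD_X case further below boils
 * down to
 *
 *	regs[insn->dst_reg] += regs[insn->src_reg];
 *
 * expressed through the DST and SRC shorthands above.
 */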

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->aux->stats) {
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->aux->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
					 sizeof(*prog->aux->jited_linfo),
					 GFP_KERNEL | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
	kfree(prog->aux->jited_linfo);
	prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
		bpf_prog_free_jited_linfo(prog);
}

/* The jit engine is responsible to provide an array
 * for insn_off to the jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog.  The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
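
/* Worked example with illustrative numbers: for a subprog with
 * insn_start = 10 and a line info entry at linfo[i].insn_off = 12, the
 * address recorded below is
 *
 *	prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *
 * i.e. the end of the jited image for xlated insn 11, which is where the
 * jited code for insn 12 begins.
 */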
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

void bpf_prog_free_linfo(struct bpf_prog *prog)
{
	bpf_prog_free_jited_linfo(prog);
	kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		free_percpu(fp->aux->stats);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 delta = end_new - end_old;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}
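
/* Worked example, purely illustrative: patching the single insn at
 * pos = 5 with a 3 insn patchlet gives end_old = 6, end_new = 8 and thus
 * delta = 2. A forward jump at curr = 2 with off = 4 originally targets
 * insn 7, i.e. it crosses the patched region (curr + off + 1 >= end_old),
 * so its offset is bumped to 6. Jumps that stay in front of the patchlet
 * keep their offset unchanged.
 */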

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed to reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end = addr + hdr->pages * PAGE_SIZE;
}

void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return 1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->ip || poke->ip_stable || poke->adj_off)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}
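
/* The default charge limit derived below is one quarter of the arch's
 * JIT allocation space (module area or vmalloc range), rounded up to a
 * page and capped at LONG_MAX; an arch can effectively raise or lower it
 * by overriding bpf_jit_alloc_exec_limit() above.
 */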

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}
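
/* Example of the blinding rewrite done below, with illustrative values:
 * assuming imm_rnd is the per-insn random value, an immediate op such as
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * is replaced by the sequence
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ imm_rnd)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the user-supplied constant never lands verbatim in the JITed image.
 */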

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
					       clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
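
/* Helper calls do not carry an absolute address: the verifier encodes
 * the target as a 32-bit offset from __bpf_call_base in insn->imm, and
 * the interpreter's JMP_CALL case below (as well as
 * bpf_jit_get_func_addr() above) turns that back into an address.
 */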

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/* Register based. */			\
	INSN_3(ALU, ADD, X),			\
	INSN_3(ALU, SUB, X),			\
	INSN_3(ALU, AND, X),			\
	INSN_3(ALU, OR, X),			\
	INSN_3(ALU, LSH, X),			\
	INSN_3(ALU, RSH, X),			\
	INSN_3(ALU, XOR, X),			\
	INSN_3(ALU, MUL, X),			\
	INSN_3(ALU, MOV, X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV, X),			\
	INSN_3(ALU, MOD, X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/* Immediate based. */			\
	INSN_3(ALU, ADD, K),			\
	INSN_3(ALU, SUB, K),			\
	INSN_3(ALU, AND, K),			\
	INSN_3(ALU, OR, K),			\
	INSN_3(ALU, LSH, K),			\
	INSN_3(ALU, RSH, K),			\
	INSN_3(ALU, XOR, K),			\
	INSN_3(ALU, MUL, K),			\
	INSN_3(ALU, MOV, K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV, K),			\
	INSN_3(ALU, MOD, K),			\
	/* 64 bit ALU operations. */		\
	/* Register based. */			\
	INSN_3(ALU64, ADD, X),			\
	INSN_3(ALU64, SUB, X),			\
	INSN_3(ALU64, AND, X),			\
	INSN_3(ALU64, OR, X),			\
	INSN_3(ALU64, LSH, X),			\
	INSN_3(ALU64, RSH, X),			\
	INSN_3(ALU64, XOR, X),			\
	INSN_3(ALU64, MUL, X),			\
	INSN_3(ALU64, MOV, X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV, X),			\
	INSN_3(ALU64, MOD, X),			\
	INSN_2(ALU64, NEG),			\
	/* Immediate based. */			\
	INSN_3(ALU64, ADD, K),			\
	INSN_3(ALU64, SUB, K),			\
	INSN_3(ALU64, AND, K),			\
	INSN_3(ALU64, OR, K),			\
	INSN_3(ALU64, LSH, K),			\
	INSN_3(ALU64, RSH, K),			\
	INSN_3(ALU64, XOR, K),			\
	INSN_3(ALU64, MUL, K),			\
	INSN_3(ALU64, MOV, K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV, K),			\
	INSN_3(ALU64, MOD, K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* 32-bit Jump instructions. */		\
	/* Register based. */			\
	INSN_3(JMP32, JEQ, X),			\
	INSN_3(JMP32, JNE, X),			\
	INSN_3(JMP32, JGT, X),			\
	INSN_3(JMP32, JLT, X),			\
	INSN_3(JMP32, JGE, X),			\
	INSN_3(JMP32, JLE, X),			\
	INSN_3(JMP32, JSGT, X),			\
	INSN_3(JMP32, JSLT, X),			\
	INSN_3(JMP32, JSGE, X),			\
	INSN_3(JMP32, JSLE, X),			\
	INSN_3(JMP32, JSET, X),			\
	/* Immediate based. */			\
	INSN_3(JMP32, JEQ, K),			\
	INSN_3(JMP32, JNE, K),			\
	INSN_3(JMP32, JGT, K),			\
	INSN_3(JMP32, JLT, K),			\
	INSN_3(JMP32, JGE, K),			\
	INSN_3(JMP32, JLE, K),			\
	INSN_3(JMP32, JSGT, K),			\
	INSN_3(JMP32, JSLT, K),			\
	INSN_3(JMP32, JSGE, K),			\
	INSN_3(JMP32, JSLE, K),			\
	INSN_3(JMP32, JSET, K),			\
	/* Jump instructions. */		\
	/* Register based. */			\
	INSN_3(JMP, JEQ, X),			\
	INSN_3(JMP, JNE, X),			\
	INSN_3(JMP, JGT, X),			\
	INSN_3(JMP, JLT, X),			\
	INSN_3(JMP, JGE, X),			\
	INSN_3(JMP, JLE, X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/* Immediate based. */			\
	INSN_3(JMP, JEQ, K),			\
	INSN_3(JMP, JNE, K),			\
	INSN_3(JMP, JGT, K),			\
	INSN_3(JMP, JLT, K),			\
	INSN_3(JMP, JGE, K),			\
	INSN_3(JMP, JLE, K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	/* Store instructions. */		\
	/* Register based. */			\
	INSN_3(STX, MEM, B),			\
	INSN_3(STX, MEM, H),			\
	INSN_3(STX, MEM, W),			\
	INSN_3(STX, MEM, DW),			\
	INSN_3(STX, XADD, W),			\
	INSN_3(STX, XADD, DW),			\
	/* Immediate based. */			\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/* Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	/* Immediate based. */			\
	INSN_3(LD, IMM, DW)
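
/* BPF_INSN_MAP() is expanded twice with different helpers: right below
 * with BPF_INSN_2/3_TBL to build the opcode validity table consulted by
 * the verifier, and again in ___bpf_prog_run() with BPF_INSN_2/3_LBL to
 * build the interpreter's jump table, which keeps both views of the
 * instruction set in sync by construction.
 */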

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
{
	memset(dst, 0, size);
	return -EFAULT;
}

/**
 * __bpf_prog_run - run eBPF program on a given context
 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 * @insn: is the array of eBPF instructions
 * @stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)				\
	ALU64_##OPCODE##_X:			\
		DST = DST OP SRC;		\
		CONT;				\
	ALU_##OPCODE##_X:			\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;				\
	ALU64_##OPCODE##_K:			\
		DST = DST OP IMM;		\
		CONT;				\
	ALU_##OPCODE##_K:			\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
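
	/* For reference: each ALU(OPCODE, OP) instantiation above expands
	 * into four labels, e.g. ALU(ADD, +) yields ALU64_ADD_X, ALU_ADD_X,
	 * ALU64_ADD_K and ALU_ADD_K, covering the 64-bit and 32-bit
	 * register/immediate forms before dispatching the next insn via
	 * CONT.
	 */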
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> SRC);
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
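
	/* Note that a taken tail call never returns to the caller: the
	 * register file (including the context pointer in ARG1) is left
	 * untouched and execution simply restarts at the callee's first
	 * insn, so the callee's EXIT hands the return value straight back
	 * to whoever invoked the original program. The chain depth is
	 * bounded by MAX_TAIL_CALL_CNT via tail_call_cnt above.
	 */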
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;
	COND_JMP(u, JEQ, ==)
	COND_JMP(u, JNE, !=)
	COND_JMP(u, JGT, >)
	COND_JMP(u, JLT, <)
	COND_JMP(u, JGE, >=)
	COND_JMP(u, JLE, <=)
	COND_JMP(u, JSET, &)
	COND_JMP(s, JSGT, >)
	COND_JMP(s, JSLT, <)
	COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
	/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
#define LDX_PROBE(SIZEOP, SIZE)							\
	LDX_PROBE_MEM_##SIZEOP:							\
		bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));	\
		CONT;
	LDX_PROBE(B, 1)
	LDX_PROBE(H, 2)
	LDX_PROBE(W, 4)
	LDX_PROBE(DW, 8)
#undef LDX_PROBE

	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}
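
/* The interpreter entry points generated below exist once per possible
 * BPF stack size, from 32 up to 512 bytes in 32 byte steps;
 * bpf_prog_select_func() later picks the variant matching the
 * verifier-computed stack_depth so only as much stack as the program
 * actually needs is reserved.
 */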

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif
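
/* Programs stored in a tail call map (prog array) must all be of the
 * same type and JIT state: the first program to be referenced fixes
 * array->aux->type and ->jited, and any later program has to match.
 * This is enforced both when programs are inserted into the map and,
 * via bpf_check_tail_call() below, when a program using such a map is
 * finalized.
 */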

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->aux->type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->aux->type  = fp->type;
		array->aux->jited = fp->jited;
		return true;
	}

	return array->aux->type  == fp->type &&
	       array->aux->jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

/**
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with internal BPF program
 * @err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	if (fp->bpf_func)
		goto finalize;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		*err = bpf_prog_alloc_jited_linfo(fp);
		if (*err)
			return fp;

		fp = bpf_int_jit_compile(fp);
		if (!fp->jited) {
			bpf_prog_free_jited_linfo(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
			*err = -ENOTSUPP;
			return fp;
#endif
		} else {
			bpf_prog_free_unused_jited_linfo(fp);
		}
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* to avoid allocating empty bpf_prog_array for cgroups that
 * don't have bpf program attached use one global 'empty_prog_array'
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since caller requested prog_cnt == 0)
 * that pointer should be 'freed' by bpf_prog_array_free()
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}

bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			return false;
	return true;
}

static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *     bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check.
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
									: 0;
}

static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		if (!aux->cgroup_storage[stype])
			continue;
		bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]);
	}
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	u32 i;

	bpf_free_cgroup_storage(aux);
	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_free_used_maps(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	bpf_trampoline_put(aux->trampoline);
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->linked_prog)
		bpf_prog_put(aux->linked_prog);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}
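
/* For context, a hedged sketch of how this helper is expected to be wired
 * up outside of this file (the authoritative definitions live in
 * kernel/bpf/helpers.c and net/core/filter.c): the prandom helper proto
 * roughly looks like
 *
 *	const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 *		.func		= bpf_user_rnd_u32,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *	};
 *
 * and the classic-BPF converter emits a direct call to bpf_user_rnd_u32()
 * for the SKF_AD_RANDOM extension, which is why the fn(ctx, A, X) register
 * layout mentioned above can reach this function.
 */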
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}
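
/* A minimal, hedged sketch of how an architecture JIT overrides the weak
 * eBPF stub above; the real implementations live under arch/<arch>/net/
 * and 'jitted_image' below is only an illustrative placeholder:
 *
 *	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 *	{
 *		// ... emit native code for prog->insnsi ...
 *		prog->bpf_func = (void *)jitted_image;
 *		prog->jited = 1;
 *		return prog;
 *	}
 *
 * When no strong definition is linked in, the weak stub simply returns the
 * program unchanged, leaving the interpreter as prog->bpf_func.
 */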
bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);