// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
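 *
 * Illustrative use only (not part of this file): a cBPF JIT that cannot
 * handle a negative/ancillary offset inline can punt to this helper, e.g.
 * to read one byte located two bytes past the network header:
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 2, 1);
 *	if (p)
 *		val = *p;
 *
 * A NULL return means the requested bytes are not linearly available.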
66 */ 67 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) 68 { 69 u8 *ptr = NULL; 70 71 if (k >= SKF_NET_OFF) { 72 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 73 } else if (k >= SKF_LL_OFF) { 74 if (unlikely(!skb_mac_header_was_set(skb))) 75 return NULL; 76 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 77 } 78 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 79 return ptr; 80 81 return NULL; 82 } 83 84 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags) 85 { 86 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags; 87 struct bpf_prog_aux *aux; 88 struct bpf_prog *fp; 89 90 size = round_up(size, PAGE_SIZE); 91 fp = __vmalloc(size, gfp_flags); 92 if (fp == NULL) 93 return NULL; 94 95 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags); 96 if (aux == NULL) { 97 vfree(fp); 98 return NULL; 99 } 100 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags); 101 if (!fp->active) { 102 vfree(fp); 103 kfree(aux); 104 return NULL; 105 } 106 107 fp->pages = size / PAGE_SIZE; 108 fp->aux = aux; 109 fp->aux->prog = fp; 110 fp->jit_requested = ebpf_jit_enabled(); 111 fp->blinding_requested = bpf_jit_blinding_enabled(fp); 112 113 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); 114 mutex_init(&fp->aux->used_maps_mutex); 115 mutex_init(&fp->aux->dst_mutex); 116 117 return fp; 118 } 119 120 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) 121 { 122 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags; 123 struct bpf_prog *prog; 124 int cpu; 125 126 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags); 127 if (!prog) 128 return NULL; 129 130 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags); 131 if (!prog->stats) { 132 free_percpu(prog->active); 133 kfree(prog->aux); 134 vfree(prog); 135 return NULL; 136 } 137 138 for_each_possible_cpu(cpu) { 139 struct bpf_prog_stats *pstats; 140 141 pstats = per_cpu_ptr(prog->stats, cpu); 142 u64_stats_init(&pstats->syncp); 143 } 144 return prog; 145 } 146 EXPORT_SYMBOL_GPL(bpf_prog_alloc); 147 148 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog) 149 { 150 if (!prog->aux->nr_linfo || !prog->jit_requested) 151 return 0; 152 153 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo, 154 sizeof(*prog->aux->jited_linfo), 155 GFP_KERNEL_ACCOUNT | __GFP_NOWARN); 156 if (!prog->aux->jited_linfo) 157 return -ENOMEM; 158 159 return 0; 160 } 161 162 void bpf_prog_jit_attempt_done(struct bpf_prog *prog) 163 { 164 if (prog->aux->jited_linfo && 165 (!prog->jited || !prog->aux->jited_linfo[0])) { 166 kvfree(prog->aux->jited_linfo); 167 prog->aux->jited_linfo = NULL; 168 } 169 170 kfree(prog->aux->kfunc_tab); 171 prog->aux->kfunc_tab = NULL; 172 } 173 174 /* The jit engine is responsible to provide an array 175 * for insn_off to the jited_off mapping (insn_to_jit_off). 176 * 177 * The idx to this array is the insn_off. Hence, the insn_off 178 * here is relative to the prog itself instead of the main prog. 179 * This array has one entry for each xlated bpf insn. 180 * 181 * jited_off is the byte off to the last byte of the jited insn. 182 * 183 * Hence, with 184 * insn_start: 185 * The first bpf insn off of the prog. The insn off 186 * here is relative to the main prog. 187 * e.g. 
if prog is a subprog, insn_start > 0 188 * linfo_idx: 189 * The prog's idx to prog->aux->linfo and jited_linfo 190 * 191 * jited_linfo[linfo_idx] = prog->bpf_func 192 * 193 * For i > linfo_idx, 194 * 195 * jited_linfo[i] = prog->bpf_func + 196 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1] 197 */ 198 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, 199 const u32 *insn_to_jit_off) 200 { 201 u32 linfo_idx, insn_start, insn_end, nr_linfo, i; 202 const struct bpf_line_info *linfo; 203 void **jited_linfo; 204 205 if (!prog->aux->jited_linfo) 206 /* Userspace did not provide linfo */ 207 return; 208 209 linfo_idx = prog->aux->linfo_idx; 210 linfo = &prog->aux->linfo[linfo_idx]; 211 insn_start = linfo[0].insn_off; 212 insn_end = insn_start + prog->len; 213 214 jited_linfo = &prog->aux->jited_linfo[linfo_idx]; 215 jited_linfo[0] = prog->bpf_func; 216 217 nr_linfo = prog->aux->nr_linfo - linfo_idx; 218 219 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++) 220 /* The verifier ensures that linfo[i].insn_off is 221 * strictly increasing 222 */ 223 jited_linfo[i] = prog->bpf_func + 224 insn_to_jit_off[linfo[i].insn_off - insn_start - 1]; 225 } 226 227 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, 228 gfp_t gfp_extra_flags) 229 { 230 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags; 231 struct bpf_prog *fp; 232 u32 pages; 233 234 size = round_up(size, PAGE_SIZE); 235 pages = size / PAGE_SIZE; 236 if (pages <= fp_old->pages) 237 return fp_old; 238 239 fp = __vmalloc(size, gfp_flags); 240 if (fp) { 241 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); 242 fp->pages = pages; 243 fp->aux->prog = fp; 244 245 /* We keep fp->aux from fp_old around in the new 246 * reallocated structure. 247 */ 248 fp_old->aux = NULL; 249 fp_old->stats = NULL; 250 fp_old->active = NULL; 251 __bpf_prog_free(fp_old); 252 } 253 254 return fp; 255 } 256 257 void __bpf_prog_free(struct bpf_prog *fp) 258 { 259 if (fp->aux) { 260 mutex_destroy(&fp->aux->used_maps_mutex); 261 mutex_destroy(&fp->aux->dst_mutex); 262 kfree(fp->aux->poke_tab); 263 kfree(fp->aux); 264 } 265 free_percpu(fp->stats); 266 free_percpu(fp->active); 267 vfree(fp); 268 } 269 270 int bpf_prog_calc_tag(struct bpf_prog *fp) 271 { 272 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64); 273 u32 raw_size = bpf_prog_tag_scratch_size(fp); 274 u32 digest[SHA1_DIGEST_WORDS]; 275 u32 ws[SHA1_WORKSPACE_WORDS]; 276 u32 i, bsize, psize, blocks; 277 struct bpf_insn *dst; 278 bool was_ld_map; 279 u8 *raw, *todo; 280 __be32 *result; 281 __be64 *bits; 282 283 raw = vmalloc(raw_size); 284 if (!raw) 285 return -ENOMEM; 286 287 sha1_init(digest); 288 memset(ws, 0, sizeof(ws)); 289 290 /* We need to take out the map fd for the digest calculation 291 * since they are unstable from user space side. 
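 *
 * For example (illustrative values): the same program loaded twice may
 * carry a map reference as
 *
 *	insn.code    = BPF_LD | BPF_IMM | BPF_DW
 *	insn.src_reg = BPF_PSEUDO_MAP_FD
 *	insn.imm     = 5 on the first load, but imm = 7 on the second,
 *
 * depending on which fd number the loader happened to get. Zeroing the
 * imm of such insns (and of their second ld_imm64 half) below keeps the
 * tag identical for identical programs.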
292 */ 293 dst = (void *)raw; 294 for (i = 0, was_ld_map = false; i < fp->len; i++) { 295 dst[i] = fp->insnsi[i]; 296 if (!was_ld_map && 297 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && 298 (dst[i].src_reg == BPF_PSEUDO_MAP_FD || 299 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { 300 was_ld_map = true; 301 dst[i].imm = 0; 302 } else if (was_ld_map && 303 dst[i].code == 0 && 304 dst[i].dst_reg == 0 && 305 dst[i].src_reg == 0 && 306 dst[i].off == 0) { 307 was_ld_map = false; 308 dst[i].imm = 0; 309 } else { 310 was_ld_map = false; 311 } 312 } 313 314 psize = bpf_prog_insn_size(fp); 315 memset(&raw[psize], 0, raw_size - psize); 316 raw[psize++] = 0x80; 317 318 bsize = round_up(psize, SHA1_BLOCK_SIZE); 319 blocks = bsize / SHA1_BLOCK_SIZE; 320 todo = raw; 321 if (bsize - psize >= sizeof(__be64)) { 322 bits = (__be64 *)(todo + bsize - sizeof(__be64)); 323 } else { 324 bits = (__be64 *)(todo + bsize + bits_offset); 325 blocks++; 326 } 327 *bits = cpu_to_be64((psize - 1) << 3); 328 329 while (blocks--) { 330 sha1_transform(digest, todo, ws); 331 todo += SHA1_BLOCK_SIZE; 332 } 333 334 result = (__force __be32 *)digest; 335 for (i = 0; i < SHA1_DIGEST_WORDS; i++) 336 result[i] = cpu_to_be32(digest[i]); 337 memcpy(fp->tag, result, sizeof(fp->tag)); 338 339 vfree(raw); 340 return 0; 341 } 342 343 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, 344 s32 end_new, s32 curr, const bool probe_pass) 345 { 346 const s64 imm_min = S32_MIN, imm_max = S32_MAX; 347 s32 delta = end_new - end_old; 348 s64 imm = insn->imm; 349 350 if (curr < pos && curr + imm + 1 >= end_old) 351 imm += delta; 352 else if (curr >= end_new && curr + imm + 1 < end_new) 353 imm -= delta; 354 if (imm < imm_min || imm > imm_max) 355 return -ERANGE; 356 if (!probe_pass) 357 insn->imm = imm; 358 return 0; 359 } 360 361 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, 362 s32 end_new, s32 curr, const bool probe_pass) 363 { 364 const s32 off_min = S16_MIN, off_max = S16_MAX; 365 s32 delta = end_new - end_old; 366 s32 off = insn->off; 367 368 if (curr < pos && curr + off + 1 >= end_old) 369 off += delta; 370 else if (curr >= end_new && curr + off + 1 < end_new) 371 off -= delta; 372 if (off < off_min || off > off_max) 373 return -ERANGE; 374 if (!probe_pass) 375 insn->off = off; 376 return 0; 377 } 378 379 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, 380 s32 end_new, const bool probe_pass) 381 { 382 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); 383 struct bpf_insn *insn = prog->insnsi; 384 int ret = 0; 385 386 for (i = 0; i < insn_cnt; i++, insn++) { 387 u8 code; 388 389 /* In the probing pass we still operate on the original, 390 * unpatched image in order to check overflows before we 391 * do any other adjustments. Therefore skip the patchlet. 392 */ 393 if (probe_pass && i == pos) { 394 i = end_new; 395 insn = prog->insnsi + end_old; 396 } 397 if (bpf_pseudo_func(insn)) { 398 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 399 end_new, i, probe_pass); 400 if (ret) 401 return ret; 402 continue; 403 } 404 code = insn->code; 405 if ((BPF_CLASS(code) != BPF_JMP && 406 BPF_CLASS(code) != BPF_JMP32) || 407 BPF_OP(code) == BPF_EXIT) 408 continue; 409 /* Adjust offset of jmps if we cross patch boundaries. 
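 *
 * Worked example (illustrative): a jump at insn 2 with off = 4 targets
 * insn 2 + 4 + 1 = 7. If the single insn at position 5 is replaced by a
 * three insn patchlet (delta = 2), the target moves to insn 9 and the
 * branch is rewritten to off = 6, while jumps that lie entirely on one
 * side of the patched region are left alone.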
*/ 410 if (BPF_OP(code) == BPF_CALL) { 411 if (insn->src_reg != BPF_PSEUDO_CALL) 412 continue; 413 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 414 end_new, i, probe_pass); 415 } else { 416 ret = bpf_adj_delta_to_off(insn, pos, end_old, 417 end_new, i, probe_pass); 418 } 419 if (ret) 420 break; 421 } 422 423 return ret; 424 } 425 426 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta) 427 { 428 struct bpf_line_info *linfo; 429 u32 i, nr_linfo; 430 431 nr_linfo = prog->aux->nr_linfo; 432 if (!nr_linfo || !delta) 433 return; 434 435 linfo = prog->aux->linfo; 436 437 for (i = 0; i < nr_linfo; i++) 438 if (off < linfo[i].insn_off) 439 break; 440 441 /* Push all off < linfo[i].insn_off by delta */ 442 for (; i < nr_linfo; i++) 443 linfo[i].insn_off += delta; 444 } 445 446 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 447 const struct bpf_insn *patch, u32 len) 448 { 449 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; 450 const u32 cnt_max = S16_MAX; 451 struct bpf_prog *prog_adj; 452 int err; 453 454 /* Since our patchlet doesn't expand the image, we're done. */ 455 if (insn_delta == 0) { 456 memcpy(prog->insnsi + off, patch, sizeof(*patch)); 457 return prog; 458 } 459 460 insn_adj_cnt = prog->len + insn_delta; 461 462 /* Reject anything that would potentially let the insn->off 463 * target overflow when we have excessive program expansions. 464 * We need to probe here before we do any reallocation where 465 * we afterwards may not fail anymore. 466 */ 467 if (insn_adj_cnt > cnt_max && 468 (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) 469 return ERR_PTR(err); 470 471 /* Several new instructions need to be inserted. Make room 472 * for them. Likely, there's no need for a new allocation as 473 * last page could have large enough tailroom. 474 */ 475 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), 476 GFP_USER); 477 if (!prog_adj) 478 return ERR_PTR(-ENOMEM); 479 480 prog_adj->len = insn_adj_cnt; 481 482 /* Patching happens in 3 steps: 483 * 484 * 1) Move over tail of insnsi from next instruction onwards, 485 * so we can patch the single target insn with one or more 486 * new ones (patching is always from 1 to n insns, n > 0). 487 * 2) Inject new instructions at the target location. 488 * 3) Adjust branch offsets if necessary. 489 */ 490 insn_rest = insn_adj_cnt - off - len; 491 492 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, 493 sizeof(*patch) * insn_rest); 494 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); 495 496 /* We are guaranteed to not fail at this point, otherwise 497 * the ship has sailed to reverse to the original state. An 498 * overflow cannot happen at this point. 
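 *
 * Typical use from insn rewriting code (sketch, assuming a two insn
 * patchlet; not taken from this file):
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
 *		BPF_MOV64_IMM(BPF_REG_2, 0),
 *	};
 *	new_prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (IS_ERR(new_prog))
 *		return PTR_ERR(new_prog);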
499 */ 500 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); 501 502 bpf_adj_linfo(prog_adj, off, insn_delta); 503 504 return prog_adj; 505 } 506 507 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) 508 { 509 /* Branch offsets can't overflow when program is shrinking, no need 510 * to call bpf_adj_branches(..., true) here 511 */ 512 memmove(prog->insnsi + off, prog->insnsi + off + cnt, 513 sizeof(struct bpf_insn) * (prog->len - off - cnt)); 514 prog->len -= cnt; 515 516 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false)); 517 } 518 519 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) 520 { 521 int i; 522 523 for (i = 0; i < fp->aux->func_cnt; i++) 524 bpf_prog_kallsyms_del(fp->aux->func[i]); 525 } 526 527 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) 528 { 529 bpf_prog_kallsyms_del_subprogs(fp); 530 bpf_prog_kallsyms_del(fp); 531 } 532 533 #ifdef CONFIG_BPF_JIT 534 /* All BPF JIT sysctl knobs here. */ 535 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 536 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 537 int bpf_jit_harden __read_mostly; 538 long bpf_jit_limit __read_mostly; 539 long bpf_jit_limit_max __read_mostly; 540 541 static void 542 bpf_prog_ksym_set_addr(struct bpf_prog *prog) 543 { 544 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); 545 546 prog->aux->ksym.start = (unsigned long) prog->bpf_func; 547 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len; 548 } 549 550 static void 551 bpf_prog_ksym_set_name(struct bpf_prog *prog) 552 { 553 char *sym = prog->aux->ksym.name; 554 const char *end = sym + KSYM_NAME_LEN; 555 const struct btf_type *type; 556 const char *func_name; 557 558 BUILD_BUG_ON(sizeof("bpf_prog_") + 559 sizeof(prog->tag) * 2 + 560 /* name has been null terminated. 561 * We should need +1 for the '_' preceding 562 * the name. However, the null character 563 * is double counted between the name and the 564 * sizeof("bpf_prog_") above, so we omit 565 * the +1 here. 
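 *
 * The resulting symbol is "bpf_prog_", 16 hex characters of the tag and
 * an optional "_<name>" suffix, e.g. (illustrative)
 *
 *	bpf_prog_8937c9e7cbcd8fa7_my_prog
 *
 * which is what the worst-case length computed here is checked against.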
566 */ 567 sizeof(prog->aux->name) > KSYM_NAME_LEN); 568 569 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); 570 sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); 571 572 /* prog->aux->name will be ignored if full btf name is available */ 573 if (prog->aux->func_info_cnt) { 574 type = btf_type_by_id(prog->aux->btf, 575 prog->aux->func_info[prog->aux->func_idx].type_id); 576 func_name = btf_name_by_offset(prog->aux->btf, type->name_off); 577 snprintf(sym, (size_t)(end - sym), "_%s", func_name); 578 return; 579 } 580 581 if (prog->aux->name[0]) 582 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); 583 else 584 *sym = 0; 585 } 586 587 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n) 588 { 589 return container_of(n, struct bpf_ksym, tnode)->start; 590 } 591 592 static __always_inline bool bpf_tree_less(struct latch_tree_node *a, 593 struct latch_tree_node *b) 594 { 595 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b); 596 } 597 598 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) 599 { 600 unsigned long val = (unsigned long)key; 601 const struct bpf_ksym *ksym; 602 603 ksym = container_of(n, struct bpf_ksym, tnode); 604 605 if (val < ksym->start) 606 return -1; 607 if (val >= ksym->end) 608 return 1; 609 610 return 0; 611 } 612 613 static const struct latch_tree_ops bpf_tree_ops = { 614 .less = bpf_tree_less, 615 .comp = bpf_tree_comp, 616 }; 617 618 static DEFINE_SPINLOCK(bpf_lock); 619 static LIST_HEAD(bpf_kallsyms); 620 static struct latch_tree_root bpf_tree __cacheline_aligned; 621 622 void bpf_ksym_add(struct bpf_ksym *ksym) 623 { 624 spin_lock_bh(&bpf_lock); 625 WARN_ON_ONCE(!list_empty(&ksym->lnode)); 626 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms); 627 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 628 spin_unlock_bh(&bpf_lock); 629 } 630 631 static void __bpf_ksym_del(struct bpf_ksym *ksym) 632 { 633 if (list_empty(&ksym->lnode)) 634 return; 635 636 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 637 list_del_rcu(&ksym->lnode); 638 } 639 640 void bpf_ksym_del(struct bpf_ksym *ksym) 641 { 642 spin_lock_bh(&bpf_lock); 643 __bpf_ksym_del(ksym); 644 spin_unlock_bh(&bpf_lock); 645 } 646 647 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) 648 { 649 return fp->jited && !bpf_prog_was_classic(fp); 650 } 651 652 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) 653 { 654 return list_empty(&fp->aux->ksym.lnode) || 655 fp->aux->ksym.lnode.prev == LIST_POISON2; 656 } 657 658 void bpf_prog_kallsyms_add(struct bpf_prog *fp) 659 { 660 if (!bpf_prog_kallsyms_candidate(fp) || 661 !bpf_capable()) 662 return; 663 664 bpf_prog_ksym_set_addr(fp); 665 bpf_prog_ksym_set_name(fp); 666 fp->aux->ksym.prog = true; 667 668 bpf_ksym_add(&fp->aux->ksym); 669 } 670 671 void bpf_prog_kallsyms_del(struct bpf_prog *fp) 672 { 673 if (!bpf_prog_kallsyms_candidate(fp)) 674 return; 675 676 bpf_ksym_del(&fp->aux->ksym); 677 } 678 679 static struct bpf_ksym *bpf_ksym_find(unsigned long addr) 680 { 681 struct latch_tree_node *n; 682 683 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); 684 return n ? 
container_of(n, struct bpf_ksym, tnode) : NULL; 685 } 686 687 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, 688 unsigned long *off, char *sym) 689 { 690 struct bpf_ksym *ksym; 691 char *ret = NULL; 692 693 rcu_read_lock(); 694 ksym = bpf_ksym_find(addr); 695 if (ksym) { 696 unsigned long symbol_start = ksym->start; 697 unsigned long symbol_end = ksym->end; 698 699 strncpy(sym, ksym->name, KSYM_NAME_LEN); 700 701 ret = sym; 702 if (size) 703 *size = symbol_end - symbol_start; 704 if (off) 705 *off = addr - symbol_start; 706 } 707 rcu_read_unlock(); 708 709 return ret; 710 } 711 712 bool is_bpf_text_address(unsigned long addr) 713 { 714 bool ret; 715 716 rcu_read_lock(); 717 ret = bpf_ksym_find(addr) != NULL; 718 rcu_read_unlock(); 719 720 return ret; 721 } 722 723 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) 724 { 725 struct bpf_ksym *ksym = bpf_ksym_find(addr); 726 727 return ksym && ksym->prog ? 728 container_of(ksym, struct bpf_prog_aux, ksym)->prog : 729 NULL; 730 } 731 732 const struct exception_table_entry *search_bpf_extables(unsigned long addr) 733 { 734 const struct exception_table_entry *e = NULL; 735 struct bpf_prog *prog; 736 737 rcu_read_lock(); 738 prog = bpf_prog_ksym_find(addr); 739 if (!prog) 740 goto out; 741 if (!prog->aux->num_exentries) 742 goto out; 743 744 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr); 745 out: 746 rcu_read_unlock(); 747 return e; 748 } 749 750 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 751 char *sym) 752 { 753 struct bpf_ksym *ksym; 754 unsigned int it = 0; 755 int ret = -ERANGE; 756 757 if (!bpf_jit_kallsyms_enabled()) 758 return ret; 759 760 rcu_read_lock(); 761 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) { 762 if (it++ != symnum) 763 continue; 764 765 strncpy(sym, ksym->name, KSYM_NAME_LEN); 766 767 *value = ksym->start; 768 *type = BPF_SYM_ELF_TYPE; 769 770 ret = 0; 771 break; 772 } 773 rcu_read_unlock(); 774 775 return ret; 776 } 777 778 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, 779 struct bpf_jit_poke_descriptor *poke) 780 { 781 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 782 static const u32 poke_tab_max = 1024; 783 u32 slot = prog->aux->size_poke_tab; 784 u32 size = slot + 1; 785 786 if (size > poke_tab_max) 787 return -ENOSPC; 788 if (poke->tailcall_target || poke->tailcall_target_stable || 789 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr) 790 return -EINVAL; 791 792 switch (poke->reason) { 793 case BPF_POKE_REASON_TAIL_CALL: 794 if (!poke->tail_call.map) 795 return -EINVAL; 796 break; 797 default: 798 return -EINVAL; 799 } 800 801 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL); 802 if (!tab) 803 return -ENOMEM; 804 805 memcpy(&tab[slot], poke, sizeof(*poke)); 806 prog->aux->size_poke_tab = size; 807 prog->aux->poke_tab = tab; 808 809 return slot; 810 } 811 812 /* 813 * BPF program pack allocator. 814 * 815 * Most BPF programs are pretty small. Allocating a hole page for each 816 * program is sometime a waste. Many small bpf program also adds pressure 817 * to instruction TLB. To solve this issue, we introduce a BPF program pack 818 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86) 819 * to host BPF programs. 
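 *
 * With the 64 byte chunks defined below, a single 2MB pack can serve up
 * to 32768 chunk-granular allocations; e.g. (illustrative) a 300 byte
 * JIT image takes round_up(300, 64) / 64 = 5 chunks rather than a whole
 * page of its own.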
820 */ 821 #define BPF_PROG_CHUNK_SHIFT 6 822 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT) 823 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1)) 824 825 struct bpf_prog_pack { 826 struct list_head list; 827 void *ptr; 828 unsigned long bitmap[]; 829 }; 830 831 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE) 832 833 static size_t bpf_prog_pack_size = -1; 834 static size_t bpf_prog_pack_mask = -1; 835 836 static int bpf_prog_chunk_count(void) 837 { 838 WARN_ON_ONCE(bpf_prog_pack_size == -1); 839 return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE; 840 } 841 842 static DEFINE_MUTEX(pack_mutex); 843 static LIST_HEAD(pack_list); 844 845 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with 846 * CONFIG_MMU=n. Use PAGE_SIZE in these cases. 847 */ 848 #ifdef PMD_SIZE 849 #define BPF_HPAGE_SIZE PMD_SIZE 850 #define BPF_HPAGE_MASK PMD_MASK 851 #else 852 #define BPF_HPAGE_SIZE PAGE_SIZE 853 #define BPF_HPAGE_MASK PAGE_MASK 854 #endif 855 856 static size_t select_bpf_prog_pack_size(void) 857 { 858 size_t size; 859 void *ptr; 860 861 size = BPF_HPAGE_SIZE * num_online_nodes(); 862 ptr = module_alloc(size); 863 864 /* Test whether we can get huge pages. If not just use PAGE_SIZE 865 * packs. 866 */ 867 if (!ptr || !is_vm_area_hugepages(ptr)) { 868 size = PAGE_SIZE; 869 bpf_prog_pack_mask = PAGE_MASK; 870 } else { 871 bpf_prog_pack_mask = BPF_HPAGE_MASK; 872 } 873 874 vfree(ptr); 875 return size; 876 } 877 878 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns) 879 { 880 struct bpf_prog_pack *pack; 881 882 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())), 883 GFP_KERNEL); 884 if (!pack) 885 return NULL; 886 pack->ptr = module_alloc(bpf_prog_pack_size); 887 if (!pack->ptr) { 888 kfree(pack); 889 return NULL; 890 } 891 bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size); 892 bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE); 893 list_add_tail(&pack->list, &pack_list); 894 895 set_vm_flush_reset_perms(pack->ptr); 896 set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE); 897 set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE); 898 return pack; 899 } 900 901 static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns) 902 { 903 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size); 904 struct bpf_prog_pack *pack; 905 unsigned long pos; 906 void *ptr = NULL; 907 908 mutex_lock(&pack_mutex); 909 if (bpf_prog_pack_size == -1) 910 bpf_prog_pack_size = select_bpf_prog_pack_size(); 911 912 if (size > bpf_prog_pack_size) { 913 size = round_up(size, PAGE_SIZE); 914 ptr = module_alloc(size); 915 if (ptr) { 916 bpf_fill_ill_insns(ptr, size); 917 set_vm_flush_reset_perms(ptr); 918 set_memory_ro((unsigned long)ptr, size / PAGE_SIZE); 919 set_memory_x((unsigned long)ptr, size / PAGE_SIZE); 920 } 921 goto out; 922 } 923 list_for_each_entry(pack, &pack_list, list) { 924 pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0, 925 nbits, 0); 926 if (pos < bpf_prog_chunk_count()) 927 goto found_free_area; 928 } 929 930 pack = alloc_new_pack(bpf_fill_ill_insns); 931 if (!pack) 932 goto out; 933 934 pos = 0; 935 936 found_free_area: 937 bitmap_set(pack->bitmap, pos, nbits); 938 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT); 939 940 out: 941 mutex_unlock(&pack_mutex); 942 return ptr; 943 } 944 945 static void bpf_prog_pack_free(struct bpf_binary_header *hdr) 946 { 947 struct 
bpf_prog_pack *pack = NULL, *tmp; 948 unsigned int nbits; 949 unsigned long pos; 950 void *pack_ptr; 951 952 mutex_lock(&pack_mutex); 953 if (hdr->size > bpf_prog_pack_size) { 954 module_memfree(hdr); 955 goto out; 956 } 957 958 pack_ptr = (void *)((unsigned long)hdr & bpf_prog_pack_mask); 959 960 list_for_each_entry(tmp, &pack_list, list) { 961 if (tmp->ptr == pack_ptr) { 962 pack = tmp; 963 break; 964 } 965 } 966 967 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n")) 968 goto out; 969 970 nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size); 971 pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT; 972 973 WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size), 974 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n"); 975 976 bitmap_clear(pack->bitmap, pos, nbits); 977 if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0, 978 bpf_prog_chunk_count(), 0) == 0) { 979 list_del(&pack->list); 980 module_memfree(pack->ptr); 981 kfree(pack); 982 } 983 out: 984 mutex_unlock(&pack_mutex); 985 } 986 987 static atomic_long_t bpf_jit_current; 988 989 /* Can be overridden by an arch's JIT compiler if it has a custom, 990 * dedicated BPF backend memory area, or if neither of the two 991 * below apply. 992 */ 993 u64 __weak bpf_jit_alloc_exec_limit(void) 994 { 995 #if defined(MODULES_VADDR) 996 return MODULES_END - MODULES_VADDR; 997 #else 998 return VMALLOC_END - VMALLOC_START; 999 #endif 1000 } 1001 1002 static int __init bpf_jit_charge_init(void) 1003 { 1004 /* Only used as heuristic here to derive limit. */ 1005 bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); 1006 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, 1007 PAGE_SIZE), LONG_MAX); 1008 return 0; 1009 } 1010 pure_initcall(bpf_jit_charge_init); 1011 1012 int bpf_jit_charge_modmem(u32 size) 1013 { 1014 if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) { 1015 if (!bpf_capable()) { 1016 atomic_long_sub(size, &bpf_jit_current); 1017 return -EPERM; 1018 } 1019 } 1020 1021 return 0; 1022 } 1023 1024 void bpf_jit_uncharge_modmem(u32 size) 1025 { 1026 atomic_long_sub(size, &bpf_jit_current); 1027 } 1028 1029 void *__weak bpf_jit_alloc_exec(unsigned long size) 1030 { 1031 return module_alloc(size); 1032 } 1033 1034 void __weak bpf_jit_free_exec(void *addr) 1035 { 1036 module_memfree(addr); 1037 } 1038 1039 struct bpf_binary_header * 1040 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, 1041 unsigned int alignment, 1042 bpf_jit_fill_hole_t bpf_fill_ill_insns) 1043 { 1044 struct bpf_binary_header *hdr; 1045 u32 size, hole, start; 1046 1047 WARN_ON_ONCE(!is_power_of_2(alignment) || 1048 alignment > BPF_IMAGE_ALIGNMENT); 1049 1050 /* Most of BPF filters are really small, but if some of them 1051 * fill a page, allow at least 128 extra bytes to insert a 1052 * random section of illegal instructions. 1053 */ 1054 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); 1055 1056 if (bpf_jit_charge_modmem(size)) 1057 return NULL; 1058 hdr = bpf_jit_alloc_exec(size); 1059 if (!hdr) { 1060 bpf_jit_uncharge_modmem(size); 1061 return NULL; 1062 } 1063 1064 /* Fill space with illegal/arch-dep instructions. */ 1065 bpf_fill_ill_insns(hdr, size); 1066 1067 hdr->size = size; 1068 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), 1069 PAGE_SIZE - sizeof(*hdr)); 1070 start = (get_random_int() % hole) & ~(alignment - 1); 1071 1072 /* Leave a random number of instructions before BPF code. 
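 *
 * E.g. (illustrative numbers) with alignment = 16 and a hole of 3000
 * bytes, start = (get_random_int() % 3000) & ~15, so the image begins at
 * a random, 16 byte aligned offset behind the header.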
*/ 1073 *image_ptr = &hdr->image[start]; 1074 1075 return hdr; 1076 } 1077 1078 void bpf_jit_binary_free(struct bpf_binary_header *hdr) 1079 { 1080 u32 size = hdr->size; 1081 1082 bpf_jit_free_exec(hdr); 1083 bpf_jit_uncharge_modmem(size); 1084 } 1085 1086 /* Allocate jit binary from bpf_prog_pack allocator. 1087 * Since the allocated memory is RO+X, the JIT engine cannot write directly 1088 * to the memory. To solve this problem, a RW buffer is also allocated at 1089 * as the same time. The JIT engine should calculate offsets based on the 1090 * RO memory address, but write JITed program to the RW buffer. Once the 1091 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies 1092 * the JITed program to the RO memory. 1093 */ 1094 struct bpf_binary_header * 1095 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr, 1096 unsigned int alignment, 1097 struct bpf_binary_header **rw_header, 1098 u8 **rw_image, 1099 bpf_jit_fill_hole_t bpf_fill_ill_insns) 1100 { 1101 struct bpf_binary_header *ro_header; 1102 u32 size, hole, start; 1103 1104 WARN_ON_ONCE(!is_power_of_2(alignment) || 1105 alignment > BPF_IMAGE_ALIGNMENT); 1106 1107 /* add 16 bytes for a random section of illegal instructions */ 1108 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE); 1109 1110 if (bpf_jit_charge_modmem(size)) 1111 return NULL; 1112 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns); 1113 if (!ro_header) { 1114 bpf_jit_uncharge_modmem(size); 1115 return NULL; 1116 } 1117 1118 *rw_header = kvmalloc(size, GFP_KERNEL); 1119 if (!*rw_header) { 1120 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size)); 1121 bpf_prog_pack_free(ro_header); 1122 bpf_jit_uncharge_modmem(size); 1123 return NULL; 1124 } 1125 1126 /* Fill space with illegal/arch-dep instructions. */ 1127 bpf_fill_ill_insns(*rw_header, size); 1128 (*rw_header)->size = size; 1129 1130 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)), 1131 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header)); 1132 start = (get_random_int() % hole) & ~(alignment - 1); 1133 1134 *image_ptr = &ro_header->image[start]; 1135 *rw_image = &(*rw_header)->image[start]; 1136 1137 return ro_header; 1138 } 1139 1140 /* Copy JITed text from rw_header to its final location, the ro_header. */ 1141 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, 1142 struct bpf_binary_header *ro_header, 1143 struct bpf_binary_header *rw_header) 1144 { 1145 void *ptr; 1146 1147 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size); 1148 1149 kvfree(rw_header); 1150 1151 if (IS_ERR(ptr)) { 1152 bpf_prog_pack_free(ro_header); 1153 return PTR_ERR(ptr); 1154 } 1155 prog->aux->use_bpf_prog_pack = true; 1156 return 0; 1157 } 1158 1159 /* bpf_jit_binary_pack_free is called in two different scenarios: 1160 * 1) when the program is freed after; 1161 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize). 1162 * For case 2), we need to free both the RO memory and the RW buffer. 1163 * 1164 * bpf_jit_binary_pack_free requires proper ro_header->size. However, 1165 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size 1166 * must be set with either bpf_jit_binary_pack_finalize (normal path) or 1167 * bpf_arch_text_copy (when jit fails). 
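 *
 * A JIT honouring these rules typically looks like (sketch, not taken
 * from this file):
 *
 *	header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					   &rw_header, &rw_image, fill_insns);
 *	... emit code into rw_image ...
 *	if (!failed) {
 *		ret = bpf_jit_binary_pack_finalize(prog, header, rw_header);
 *	} else {
 *		bpf_arch_text_copy(&header->size, &rw_header->size,
 *				   sizeof(rw_header->size));
 *		bpf_jit_binary_pack_free(header, rw_header);
 *	}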
1168 */ 1169 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, 1170 struct bpf_binary_header *rw_header) 1171 { 1172 u32 size = ro_header->size; 1173 1174 bpf_prog_pack_free(ro_header); 1175 kvfree(rw_header); 1176 bpf_jit_uncharge_modmem(size); 1177 } 1178 1179 static inline struct bpf_binary_header * 1180 bpf_jit_binary_hdr(const struct bpf_prog *fp) 1181 { 1182 unsigned long real_start = (unsigned long)fp->bpf_func; 1183 unsigned long addr; 1184 1185 if (fp->aux->use_bpf_prog_pack) 1186 addr = real_start & BPF_PROG_CHUNK_MASK; 1187 else 1188 addr = real_start & PAGE_MASK; 1189 1190 return (void *)addr; 1191 } 1192 1193 /* This symbol is only overridden by archs that have different 1194 * requirements than the usual eBPF JITs, f.e. when they only 1195 * implement cBPF JIT, do not set images read-only, etc. 1196 */ 1197 void __weak bpf_jit_free(struct bpf_prog *fp) 1198 { 1199 if (fp->jited) { 1200 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); 1201 1202 if (fp->aux->use_bpf_prog_pack) 1203 bpf_jit_binary_pack_free(hdr, NULL /* rw_buffer */); 1204 else 1205 bpf_jit_binary_free(hdr); 1206 1207 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); 1208 } 1209 1210 bpf_prog_unlock_free(fp); 1211 } 1212 1213 int bpf_jit_get_func_addr(const struct bpf_prog *prog, 1214 const struct bpf_insn *insn, bool extra_pass, 1215 u64 *func_addr, bool *func_addr_fixed) 1216 { 1217 s16 off = insn->off; 1218 s32 imm = insn->imm; 1219 u8 *addr; 1220 1221 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL; 1222 if (!*func_addr_fixed) { 1223 /* Place-holder address till the last pass has collected 1224 * all addresses for JITed subprograms in which case we 1225 * can pick them up from prog->aux. 1226 */ 1227 if (!extra_pass) 1228 addr = NULL; 1229 else if (prog->aux->func && 1230 off >= 0 && off < prog->aux->func_cnt) 1231 addr = (u8 *)prog->aux->func[off]->bpf_func; 1232 else 1233 return -EINVAL; 1234 } else { 1235 /* Address of a BPF helper call. Since part of the core 1236 * kernel, it's always at a fixed location. __bpf_call_base 1237 * and the helper with imm relative to it are both in core 1238 * kernel. 1239 */ 1240 addr = (u8 *)__bpf_call_base + imm; 1241 } 1242 1243 *func_addr = (unsigned long)addr; 1244 return 0; 1245 } 1246 1247 static int bpf_jit_blind_insn(const struct bpf_insn *from, 1248 const struct bpf_insn *aux, 1249 struct bpf_insn *to_buff, 1250 bool emit_zext) 1251 { 1252 struct bpf_insn *to = to_buff; 1253 u32 imm_rnd = get_random_int(); 1254 s16 off; 1255 1256 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); 1257 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); 1258 1259 /* Constraints on AX register: 1260 * 1261 * AX register is inaccessible from user space. It is mapped in 1262 * all JITs, and used here for constant blinding rewrites. It is 1263 * typically "stateless" meaning its contents are only valid within 1264 * the executed instruction, but not across several instructions. 1265 * There are a few exceptions however which are further detailed 1266 * below. 1267 * 1268 * Constant blinding is only used by JITs, not in the interpreter. 1269 * The interpreter uses AX in some occasions as a local temporary 1270 * register e.g. in DIV or MOD instructions. 1271 * 1272 * In restricted circumstances, the verifier can also use the AX 1273 * register for rewrites as long as they do not interfere with 1274 * the above cases! 
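 *
 * As an illustration of the rewrite done below, an immediate ALU op like
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * becomes, with a fresh random value rnd,
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ 0x1234)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the attacker-chosen constant never appears verbatim in the image.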
1275 */ 1276 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) 1277 goto out; 1278 1279 if (from->imm == 0 && 1280 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || 1281 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { 1282 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); 1283 goto out; 1284 } 1285 1286 switch (from->code) { 1287 case BPF_ALU | BPF_ADD | BPF_K: 1288 case BPF_ALU | BPF_SUB | BPF_K: 1289 case BPF_ALU | BPF_AND | BPF_K: 1290 case BPF_ALU | BPF_OR | BPF_K: 1291 case BPF_ALU | BPF_XOR | BPF_K: 1292 case BPF_ALU | BPF_MUL | BPF_K: 1293 case BPF_ALU | BPF_MOV | BPF_K: 1294 case BPF_ALU | BPF_DIV | BPF_K: 1295 case BPF_ALU | BPF_MOD | BPF_K: 1296 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1297 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1298 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX); 1299 break; 1300 1301 case BPF_ALU64 | BPF_ADD | BPF_K: 1302 case BPF_ALU64 | BPF_SUB | BPF_K: 1303 case BPF_ALU64 | BPF_AND | BPF_K: 1304 case BPF_ALU64 | BPF_OR | BPF_K: 1305 case BPF_ALU64 | BPF_XOR | BPF_K: 1306 case BPF_ALU64 | BPF_MUL | BPF_K: 1307 case BPF_ALU64 | BPF_MOV | BPF_K: 1308 case BPF_ALU64 | BPF_DIV | BPF_K: 1309 case BPF_ALU64 | BPF_MOD | BPF_K: 1310 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1311 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1312 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX); 1313 break; 1314 1315 case BPF_JMP | BPF_JEQ | BPF_K: 1316 case BPF_JMP | BPF_JNE | BPF_K: 1317 case BPF_JMP | BPF_JGT | BPF_K: 1318 case BPF_JMP | BPF_JLT | BPF_K: 1319 case BPF_JMP | BPF_JGE | BPF_K: 1320 case BPF_JMP | BPF_JLE | BPF_K: 1321 case BPF_JMP | BPF_JSGT | BPF_K: 1322 case BPF_JMP | BPF_JSLT | BPF_K: 1323 case BPF_JMP | BPF_JSGE | BPF_K: 1324 case BPF_JMP | BPF_JSLE | BPF_K: 1325 case BPF_JMP | BPF_JSET | BPF_K: 1326 /* Accommodate for extra offset in case of a backjump. */ 1327 off = from->off; 1328 if (off < 0) 1329 off -= 2; 1330 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1331 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1332 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); 1333 break; 1334 1335 case BPF_JMP32 | BPF_JEQ | BPF_K: 1336 case BPF_JMP32 | BPF_JNE | BPF_K: 1337 case BPF_JMP32 | BPF_JGT | BPF_K: 1338 case BPF_JMP32 | BPF_JLT | BPF_K: 1339 case BPF_JMP32 | BPF_JGE | BPF_K: 1340 case BPF_JMP32 | BPF_JLE | BPF_K: 1341 case BPF_JMP32 | BPF_JSGT | BPF_K: 1342 case BPF_JMP32 | BPF_JSLT | BPF_K: 1343 case BPF_JMP32 | BPF_JSGE | BPF_K: 1344 case BPF_JMP32 | BPF_JSLE | BPF_K: 1345 case BPF_JMP32 | BPF_JSET | BPF_K: 1346 /* Accommodate for extra offset in case of a backjump. */ 1347 off = from->off; 1348 if (off < 0) 1349 off -= 2; 1350 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1351 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1352 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, 1353 off); 1354 break; 1355 1356 case BPF_LD | BPF_IMM | BPF_DW: 1357 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); 1358 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1359 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 1360 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); 1361 break; 1362 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. 
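 * This second half carries what was the lower 32 bits (aux[0].imm): it is
 * blinded like the cases above and OR'ed into dst, whose upper half was
 * already installed, shifted left by 32, by the ld_imm64 rewrite directly
 * above.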
*/ 1363 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1364 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1365 if (emit_zext) 1366 *to++ = BPF_ZEXT_REG(BPF_REG_AX); 1367 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1368 break; 1369 1370 case BPF_ST | BPF_MEM | BPF_DW: 1371 case BPF_ST | BPF_MEM | BPF_W: 1372 case BPF_ST | BPF_MEM | BPF_H: 1373 case BPF_ST | BPF_MEM | BPF_B: 1374 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1375 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1376 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1377 break; 1378 } 1379 out: 1380 return to - to_buff; 1381 } 1382 1383 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, 1384 gfp_t gfp_extra_flags) 1385 { 1386 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 1387 struct bpf_prog *fp; 1388 1389 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); 1390 if (fp != NULL) { 1391 /* aux->prog still points to the fp_other one, so 1392 * when promoting the clone to the real program, 1393 * this still needs to be adapted. 1394 */ 1395 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); 1396 } 1397 1398 return fp; 1399 } 1400 1401 static void bpf_prog_clone_free(struct bpf_prog *fp) 1402 { 1403 /* aux was stolen by the other clone, so we cannot free 1404 * it from this path! It will be freed eventually by the 1405 * other program on release. 1406 * 1407 * At this point, we don't need a deferred release since 1408 * clone is guaranteed to not be locked. 1409 */ 1410 fp->aux = NULL; 1411 fp->stats = NULL; 1412 fp->active = NULL; 1413 __bpf_prog_free(fp); 1414 } 1415 1416 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) 1417 { 1418 /* We have to repoint aux->prog to self, as we don't 1419 * know whether fp here is the clone or the original. 1420 */ 1421 fp->aux->prog = fp; 1422 bpf_prog_clone_free(fp_other); 1423 } 1424 1425 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) 1426 { 1427 struct bpf_insn insn_buff[16], aux[2]; 1428 struct bpf_prog *clone, *tmp; 1429 int insn_delta, insn_cnt; 1430 struct bpf_insn *insn; 1431 int i, rewritten; 1432 1433 if (!prog->blinding_requested || prog->blinded) 1434 return prog; 1435 1436 clone = bpf_prog_clone_create(prog, GFP_USER); 1437 if (!clone) 1438 return ERR_PTR(-ENOMEM); 1439 1440 insn_cnt = clone->len; 1441 insn = clone->insnsi; 1442 1443 for (i = 0; i < insn_cnt; i++, insn++) { 1444 if (bpf_pseudo_func(insn)) { 1445 /* ld_imm64 with an address of bpf subprog is not 1446 * a user controlled constant. Don't randomize it, 1447 * since it will conflict with jit_subprogs() logic. 1448 */ 1449 insn++; 1450 i++; 1451 continue; 1452 } 1453 1454 /* We temporarily need to hold the original ld64 insn 1455 * so that we can still access the first part in the 1456 * second blinding run. 1457 */ 1458 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && 1459 insn[1].code == 0) 1460 memcpy(aux, insn, sizeof(aux)); 1461 1462 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, 1463 clone->aux->verifier_zext); 1464 if (!rewritten) 1465 continue; 1466 1467 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); 1468 if (IS_ERR(tmp)) { 1469 /* Patching may have repointed aux->prog during 1470 * realloc from the original one, so we need to 1471 * fix it up here on error. 
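 *
 * For reference, a JIT drives the blinding pair roughly as follows
 * (sketch, not taken from this file):
 *
 *	tmp = bpf_jit_blind_constants(prog);
 *	if (IS_ERR(tmp))
 *		return orig_prog;
 *	if (tmp != prog) {
 *		tmp_blinded = true;
 *		prog = tmp;
 *	}
 *	... JIT prog ...
 *	if (tmp_blinded)
 *		bpf_jit_prog_release_other(prog,
 *					   prog == orig_prog ? tmp : orig_prog);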
1472 */ 1473 bpf_jit_prog_release_other(prog, clone); 1474 return tmp; 1475 } 1476 1477 clone = tmp; 1478 insn_delta = rewritten - 1; 1479 1480 /* Walk new program and skip insns we just inserted. */ 1481 insn = clone->insnsi + i + insn_delta; 1482 insn_cnt += insn_delta; 1483 i += insn_delta; 1484 } 1485 1486 clone->blinded = 1; 1487 return clone; 1488 } 1489 #endif /* CONFIG_BPF_JIT */ 1490 1491 /* Base function for offset calculation. Needs to go into .text section, 1492 * therefore keeping it non-static as well; will also be used by JITs 1493 * anyway later on, so do not let the compiler omit it. This also needs 1494 * to go into kallsyms for correlation from e.g. bpftool, so naming 1495 * must not change. 1496 */ 1497 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1498 { 1499 return 0; 1500 } 1501 EXPORT_SYMBOL_GPL(__bpf_call_base); 1502 1503 /* All UAPI available opcodes. */ 1504 #define BPF_INSN_MAP(INSN_2, INSN_3) \ 1505 /* 32 bit ALU operations. */ \ 1506 /* Register based. */ \ 1507 INSN_3(ALU, ADD, X), \ 1508 INSN_3(ALU, SUB, X), \ 1509 INSN_3(ALU, AND, X), \ 1510 INSN_3(ALU, OR, X), \ 1511 INSN_3(ALU, LSH, X), \ 1512 INSN_3(ALU, RSH, X), \ 1513 INSN_3(ALU, XOR, X), \ 1514 INSN_3(ALU, MUL, X), \ 1515 INSN_3(ALU, MOV, X), \ 1516 INSN_3(ALU, ARSH, X), \ 1517 INSN_3(ALU, DIV, X), \ 1518 INSN_3(ALU, MOD, X), \ 1519 INSN_2(ALU, NEG), \ 1520 INSN_3(ALU, END, TO_BE), \ 1521 INSN_3(ALU, END, TO_LE), \ 1522 /* Immediate based. */ \ 1523 INSN_3(ALU, ADD, K), \ 1524 INSN_3(ALU, SUB, K), \ 1525 INSN_3(ALU, AND, K), \ 1526 INSN_3(ALU, OR, K), \ 1527 INSN_3(ALU, LSH, K), \ 1528 INSN_3(ALU, RSH, K), \ 1529 INSN_3(ALU, XOR, K), \ 1530 INSN_3(ALU, MUL, K), \ 1531 INSN_3(ALU, MOV, K), \ 1532 INSN_3(ALU, ARSH, K), \ 1533 INSN_3(ALU, DIV, K), \ 1534 INSN_3(ALU, MOD, K), \ 1535 /* 64 bit ALU operations. */ \ 1536 /* Register based. */ \ 1537 INSN_3(ALU64, ADD, X), \ 1538 INSN_3(ALU64, SUB, X), \ 1539 INSN_3(ALU64, AND, X), \ 1540 INSN_3(ALU64, OR, X), \ 1541 INSN_3(ALU64, LSH, X), \ 1542 INSN_3(ALU64, RSH, X), \ 1543 INSN_3(ALU64, XOR, X), \ 1544 INSN_3(ALU64, MUL, X), \ 1545 INSN_3(ALU64, MOV, X), \ 1546 INSN_3(ALU64, ARSH, X), \ 1547 INSN_3(ALU64, DIV, X), \ 1548 INSN_3(ALU64, MOD, X), \ 1549 INSN_2(ALU64, NEG), \ 1550 /* Immediate based. */ \ 1551 INSN_3(ALU64, ADD, K), \ 1552 INSN_3(ALU64, SUB, K), \ 1553 INSN_3(ALU64, AND, K), \ 1554 INSN_3(ALU64, OR, K), \ 1555 INSN_3(ALU64, LSH, K), \ 1556 INSN_3(ALU64, RSH, K), \ 1557 INSN_3(ALU64, XOR, K), \ 1558 INSN_3(ALU64, MUL, K), \ 1559 INSN_3(ALU64, MOV, K), \ 1560 INSN_3(ALU64, ARSH, K), \ 1561 INSN_3(ALU64, DIV, K), \ 1562 INSN_3(ALU64, MOD, K), \ 1563 /* Call instruction. */ \ 1564 INSN_2(JMP, CALL), \ 1565 /* Exit instruction. */ \ 1566 INSN_2(JMP, EXIT), \ 1567 /* 32-bit Jump instructions. */ \ 1568 /* Register based. */ \ 1569 INSN_3(JMP32, JEQ, X), \ 1570 INSN_3(JMP32, JNE, X), \ 1571 INSN_3(JMP32, JGT, X), \ 1572 INSN_3(JMP32, JLT, X), \ 1573 INSN_3(JMP32, JGE, X), \ 1574 INSN_3(JMP32, JLE, X), \ 1575 INSN_3(JMP32, JSGT, X), \ 1576 INSN_3(JMP32, JSLT, X), \ 1577 INSN_3(JMP32, JSGE, X), \ 1578 INSN_3(JMP32, JSLE, X), \ 1579 INSN_3(JMP32, JSET, X), \ 1580 /* Immediate based. 
*/ \ 1581 INSN_3(JMP32, JEQ, K), \ 1582 INSN_3(JMP32, JNE, K), \ 1583 INSN_3(JMP32, JGT, K), \ 1584 INSN_3(JMP32, JLT, K), \ 1585 INSN_3(JMP32, JGE, K), \ 1586 INSN_3(JMP32, JLE, K), \ 1587 INSN_3(JMP32, JSGT, K), \ 1588 INSN_3(JMP32, JSLT, K), \ 1589 INSN_3(JMP32, JSGE, K), \ 1590 INSN_3(JMP32, JSLE, K), \ 1591 INSN_3(JMP32, JSET, K), \ 1592 /* Jump instructions. */ \ 1593 /* Register based. */ \ 1594 INSN_3(JMP, JEQ, X), \ 1595 INSN_3(JMP, JNE, X), \ 1596 INSN_3(JMP, JGT, X), \ 1597 INSN_3(JMP, JLT, X), \ 1598 INSN_3(JMP, JGE, X), \ 1599 INSN_3(JMP, JLE, X), \ 1600 INSN_3(JMP, JSGT, X), \ 1601 INSN_3(JMP, JSLT, X), \ 1602 INSN_3(JMP, JSGE, X), \ 1603 INSN_3(JMP, JSLE, X), \ 1604 INSN_3(JMP, JSET, X), \ 1605 /* Immediate based. */ \ 1606 INSN_3(JMP, JEQ, K), \ 1607 INSN_3(JMP, JNE, K), \ 1608 INSN_3(JMP, JGT, K), \ 1609 INSN_3(JMP, JLT, K), \ 1610 INSN_3(JMP, JGE, K), \ 1611 INSN_3(JMP, JLE, K), \ 1612 INSN_3(JMP, JSGT, K), \ 1613 INSN_3(JMP, JSLT, K), \ 1614 INSN_3(JMP, JSGE, K), \ 1615 INSN_3(JMP, JSLE, K), \ 1616 INSN_3(JMP, JSET, K), \ 1617 INSN_2(JMP, JA), \ 1618 /* Store instructions. */ \ 1619 /* Register based. */ \ 1620 INSN_3(STX, MEM, B), \ 1621 INSN_3(STX, MEM, H), \ 1622 INSN_3(STX, MEM, W), \ 1623 INSN_3(STX, MEM, DW), \ 1624 INSN_3(STX, ATOMIC, W), \ 1625 INSN_3(STX, ATOMIC, DW), \ 1626 /* Immediate based. */ \ 1627 INSN_3(ST, MEM, B), \ 1628 INSN_3(ST, MEM, H), \ 1629 INSN_3(ST, MEM, W), \ 1630 INSN_3(ST, MEM, DW), \ 1631 /* Load instructions. */ \ 1632 /* Register based. */ \ 1633 INSN_3(LDX, MEM, B), \ 1634 INSN_3(LDX, MEM, H), \ 1635 INSN_3(LDX, MEM, W), \ 1636 INSN_3(LDX, MEM, DW), \ 1637 /* Immediate based. */ \ 1638 INSN_3(LD, IMM, DW) 1639 1640 bool bpf_opcode_in_insntable(u8 code) 1641 { 1642 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true 1643 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true 1644 static const bool public_insntable[256] = { 1645 [0 ... 255] = false, 1646 /* Now overwrite non-defaults ... */ 1647 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL), 1648 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */ 1649 [BPF_LD | BPF_ABS | BPF_B] = true, 1650 [BPF_LD | BPF_ABS | BPF_H] = true, 1651 [BPF_LD | BPF_ABS | BPF_W] = true, 1652 [BPF_LD | BPF_IND | BPF_B] = true, 1653 [BPF_LD | BPF_IND | BPF_H] = true, 1654 [BPF_LD | BPF_IND | BPF_W] = true, 1655 }; 1656 #undef BPF_INSN_3_TBL 1657 #undef BPF_INSN_2_TBL 1658 return public_insntable[code]; 1659 } 1660 1661 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1662 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) 1663 { 1664 memset(dst, 0, size); 1665 return -EFAULT; 1666 } 1667 1668 /** 1669 * ___bpf_prog_run - run eBPF program on a given context 1670 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers 1671 * @insn: is the array of eBPF instructions 1672 * 1673 * Decode and execute eBPF instructions. 1674 * 1675 * Return: whatever value is in %BPF_R0 at program exit 1676 */ 1677 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) 1678 { 1679 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y 1680 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z 1681 static const void * const jumptable[256] __annotate_jump_table = { 1682 [0 ... 255] = &&default_label, 1683 /* Now overwrite non-defaults ... */ 1684 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), 1685 /* Non-UAPI available opcodes. 
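 * The computed goto below indexes this table directly with insn->code;
 * e.g. (illustrative) BPF_ALU64 | BPF_ADD | BPF_X is 0x0f, so the
 * dispatch lands on the ALU64_ADD_X label generated by the ALU() macro
 * further down.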
*/ 1686 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, 1687 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, 1688 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC, 1689 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, 1690 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, 1691 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, 1692 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW, 1693 }; 1694 #undef BPF_INSN_3_LBL 1695 #undef BPF_INSN_2_LBL 1696 u32 tail_call_cnt = 0; 1697 1698 #define CONT ({ insn++; goto select_insn; }) 1699 #define CONT_JMP ({ insn++; goto select_insn; }) 1700 1701 select_insn: 1702 goto *jumptable[insn->code]; 1703 1704 /* Explicitly mask the register-based shift amounts with 63 or 31 1705 * to avoid undefined behavior. Normally this won't affect the 1706 * generated code, for example, in case of native 64 bit archs such 1707 * as x86-64 or arm64, the compiler is optimizing the AND away for 1708 * the interpreter. In case of JITs, each of the JIT backends compiles 1709 * the BPF shift operations to machine instructions which produce 1710 * implementation-defined results in such a case; the resulting 1711 * contents of the register may be arbitrary, but program behaviour 1712 * as a whole remains defined. In other words, in case of JIT backends, 1713 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation. 1714 */ 1715 /* ALU (shifts) */ 1716 #define SHT(OPCODE, OP) \ 1717 ALU64_##OPCODE##_X: \ 1718 DST = DST OP (SRC & 63); \ 1719 CONT; \ 1720 ALU_##OPCODE##_X: \ 1721 DST = (u32) DST OP ((u32) SRC & 31); \ 1722 CONT; \ 1723 ALU64_##OPCODE##_K: \ 1724 DST = DST OP IMM; \ 1725 CONT; \ 1726 ALU_##OPCODE##_K: \ 1727 DST = (u32) DST OP (u32) IMM; \ 1728 CONT; 1729 /* ALU (rest) */ 1730 #define ALU(OPCODE, OP) \ 1731 ALU64_##OPCODE##_X: \ 1732 DST = DST OP SRC; \ 1733 CONT; \ 1734 ALU_##OPCODE##_X: \ 1735 DST = (u32) DST OP (u32) SRC; \ 1736 CONT; \ 1737 ALU64_##OPCODE##_K: \ 1738 DST = DST OP IMM; \ 1739 CONT; \ 1740 ALU_##OPCODE##_K: \ 1741 DST = (u32) DST OP (u32) IMM; \ 1742 CONT; 1743 ALU(ADD, +) 1744 ALU(SUB, -) 1745 ALU(AND, &) 1746 ALU(OR, |) 1747 ALU(XOR, ^) 1748 ALU(MUL, *) 1749 SHT(LSH, <<) 1750 SHT(RSH, >>) 1751 #undef SHT 1752 #undef ALU 1753 ALU_NEG: 1754 DST = (u32) -DST; 1755 CONT; 1756 ALU64_NEG: 1757 DST = -DST; 1758 CONT; 1759 ALU_MOV_X: 1760 DST = (u32) SRC; 1761 CONT; 1762 ALU_MOV_K: 1763 DST = (u32) IMM; 1764 CONT; 1765 ALU64_MOV_X: 1766 DST = SRC; 1767 CONT; 1768 ALU64_MOV_K: 1769 DST = IMM; 1770 CONT; 1771 LD_IMM_DW: 1772 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; 1773 insn++; 1774 CONT; 1775 ALU_ARSH_X: 1776 DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); 1777 CONT; 1778 ALU_ARSH_K: 1779 DST = (u64) (u32) (((s32) DST) >> IMM); 1780 CONT; 1781 ALU64_ARSH_X: 1782 (*(s64 *) &DST) >>= (SRC & 63); 1783 CONT; 1784 ALU64_ARSH_K: 1785 (*(s64 *) &DST) >>= IMM; 1786 CONT; 1787 ALU64_MOD_X: 1788 div64_u64_rem(DST, SRC, &AX); 1789 DST = AX; 1790 CONT; 1791 ALU_MOD_X: 1792 AX = (u32) DST; 1793 DST = do_div(AX, (u32) SRC); 1794 CONT; 1795 ALU64_MOD_K: 1796 div64_u64_rem(DST, IMM, &AX); 1797 DST = AX; 1798 CONT; 1799 ALU_MOD_K: 1800 AX = (u32) DST; 1801 DST = do_div(AX, (u32) IMM); 1802 CONT; 1803 ALU64_DIV_X: 1804 DST = div64_u64(DST, SRC); 1805 CONT; 1806 ALU_DIV_X: 1807 AX = (u32) DST; 1808 do_div(AX, (u32) SRC); 1809 DST = (u32) AX; 1810 CONT; 1811 ALU64_DIV_K: 1812 DST = div64_u64(DST, IMM); 1813 CONT; 1814 ALU_DIV_K: 1815 AX = (u32) DST; 1816 do_div(AX, (u32) IMM); 1817 DST = (u32) AX; 1818 CONT; 1819 ALU_END_TO_BE: 1820 
switch (IMM) { 1821 case 16: 1822 DST = (__force u16) cpu_to_be16(DST); 1823 break; 1824 case 32: 1825 DST = (__force u32) cpu_to_be32(DST); 1826 break; 1827 case 64: 1828 DST = (__force u64) cpu_to_be64(DST); 1829 break; 1830 } 1831 CONT; 1832 ALU_END_TO_LE: 1833 switch (IMM) { 1834 case 16: 1835 DST = (__force u16) cpu_to_le16(DST); 1836 break; 1837 case 32: 1838 DST = (__force u32) cpu_to_le32(DST); 1839 break; 1840 case 64: 1841 DST = (__force u64) cpu_to_le64(DST); 1842 break; 1843 } 1844 CONT; 1845 1846 /* CALL */ 1847 JMP_CALL: 1848 /* Function call scratches BPF_R1-BPF_R5 registers, 1849 * preserves BPF_R6-BPF_R9, and stores return value 1850 * into BPF_R0. 1851 */ 1852 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, 1853 BPF_R4, BPF_R5); 1854 CONT; 1855 1856 JMP_CALL_ARGS: 1857 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, 1858 BPF_R3, BPF_R4, 1859 BPF_R5, 1860 insn + insn->off + 1); 1861 CONT; 1862 1863 JMP_TAIL_CALL: { 1864 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 1865 struct bpf_array *array = container_of(map, struct bpf_array, map); 1866 struct bpf_prog *prog; 1867 u32 index = BPF_R3; 1868 1869 if (unlikely(index >= array->map.max_entries)) 1870 goto out; 1871 1872 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT)) 1873 goto out; 1874 1875 tail_call_cnt++; 1876 1877 prog = READ_ONCE(array->ptrs[index]); 1878 if (!prog) 1879 goto out; 1880 1881 /* ARG1 at this point is guaranteed to point to CTX from 1882 * the verifier side due to the fact that the tail call is 1883 * handled like a helper, that is, bpf_tail_call_proto, 1884 * where arg1_type is ARG_PTR_TO_CTX. 1885 */ 1886 insn = prog->insnsi; 1887 goto select_insn; 1888 out: 1889 CONT; 1890 } 1891 JMP_JA: 1892 insn += insn->off; 1893 CONT; 1894 JMP_EXIT: 1895 return BPF_R0; 1896 /* JMP */ 1897 #define COND_JMP(SIGN, OPCODE, CMP_OP) \ 1898 JMP_##OPCODE##_X: \ 1899 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ 1900 insn += insn->off; \ 1901 CONT_JMP; \ 1902 } \ 1903 CONT; \ 1904 JMP32_##OPCODE##_X: \ 1905 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ 1906 insn += insn->off; \ 1907 CONT_JMP; \ 1908 } \ 1909 CONT; \ 1910 JMP_##OPCODE##_K: \ 1911 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ 1912 insn += insn->off; \ 1913 CONT_JMP; \ 1914 } \ 1915 CONT; \ 1916 JMP32_##OPCODE##_K: \ 1917 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ 1918 insn += insn->off; \ 1919 CONT_JMP; \ 1920 } \ 1921 CONT; 1922 COND_JMP(u, JEQ, ==) 1923 COND_JMP(u, JNE, !=) 1924 COND_JMP(u, JGT, >) 1925 COND_JMP(u, JLT, <) 1926 COND_JMP(u, JGE, >=) 1927 COND_JMP(u, JLE, <=) 1928 COND_JMP(u, JSET, &) 1929 COND_JMP(s, JSGT, >) 1930 COND_JMP(s, JSLT, <) 1931 COND_JMP(s, JSGE, >=) 1932 COND_JMP(s, JSLE, <=) 1933 #undef COND_JMP 1934 /* ST, STX and LDX*/ 1935 ST_NOSPEC: 1936 /* Speculation barrier for mitigating Speculative Store Bypass. 1937 * In case of arm64, we rely on the firmware mitigation as 1938 * controlled via the ssbd kernel parameter. Whenever the 1939 * mitigation is enabled, it works for all of the kernel code 1940 * with no need to provide any additional instructions here. 1941 * In case of x86, we use 'lfence' insn for mitigation. We 1942 * reuse preexisting logic from Spectre v1 mitigation that 1943 * happens to produce the required code on x86 for v4 as well. 
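 *
 * The barrier is an opcode of its own with no operands; conceptually
 * (illustrative)
 *
 *	insn.code = BPF_ST | BPF_NOSPEC;
 *	insn.dst_reg = insn.src_reg = insn.off = insn.imm = 0;
 *
 * it stores nothing and only constrains speculation at that point.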
1944 */ 1945 #ifdef CONFIG_X86 1946 barrier_nospec(); 1947 #endif 1948 CONT; 1949 #define LDST(SIZEOP, SIZE) \ 1950 STX_MEM_##SIZEOP: \ 1951 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ 1952 CONT; \ 1953 ST_MEM_##SIZEOP: \ 1954 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ 1955 CONT; \ 1956 LDX_MEM_##SIZEOP: \ 1957 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 1958 CONT; \ 1959 LDX_PROBE_MEM_##SIZEOP: \ 1960 bpf_probe_read_kernel(&DST, sizeof(SIZE), \ 1961 (const void *)(long) (SRC + insn->off)); \ 1962 DST = *((SIZE *)&DST); \ 1963 CONT; 1964 1965 LDST(B, u8) 1966 LDST(H, u16) 1967 LDST(W, u32) 1968 LDST(DW, u64) 1969 #undef LDST 1970 1971 #define ATOMIC_ALU_OP(BOP, KOP) \ 1972 case BOP: \ 1973 if (BPF_SIZE(insn->code) == BPF_W) \ 1974 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \ 1975 (DST + insn->off)); \ 1976 else \ 1977 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \ 1978 (DST + insn->off)); \ 1979 break; \ 1980 case BOP | BPF_FETCH: \ 1981 if (BPF_SIZE(insn->code) == BPF_W) \ 1982 SRC = (u32) atomic_fetch_##KOP( \ 1983 (u32) SRC, \ 1984 (atomic_t *)(unsigned long) (DST + insn->off)); \ 1985 else \ 1986 SRC = (u64) atomic64_fetch_##KOP( \ 1987 (u64) SRC, \ 1988 (atomic64_t *)(unsigned long) (DST + insn->off)); \ 1989 break; 1990 1991 STX_ATOMIC_DW: 1992 STX_ATOMIC_W: 1993 switch (IMM) { 1994 ATOMIC_ALU_OP(BPF_ADD, add) 1995 ATOMIC_ALU_OP(BPF_AND, and) 1996 ATOMIC_ALU_OP(BPF_OR, or) 1997 ATOMIC_ALU_OP(BPF_XOR, xor) 1998 #undef ATOMIC_ALU_OP 1999 2000 case BPF_XCHG: 2001 if (BPF_SIZE(insn->code) == BPF_W) 2002 SRC = (u32) atomic_xchg( 2003 (atomic_t *)(unsigned long) (DST + insn->off), 2004 (u32) SRC); 2005 else 2006 SRC = (u64) atomic64_xchg( 2007 (atomic64_t *)(unsigned long) (DST + insn->off), 2008 (u64) SRC); 2009 break; 2010 case BPF_CMPXCHG: 2011 if (BPF_SIZE(insn->code) == BPF_W) 2012 BPF_R0 = (u32) atomic_cmpxchg( 2013 (atomic_t *)(unsigned long) (DST + insn->off), 2014 (u32) BPF_R0, (u32) SRC); 2015 else 2016 BPF_R0 = (u64) atomic64_cmpxchg( 2017 (atomic64_t *)(unsigned long) (DST + insn->off), 2018 (u64) BPF_R0, (u64) SRC); 2019 break; 2020 2021 default: 2022 goto default_label; 2023 } 2024 CONT; 2025 2026 default_label: 2027 /* If we ever reach this, we have a bug somewhere. Die hard here 2028 * instead of just returning 0; we could be somewhere in a subprog, 2029 * so execution could continue otherwise which we do /not/ want. 2030 * 2031 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable(). 
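 *
 * A concrete case (illustrative): BPF_LD | BPF_ABS | BPF_W is accepted
 * by bpf_opcode_in_insntable() but is always rewritten by the verifier
 * before the program can run, so it has no label in the jumptable on
 * purpose and would only end up here if such a rewrite were ever missed.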
2032 */ 2033 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n", 2034 insn->code, insn->imm); 2035 BUG_ON(1); 2036 return 0; 2037 } 2038 2039 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size 2040 #define DEFINE_BPF_PROG_RUN(stack_size) \ 2041 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ 2042 { \ 2043 u64 stack[stack_size / sizeof(u64)]; \ 2044 u64 regs[MAX_BPF_EXT_REG]; \ 2045 \ 2046 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2047 ARG1 = (u64) (unsigned long) ctx; \ 2048 return ___bpf_prog_run(regs, insn); \ 2049 } 2050 2051 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size 2052 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \ 2053 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ 2054 const struct bpf_insn *insn) \ 2055 { \ 2056 u64 stack[stack_size / sizeof(u64)]; \ 2057 u64 regs[MAX_BPF_EXT_REG]; \ 2058 \ 2059 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2060 BPF_R1 = r1; \ 2061 BPF_R2 = r2; \ 2062 BPF_R3 = r3; \ 2063 BPF_R4 = r4; \ 2064 BPF_R5 = r5; \ 2065 return ___bpf_prog_run(regs, insn); \ 2066 } 2067 2068 #define EVAL1(FN, X) FN(X) 2069 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y) 2070 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y) 2071 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y) 2072 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y) 2073 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y) 2074 2075 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192); 2076 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384); 2077 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512); 2078 2079 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192); 2080 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384); 2081 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512); 2082 2083 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size), 2084 2085 static unsigned int (*interpreters[])(const void *ctx, 2086 const struct bpf_insn *insn) = { 2087 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2088 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2089 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2090 }; 2091 #undef PROG_NAME_LIST 2092 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size), 2093 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, 2094 const struct bpf_insn *insn) = { 2095 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2096 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2097 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2098 }; 2099 #undef PROG_NAME_LIST 2100 2101 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) 2102 { 2103 stack_depth = max_t(u32, stack_depth, 1); 2104 insn->off = (s16) insn->imm; 2105 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - 2106 __bpf_call_base_args; 2107 insn->code = BPF_JMP | BPF_CALL_ARGS; 2108 } 2109 2110 #else 2111 static unsigned int __bpf_prog_ret0_warn(const void *ctx, 2112 const struct bpf_insn *insn) 2113 { 2114 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON 2115 * is not working properly, so warn about it! 2116 */ 2117 WARN_ON_ONCE(1); 2118 return 0; 2119 } 2120 #endif 2121 2122 bool bpf_prog_map_compatible(struct bpf_map *map, 2123 const struct bpf_prog *fp) 2124 { 2125 bool ret; 2126 2127 if (fp->kprobe_override) 2128 return false; 2129 2130 spin_lock(&map->owner.lock); 2131 if (!map->owner.type) { 2132 /* There's no owner yet where we could check for 2133 * compatibility. 
2134 */ 2135 map->owner.type = fp->type; 2136 map->owner.jited = fp->jited; 2137 map->owner.xdp_has_frags = fp->aux->xdp_has_frags; 2138 ret = true; 2139 } else { 2140 ret = map->owner.type == fp->type && 2141 map->owner.jited == fp->jited && 2142 map->owner.xdp_has_frags == fp->aux->xdp_has_frags; 2143 } 2144 spin_unlock(&map->owner.lock); 2145 2146 return ret; 2147 } 2148 2149 static int bpf_check_tail_call(const struct bpf_prog *fp) 2150 { 2151 struct bpf_prog_aux *aux = fp->aux; 2152 int i, ret = 0; 2153 2154 mutex_lock(&aux->used_maps_mutex); 2155 for (i = 0; i < aux->used_map_cnt; i++) { 2156 struct bpf_map *map = aux->used_maps[i]; 2157 2158 if (!map_type_contains_progs(map)) 2159 continue; 2160 2161 if (!bpf_prog_map_compatible(map, fp)) { 2162 ret = -EINVAL; 2163 goto out; 2164 } 2165 } 2166 2167 out: 2168 mutex_unlock(&aux->used_maps_mutex); 2169 return ret; 2170 } 2171 2172 static void bpf_prog_select_func(struct bpf_prog *fp) 2173 { 2174 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 2175 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 2176 2177 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 2178 #else 2179 fp->bpf_func = __bpf_prog_ret0_warn; 2180 #endif 2181 } 2182 2183 /** 2184 * bpf_prog_select_runtime - select exec runtime for BPF program 2185 * @fp: bpf_prog populated with BPF program 2186 * @err: pointer to error variable 2187 * 2188 * Try to JIT eBPF program, if JIT is not available, use interpreter. 2189 * The BPF program will be executed via bpf_prog_run() function. 2190 * 2191 * Return: the &fp argument along with &err set to 0 for success or 2192 * a negative errno code on failure 2193 */ 2194 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 2195 { 2196 /* In case of BPF to BPF calls, verifier did all the prep 2197 * work with regards to JITing, etc. 2198 */ 2199 bool jit_needed = false; 2200 2201 if (fp->bpf_func) 2202 goto finalize; 2203 2204 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) || 2205 bpf_prog_has_kfunc_call(fp)) 2206 jit_needed = true; 2207 2208 bpf_prog_select_func(fp); 2209 2210 /* eBPF JITs can rewrite the program in case constant 2211 * blinding is active. However, in case of error during 2212 * blinding, bpf_int_jit_compile() must always return a 2213 * valid program, which in this case would simply not 2214 * be JITed, but falls back to the interpreter. 2215 */ 2216 if (!bpf_prog_is_dev_bound(fp->aux)) { 2217 *err = bpf_prog_alloc_jited_linfo(fp); 2218 if (*err) 2219 return fp; 2220 2221 fp = bpf_int_jit_compile(fp); 2222 bpf_prog_jit_attempt_done(fp); 2223 if (!fp->jited && jit_needed) { 2224 *err = -ENOTSUPP; 2225 return fp; 2226 } 2227 } else { 2228 *err = bpf_prog_offload_compile(fp); 2229 if (*err) 2230 return fp; 2231 } 2232 2233 finalize: 2234 bpf_prog_lock_ro(fp); 2235 2236 /* The tail call compatibility check can only be done at 2237 * this late stage as we need to determine, if we deal 2238 * with JITed or non JITed program concatenations and not 2239 * all eBPF JITs might immediately support all features. 
2240 */ 2241 *err = bpf_check_tail_call(fp); 2242 2243 return fp; 2244 } 2245 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 2246 2247 static unsigned int __bpf_prog_ret1(const void *ctx, 2248 const struct bpf_insn *insn) 2249 { 2250 return 1; 2251 } 2252 2253 static struct bpf_prog_dummy { 2254 struct bpf_prog prog; 2255 } dummy_bpf_prog = { 2256 .prog = { 2257 .bpf_func = __bpf_prog_ret1, 2258 }, 2259 }; 2260 2261 struct bpf_empty_prog_array bpf_empty_prog_array = { 2262 .null_prog = NULL, 2263 }; 2264 EXPORT_SYMBOL(bpf_empty_prog_array); 2265 2266 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) 2267 { 2268 if (prog_cnt) 2269 return kzalloc(sizeof(struct bpf_prog_array) + 2270 sizeof(struct bpf_prog_array_item) * 2271 (prog_cnt + 1), 2272 flags); 2273 2274 return &bpf_empty_prog_array.hdr; 2275 } 2276 2277 void bpf_prog_array_free(struct bpf_prog_array *progs) 2278 { 2279 if (!progs || progs == &bpf_empty_prog_array.hdr) 2280 return; 2281 kfree_rcu(progs, rcu); 2282 } 2283 2284 int bpf_prog_array_length(struct bpf_prog_array *array) 2285 { 2286 struct bpf_prog_array_item *item; 2287 u32 cnt = 0; 2288 2289 for (item = array->items; item->prog; item++) 2290 if (item->prog != &dummy_bpf_prog.prog) 2291 cnt++; 2292 return cnt; 2293 } 2294 2295 bool bpf_prog_array_is_empty(struct bpf_prog_array *array) 2296 { 2297 struct bpf_prog_array_item *item; 2298 2299 for (item = array->items; item->prog; item++) 2300 if (item->prog != &dummy_bpf_prog.prog) 2301 return false; 2302 return true; 2303 } 2304 2305 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, 2306 u32 *prog_ids, 2307 u32 request_cnt) 2308 { 2309 struct bpf_prog_array_item *item; 2310 int i = 0; 2311 2312 for (item = array->items; item->prog; item++) { 2313 if (item->prog == &dummy_bpf_prog.prog) 2314 continue; 2315 prog_ids[i] = item->prog->aux->id; 2316 if (++i == request_cnt) { 2317 item++; 2318 break; 2319 } 2320 } 2321 2322 return !!(item->prog); 2323 } 2324 2325 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, 2326 __u32 __user *prog_ids, u32 cnt) 2327 { 2328 unsigned long err = 0; 2329 bool nospc; 2330 u32 *ids; 2331 2332 /* users of this function are doing: 2333 * cnt = bpf_prog_array_length(); 2334 * if (cnt > 0) 2335 * bpf_prog_array_copy_to_user(..., cnt); 2336 * so below kcalloc doesn't need extra cnt > 0 check. 2337 */ 2338 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); 2339 if (!ids) 2340 return -ENOMEM; 2341 nospc = bpf_prog_array_copy_core(array, ids, cnt); 2342 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 2343 kfree(ids); 2344 if (err) 2345 return -EFAULT; 2346 if (nospc) 2347 return -ENOSPC; 2348 return 0; 2349 } 2350 2351 void bpf_prog_array_delete_safe(struct bpf_prog_array *array, 2352 struct bpf_prog *old_prog) 2353 { 2354 struct bpf_prog_array_item *item; 2355 2356 for (item = array->items; item->prog; item++) 2357 if (item->prog == old_prog) { 2358 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); 2359 break; 2360 } 2361 } 2362 2363 /** 2364 * bpf_prog_array_delete_safe_at() - Replaces the program at the given 2365 * index into the program array with 2366 * a dummy no-op program. 2367 * @array: a bpf_prog_array 2368 * @index: the index of the program to replace 2369 * 2370 * Skips over dummy programs, by not counting them, when calculating 2371 * the position of the program to replace. 2372 * 2373 * Return: 2374 * * 0 - Success 2375 * * -EINVAL - Invalid index value. Must be a non-negative integer. 
2376 * * -ENOENT - Index out of range 2377 */ 2378 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index) 2379 { 2380 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog); 2381 } 2382 2383 /** 2384 * bpf_prog_array_update_at() - Updates the program at the given index 2385 * into the program array. 2386 * @array: a bpf_prog_array 2387 * @index: the index of the program to update 2388 * @prog: the program to insert into the array 2389 * 2390 * Skips over dummy programs, by not counting them, when calculating 2391 * the position of the program to update. 2392 * 2393 * Return: 2394 * * 0 - Success 2395 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2396 * * -ENOENT - Index out of range 2397 */ 2398 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 2399 struct bpf_prog *prog) 2400 { 2401 struct bpf_prog_array_item *item; 2402 2403 if (unlikely(index < 0)) 2404 return -EINVAL; 2405 2406 for (item = array->items; item->prog; item++) { 2407 if (item->prog == &dummy_bpf_prog.prog) 2408 continue; 2409 if (!index) { 2410 WRITE_ONCE(item->prog, prog); 2411 return 0; 2412 } 2413 index--; 2414 } 2415 return -ENOENT; 2416 } 2417 2418 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 2419 struct bpf_prog *exclude_prog, 2420 struct bpf_prog *include_prog, 2421 u64 bpf_cookie, 2422 struct bpf_prog_array **new_array) 2423 { 2424 int new_prog_cnt, carry_prog_cnt = 0; 2425 struct bpf_prog_array_item *existing, *new; 2426 struct bpf_prog_array *array; 2427 bool found_exclude = false; 2428 2429 /* Figure out how many existing progs we need to carry over to 2430 * the new array. 2431 */ 2432 if (old_array) { 2433 existing = old_array->items; 2434 for (; existing->prog; existing++) { 2435 if (existing->prog == exclude_prog) { 2436 found_exclude = true; 2437 continue; 2438 } 2439 if (existing->prog != &dummy_bpf_prog.prog) 2440 carry_prog_cnt++; 2441 if (existing->prog == include_prog) 2442 return -EEXIST; 2443 } 2444 } 2445 2446 if (exclude_prog && !found_exclude) 2447 return -ENOENT; 2448 2449 /* How many progs (not NULL) will be in the new array? */ 2450 new_prog_cnt = carry_prog_cnt; 2451 if (include_prog) 2452 new_prog_cnt += 1; 2453 2454 /* Do we have any prog (not NULL) in the new array? 
*/ 2455 if (!new_prog_cnt) { 2456 *new_array = NULL; 2457 return 0; 2458 } 2459 2460 /* +1 as the end of prog_array is marked with NULL */ 2461 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL); 2462 if (!array) 2463 return -ENOMEM; 2464 new = array->items; 2465 2466 /* Fill in the new prog array */ 2467 if (carry_prog_cnt) { 2468 existing = old_array->items; 2469 for (; existing->prog; existing++) { 2470 if (existing->prog == exclude_prog || 2471 existing->prog == &dummy_bpf_prog.prog) 2472 continue; 2473 2474 new->prog = existing->prog; 2475 new->bpf_cookie = existing->bpf_cookie; 2476 new++; 2477 } 2478 } 2479 if (include_prog) { 2480 new->prog = include_prog; 2481 new->bpf_cookie = bpf_cookie; 2482 new++; 2483 } 2484 new->prog = NULL; 2485 *new_array = array; 2486 return 0; 2487 } 2488 2489 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 2490 u32 *prog_ids, u32 request_cnt, 2491 u32 *prog_cnt) 2492 { 2493 u32 cnt = 0; 2494 2495 if (array) 2496 cnt = bpf_prog_array_length(array); 2497 2498 *prog_cnt = cnt; 2499 2500 /* return early if user requested only program count or nothing to copy */ 2501 if (!request_cnt || !cnt) 2502 return 0; 2503 2504 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ 2505 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC 2506 : 0; 2507 } 2508 2509 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 2510 struct bpf_map **used_maps, u32 len) 2511 { 2512 struct bpf_map *map; 2513 u32 i; 2514 2515 for (i = 0; i < len; i++) { 2516 map = used_maps[i]; 2517 if (map->ops->map_poke_untrack) 2518 map->ops->map_poke_untrack(map, aux); 2519 bpf_map_put(map); 2520 } 2521 } 2522 2523 static void bpf_free_used_maps(struct bpf_prog_aux *aux) 2524 { 2525 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt); 2526 kfree(aux->used_maps); 2527 } 2528 2529 void __bpf_free_used_btfs(struct bpf_prog_aux *aux, 2530 struct btf_mod_pair *used_btfs, u32 len) 2531 { 2532 #ifdef CONFIG_BPF_SYSCALL 2533 struct btf_mod_pair *btf_mod; 2534 u32 i; 2535 2536 for (i = 0; i < len; i++) { 2537 btf_mod = &used_btfs[i]; 2538 if (btf_mod->module) 2539 module_put(btf_mod->module); 2540 btf_put(btf_mod->btf); 2541 } 2542 #endif 2543 } 2544 2545 static void bpf_free_used_btfs(struct bpf_prog_aux *aux) 2546 { 2547 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt); 2548 kfree(aux->used_btfs); 2549 } 2550 2551 static void bpf_prog_free_deferred(struct work_struct *work) 2552 { 2553 struct bpf_prog_aux *aux; 2554 int i; 2555 2556 aux = container_of(work, struct bpf_prog_aux, work); 2557 #ifdef CONFIG_BPF_SYSCALL 2558 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab); 2559 #endif 2560 bpf_free_used_maps(aux); 2561 bpf_free_used_btfs(aux); 2562 if (bpf_prog_is_dev_bound(aux)) 2563 bpf_prog_offload_destroy(aux->prog); 2564 #ifdef CONFIG_PERF_EVENTS 2565 if (aux->prog->has_callchain_buf) 2566 put_callchain_buffers(); 2567 #endif 2568 if (aux->dst_trampoline) 2569 bpf_trampoline_put(aux->dst_trampoline); 2570 for (i = 0; i < aux->func_cnt; i++) { 2571 /* We can just unlink the subprog poke descriptor table as 2572 * it was originally linked to the main program and is also 2573 * released along with it. 
                 */
                aux->func[i]->aux->poke_tab = NULL;
                bpf_jit_free(aux->func[i]);
        }
        if (aux->func_cnt) {
                kfree(aux->func);
                bpf_prog_unlock_free(aux->prog);
        } else {
                bpf_jit_free(aux->prog);
        }
}

void bpf_prog_free(struct bpf_prog *fp)
{
        struct bpf_prog_aux *aux = fp->aux;

        if (aux->dst_prog)
                bpf_prog_put(aux->dst_prog);
        INIT_WORK(&aux->work, bpf_prog_free_deferred);
        schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
        prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
        /* Should someone ever have the rather unwise idea to use some
         * of the registers passed into this function, then note that
         * this function is called from native eBPF and classic-to-eBPF
         * transformations. Register assignments from both sides are
         * different, f.e. classic always sets fn(ctx, A, X) here.
         */
        struct rnd_state *state;
        u32 res;

        state = &get_cpu_var(bpf_user_rnd_state);
        res = prandom_u32_state(state);
        put_cpu_var(bpf_user_rnd_state);

        return res;
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
        return raw_smp_processor_id();
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
        return NULL;
}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{
        return NULL;
}
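/* Illustrative sketch (not built): the definitions in this block, as well as
 * bpf_event_output() below, are weak symbols. A subsystem that actually
 * implements one of them simply provides a strong definition in its own
 * translation unit and the linker prefers it over the fallback here; no
 * registration step is needed. The file name in the snippet is hypothetical
 * and only shows the pattern.
 */
#if 0
/* e.g. in a hypothetical subsys/output.c built into the kernel: */
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
        /* Real implementation; shadows the -ENOTSUPP stub below at link time. */
        return 0;
}
#endif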
u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
        return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
        .func           = NULL,
        .gpl_only       = false,
        .ret_type       = RET_VOID,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
        return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
        return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
bool __weak bpf_jit_needs_zext(void)
{
        return false;
}

bool __weak bpf_jit_supports_kfunc_call(void)
{
        return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
                         int len)
{
        return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                              void *addr1, void *addr2)
{
        return -ENOTSUPP;
}

void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{
        return ERR_PTR(-ENOTSUPP);
}

int __weak bpf_arch_text_invalidate(void *dst, size_t len)
{
        return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
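/* Illustrative sketch (not built): a minimal BPF-side view of the tail call
 * path serviced by JMP_TAIL_CALL in the interpreter above. This would live
 * in a separate object compiled with clang -target bpf against libbpf's
 * bpf_helpers.h; the map name, section names and the choice of XDP are
 * assumptions for the example only, nothing in this file defines them.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 2);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int xdp_callee(struct xdp_md *ctx)
{
        return XDP_PASS;
}

SEC("xdp")
int xdp_entry(struct xdp_md *ctx)
{
        /* On success control transfers to the program in slot 0 and does not
         * return here; the interpreter bounds the chain via tail_call_cnt
         * against MAX_TAIL_CALL_CNT.
         */
        bpf_tail_call(ctx, &jmp_table, 0);

        /* Reached only if the tail call failed: empty slot, index out of
         * range, or tail call limit exceeded.
         */
        return XDP_DROP;
}

char _license[] SEC("license") = "GPL";
#endif
/* User space would install xdp_callee's prog fd into slot 0 of jmp_table
 * with bpf_map_update_elem() before xdp_entry runs.
 */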
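/* A small sketch (not built) making the interpreter selection arithmetic
 * explicit: bpf_prog_select_func() and bpf_patch_call_args() above pick one
 * of the 16 stack-size variants generated by the EVAL* macros. interp_idx()
 * is a hypothetical helper name used only for this illustration.
 */
#if 0
static u32 interp_idx(u32 stack_depth)
{
        /* Same computation as in bpf_prog_select_func(). */
        stack_depth = max_t(u32, stack_depth, 1);
        return (round_up(stack_depth, 32) / 32) - 1;
}
/* interp_idx(1)   == 0  -> __bpf_prog_run32()
 * interp_idx(32)  == 0  -> __bpf_prog_run32()
 * interp_idx(33)  == 1  -> __bpf_prog_run64()
 * interp_idx(512) == 15 -> __bpf_prog_run512()
 */
#endif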
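/* Another illustrative (not built) override of a weak JIT hook: a 32-bit
 * architecture JIT that models each 64-bit BPF register as a register pair
 * can opt in to the verifier's zero-extension insertion by shadowing the
 * weak bpf_jit_needs_zext() above with a strong definition in its own
 * bpf_jit_comp.c. The architecture here is hypothetical.
 */
#if 0
bool bpf_jit_needs_zext(void)
{
        return true;
}
#endif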