1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux Socket Filter - Kernel level socket filtering 4 * 5 * Based on the design of the Berkeley Packet Filter. The new 6 * internal format has been designed by PLUMgrid: 7 * 8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com 9 * 10 * Authors: 11 * 12 * Jay Schulist <jschlst@samba.org> 13 * Alexei Starovoitov <ast@plumgrid.com> 14 * Daniel Borkmann <dborkman@redhat.com> 15 * 16 * Andi Kleen - Fix a few bad bugs and races. 17 * Kris Katterjohn - Added many additional checks in bpf_check_classic() 18 */ 19 20 #include <uapi/linux/btf.h> 21 #include <linux/filter.h> 22 #include <linux/skbuff.h> 23 #include <linux/vmalloc.h> 24 #include <linux/random.h> 25 #include <linux/moduleloader.h> 26 #include <linux/bpf.h> 27 #include <linux/btf.h> 28 #include <linux/objtool.h> 29 #include <linux/rbtree_latch.h> 30 #include <linux/kallsyms.h> 31 #include <linux/rcupdate.h> 32 #include <linux/perf_event.h> 33 #include <linux/extable.h> 34 #include <linux/log2.h> 35 #include <linux/bpf_verifier.h> 36 #include <linux/nodemask.h> 37 #include <linux/nospec.h> 38 #include <linux/bpf_mem_alloc.h> 39 #include <linux/memcontrol.h> 40 41 #include <asm/barrier.h> 42 #include <asm/unaligned.h> 43 44 /* Registers */ 45 #define BPF_R0 regs[BPF_REG_0] 46 #define BPF_R1 regs[BPF_REG_1] 47 #define BPF_R2 regs[BPF_REG_2] 48 #define BPF_R3 regs[BPF_REG_3] 49 #define BPF_R4 regs[BPF_REG_4] 50 #define BPF_R5 regs[BPF_REG_5] 51 #define BPF_R6 regs[BPF_REG_6] 52 #define BPF_R7 regs[BPF_REG_7] 53 #define BPF_R8 regs[BPF_REG_8] 54 #define BPF_R9 regs[BPF_REG_9] 55 #define BPF_R10 regs[BPF_REG_10] 56 57 /* Named registers */ 58 #define DST regs[insn->dst_reg] 59 #define SRC regs[insn->src_reg] 60 #define FP regs[BPF_REG_FP] 61 #define AX regs[BPF_REG_AX] 62 #define ARG1 regs[BPF_REG_ARG1] 63 #define CTX regs[BPF_REG_CTX] 64 #define OFF insn->off 65 #define IMM insn->imm 66 67 struct bpf_mem_alloc bpf_global_ma; 68 bool bpf_global_ma_set; 69 70 /* No hurry in this branch 71 * 72 * Exported for the bpf jit load helper. 73 */ 74 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) 75 { 76 u8 *ptr = NULL; 77 78 if (k >= SKF_NET_OFF) { 79 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 80 } else if (k >= SKF_LL_OFF) { 81 if (unlikely(!skb_mac_header_was_set(skb))) 82 return NULL; 83 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 84 } 85 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 86 return ptr; 87 88 return NULL; 89 } 90 91 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags) 92 { 93 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 94 struct bpf_prog_aux *aux; 95 struct bpf_prog *fp; 96 97 size = round_up(size, PAGE_SIZE); 98 fp = __vmalloc(size, gfp_flags); 99 if (fp == NULL) 100 return NULL; 101 102 aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); 103 if (aux == NULL) { 104 vfree(fp); 105 return NULL; 106 } 107 fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); 108 if (!fp->active) { 109 vfree(fp); 110 kfree(aux); 111 return NULL; 112 } 113 114 fp->pages = size / PAGE_SIZE; 115 fp->aux = aux; 116 fp->aux->prog = fp; 117 fp->jit_requested = ebpf_jit_enabled(); 118 fp->blinding_requested = bpf_jit_blinding_enabled(fp); 119 #ifdef CONFIG_CGROUP_BPF 120 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID; 121 #endif 122 123 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); 124 mutex_init(&fp->aux->used_maps_mutex); 125 mutex_init(&fp->aux->dst_mutex); 126 127 return fp; 128 } 129 130 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) 131 { 132 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 133 struct bpf_prog *prog; 134 int cpu; 135 136 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags); 137 if (!prog) 138 return NULL; 139 140 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags); 141 if (!prog->stats) { 142 free_percpu(prog->active); 143 kfree(prog->aux); 144 vfree(prog); 145 return NULL; 146 } 147 148 for_each_possible_cpu(cpu) { 149 struct bpf_prog_stats *pstats; 150 151 pstats = per_cpu_ptr(prog->stats, cpu); 152 u64_stats_init(&pstats->syncp); 153 } 154 return prog; 155 } 156 EXPORT_SYMBOL_GPL(bpf_prog_alloc); 157 158 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog) 159 { 160 if (!prog->aux->nr_linfo || !prog->jit_requested) 161 return 0; 162 163 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo, 164 sizeof(*prog->aux->jited_linfo), 165 bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN)); 166 if (!prog->aux->jited_linfo) 167 return -ENOMEM; 168 169 return 0; 170 } 171 172 void bpf_prog_jit_attempt_done(struct bpf_prog *prog) 173 { 174 if (prog->aux->jited_linfo && 175 (!prog->jited || !prog->aux->jited_linfo[0])) { 176 kvfree(prog->aux->jited_linfo); 177 prog->aux->jited_linfo = NULL; 178 } 179 180 kfree(prog->aux->kfunc_tab); 181 prog->aux->kfunc_tab = NULL; 182 } 183 184 /* The jit engine is responsible to provide an array 185 * for insn_off to the jited_off mapping (insn_to_jit_off). 186 * 187 * The idx to this array is the insn_off. Hence, the insn_off 188 * here is relative to the prog itself instead of the main prog. 189 * This array has one entry for each xlated bpf insn. 190 * 191 * jited_off is the byte off to the end of the jited insn. 192 * 193 * Hence, with 194 * insn_start: 195 * The first bpf insn off of the prog. The insn off 196 * here is relative to the main prog. 197 * e.g. if prog is a subprog, insn_start > 0 198 * linfo_idx: 199 * The prog's idx to prog->aux->linfo and jited_linfo 200 * 201 * jited_linfo[linfo_idx] = prog->bpf_func 202 * 203 * For i > linfo_idx, 204 * 205 * jited_linfo[i] = prog->bpf_func + 206 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1] 207 */ 208 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, 209 const u32 *insn_to_jit_off) 210 { 211 u32 linfo_idx, insn_start, insn_end, nr_linfo, i; 212 const struct bpf_line_info *linfo; 213 void **jited_linfo; 214 215 if (!prog->aux->jited_linfo) 216 /* Userspace did not provide linfo */ 217 return; 218 219 linfo_idx = prog->aux->linfo_idx; 220 linfo = &prog->aux->linfo[linfo_idx]; 221 insn_start = linfo[0].insn_off; 222 insn_end = insn_start + prog->len; 223 224 jited_linfo = &prog->aux->jited_linfo[linfo_idx]; 225 jited_linfo[0] = prog->bpf_func; 226 227 nr_linfo = prog->aux->nr_linfo - linfo_idx; 228 229 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++) 230 /* The verifier ensures that linfo[i].insn_off is 231 * strictly increasing 232 */ 233 jited_linfo[i] = prog->bpf_func + 234 insn_to_jit_off[linfo[i].insn_off - insn_start - 1]; 235 } 236 237 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, 238 gfp_t gfp_extra_flags) 239 { 240 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); 241 struct bpf_prog *fp; 242 u32 pages; 243 244 size = round_up(size, PAGE_SIZE); 245 pages = size / PAGE_SIZE; 246 if (pages <= fp_old->pages) 247 return fp_old; 248 249 fp = __vmalloc(size, gfp_flags); 250 if (fp) { 251 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); 252 fp->pages = pages; 253 fp->aux->prog = fp; 254 255 /* We keep fp->aux from fp_old around in the new 256 * reallocated structure. 257 */ 258 fp_old->aux = NULL; 259 fp_old->stats = NULL; 260 fp_old->active = NULL; 261 __bpf_prog_free(fp_old); 262 } 263 264 return fp; 265 } 266 267 void __bpf_prog_free(struct bpf_prog *fp) 268 { 269 if (fp->aux) { 270 mutex_destroy(&fp->aux->used_maps_mutex); 271 mutex_destroy(&fp->aux->dst_mutex); 272 kfree(fp->aux->poke_tab); 273 kfree(fp->aux); 274 } 275 free_percpu(fp->stats); 276 free_percpu(fp->active); 277 vfree(fp); 278 } 279 280 int bpf_prog_calc_tag(struct bpf_prog *fp) 281 { 282 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64); 283 u32 raw_size = bpf_prog_tag_scratch_size(fp); 284 u32 digest[SHA1_DIGEST_WORDS]; 285 u32 ws[SHA1_WORKSPACE_WORDS]; 286 u32 i, bsize, psize, blocks; 287 struct bpf_insn *dst; 288 bool was_ld_map; 289 u8 *raw, *todo; 290 __be32 *result; 291 __be64 *bits; 292 293 raw = vmalloc(raw_size); 294 if (!raw) 295 return -ENOMEM; 296 297 sha1_init(digest); 298 memset(ws, 0, sizeof(ws)); 299 300 /* We need to take out the map fd for the digest calculation 301 * since they are unstable from user space side. 302 */ 303 dst = (void *)raw; 304 for (i = 0, was_ld_map = false; i < fp->len; i++) { 305 dst[i] = fp->insnsi[i]; 306 if (!was_ld_map && 307 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && 308 (dst[i].src_reg == BPF_PSEUDO_MAP_FD || 309 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { 310 was_ld_map = true; 311 dst[i].imm = 0; 312 } else if (was_ld_map && 313 dst[i].code == 0 && 314 dst[i].dst_reg == 0 && 315 dst[i].src_reg == 0 && 316 dst[i].off == 0) { 317 was_ld_map = false; 318 dst[i].imm = 0; 319 } else { 320 was_ld_map = false; 321 } 322 } 323 324 psize = bpf_prog_insn_size(fp); 325 memset(&raw[psize], 0, raw_size - psize); 326 raw[psize++] = 0x80; 327 328 bsize = round_up(psize, SHA1_BLOCK_SIZE); 329 blocks = bsize / SHA1_BLOCK_SIZE; 330 todo = raw; 331 if (bsize - psize >= sizeof(__be64)) { 332 bits = (__be64 *)(todo + bsize - sizeof(__be64)); 333 } else { 334 bits = (__be64 *)(todo + bsize + bits_offset); 335 blocks++; 336 } 337 *bits = cpu_to_be64((psize - 1) << 3); 338 339 while (blocks--) { 340 sha1_transform(digest, todo, ws); 341 todo += SHA1_BLOCK_SIZE; 342 } 343 344 result = (__force __be32 *)digest; 345 for (i = 0; i < SHA1_DIGEST_WORDS; i++) 346 result[i] = cpu_to_be32(digest[i]); 347 memcpy(fp->tag, result, sizeof(fp->tag)); 348 349 vfree(raw); 350 return 0; 351 } 352 353 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, 354 s32 end_new, s32 curr, const bool probe_pass) 355 { 356 const s64 imm_min = S32_MIN, imm_max = S32_MAX; 357 s32 delta = end_new - end_old; 358 s64 imm = insn->imm; 359 360 if (curr < pos && curr + imm + 1 >= end_old) 361 imm += delta; 362 else if (curr >= end_new && curr + imm + 1 < end_new) 363 imm -= delta; 364 if (imm < imm_min || imm > imm_max) 365 return -ERANGE; 366 if (!probe_pass) 367 insn->imm = imm; 368 return 0; 369 } 370 371 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, 372 s32 end_new, s32 curr, const bool probe_pass) 373 { 374 const s32 off_min = S16_MIN, off_max = S16_MAX; 375 s32 delta = end_new - end_old; 376 s32 off = insn->off; 377 378 if (curr < pos && curr + off + 1 >= end_old) 379 off += delta; 380 else if (curr >= end_new && curr + off + 1 < end_new) 381 off -= delta; 382 if (off < off_min || off > off_max) 383 return -ERANGE; 384 if (!probe_pass) 385 insn->off = off; 386 return 0; 387 } 388 389 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, 390 s32 end_new, const bool probe_pass) 391 { 392 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); 393 struct bpf_insn *insn = prog->insnsi; 394 int ret = 0; 395 396 for (i = 0; i < insn_cnt; i++, insn++) { 397 u8 code; 398 399 /* In the probing pass we still operate on the original, 400 * unpatched image in order to check overflows before we 401 * do any other adjustments. Therefore skip the patchlet. 402 */ 403 if (probe_pass && i == pos) { 404 i = end_new; 405 insn = prog->insnsi + end_old; 406 } 407 if (bpf_pseudo_func(insn)) { 408 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 409 end_new, i, probe_pass); 410 if (ret) 411 return ret; 412 continue; 413 } 414 code = insn->code; 415 if ((BPF_CLASS(code) != BPF_JMP && 416 BPF_CLASS(code) != BPF_JMP32) || 417 BPF_OP(code) == BPF_EXIT) 418 continue; 419 /* Adjust offset of jmps if we cross patch boundaries. */ 420 if (BPF_OP(code) == BPF_CALL) { 421 if (insn->src_reg != BPF_PSEUDO_CALL) 422 continue; 423 ret = bpf_adj_delta_to_imm(insn, pos, end_old, 424 end_new, i, probe_pass); 425 } else { 426 ret = bpf_adj_delta_to_off(insn, pos, end_old, 427 end_new, i, probe_pass); 428 } 429 if (ret) 430 break; 431 } 432 433 return ret; 434 } 435 436 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta) 437 { 438 struct bpf_line_info *linfo; 439 u32 i, nr_linfo; 440 441 nr_linfo = prog->aux->nr_linfo; 442 if (!nr_linfo || !delta) 443 return; 444 445 linfo = prog->aux->linfo; 446 447 for (i = 0; i < nr_linfo; i++) 448 if (off < linfo[i].insn_off) 449 break; 450 451 /* Push all off < linfo[i].insn_off by delta */ 452 for (; i < nr_linfo; i++) 453 linfo[i].insn_off += delta; 454 } 455 456 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 457 const struct bpf_insn *patch, u32 len) 458 { 459 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; 460 const u32 cnt_max = S16_MAX; 461 struct bpf_prog *prog_adj; 462 int err; 463 464 /* Since our patchlet doesn't expand the image, we're done. */ 465 if (insn_delta == 0) { 466 memcpy(prog->insnsi + off, patch, sizeof(*patch)); 467 return prog; 468 } 469 470 insn_adj_cnt = prog->len + insn_delta; 471 472 /* Reject anything that would potentially let the insn->off 473 * target overflow when we have excessive program expansions. 474 * We need to probe here before we do any reallocation where 475 * we afterwards may not fail anymore. 476 */ 477 if (insn_adj_cnt > cnt_max && 478 (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) 479 return ERR_PTR(err); 480 481 /* Several new instructions need to be inserted. Make room 482 * for them. Likely, there's no need for a new allocation as 483 * last page could have large enough tailroom. 484 */ 485 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), 486 GFP_USER); 487 if (!prog_adj) 488 return ERR_PTR(-ENOMEM); 489 490 prog_adj->len = insn_adj_cnt; 491 492 /* Patching happens in 3 steps: 493 * 494 * 1) Move over tail of insnsi from next instruction onwards, 495 * so we can patch the single target insn with one or more 496 * new ones (patching is always from 1 to n insns, n > 0). 497 * 2) Inject new instructions at the target location. 498 * 3) Adjust branch offsets if necessary. 499 */ 500 insn_rest = insn_adj_cnt - off - len; 501 502 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, 503 sizeof(*patch) * insn_rest); 504 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); 505 506 /* We are guaranteed to not fail at this point, otherwise 507 * the ship has sailed to reverse to the original state. An 508 * overflow cannot happen at this point. 509 */ 510 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); 511 512 bpf_adj_linfo(prog_adj, off, insn_delta); 513 514 return prog_adj; 515 } 516 517 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) 518 { 519 /* Branch offsets can't overflow when program is shrinking, no need 520 * to call bpf_adj_branches(..., true) here 521 */ 522 memmove(prog->insnsi + off, prog->insnsi + off + cnt, 523 sizeof(struct bpf_insn) * (prog->len - off - cnt)); 524 prog->len -= cnt; 525 526 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false)); 527 } 528 529 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) 530 { 531 int i; 532 533 for (i = 0; i < fp->aux->func_cnt; i++) 534 bpf_prog_kallsyms_del(fp->aux->func[i]); 535 } 536 537 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) 538 { 539 bpf_prog_kallsyms_del_subprogs(fp); 540 bpf_prog_kallsyms_del(fp); 541 } 542 543 #ifdef CONFIG_BPF_JIT 544 /* All BPF JIT sysctl knobs here. */ 545 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 546 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); 547 int bpf_jit_harden __read_mostly; 548 long bpf_jit_limit __read_mostly; 549 long bpf_jit_limit_max __read_mostly; 550 551 static void 552 bpf_prog_ksym_set_addr(struct bpf_prog *prog) 553 { 554 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); 555 556 prog->aux->ksym.start = (unsigned long) prog->bpf_func; 557 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len; 558 } 559 560 static void 561 bpf_prog_ksym_set_name(struct bpf_prog *prog) 562 { 563 char *sym = prog->aux->ksym.name; 564 const char *end = sym + KSYM_NAME_LEN; 565 const struct btf_type *type; 566 const char *func_name; 567 568 BUILD_BUG_ON(sizeof("bpf_prog_") + 569 sizeof(prog->tag) * 2 + 570 /* name has been null terminated. 571 * We should need +1 for the '_' preceding 572 * the name. However, the null character 573 * is double counted between the name and the 574 * sizeof("bpf_prog_") above, so we omit 575 * the +1 here. 576 */ 577 sizeof(prog->aux->name) > KSYM_NAME_LEN); 578 579 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); 580 sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); 581 582 /* prog->aux->name will be ignored if full btf name is available */ 583 if (prog->aux->func_info_cnt) { 584 type = btf_type_by_id(prog->aux->btf, 585 prog->aux->func_info[prog->aux->func_idx].type_id); 586 func_name = btf_name_by_offset(prog->aux->btf, type->name_off); 587 snprintf(sym, (size_t)(end - sym), "_%s", func_name); 588 return; 589 } 590 591 if (prog->aux->name[0]) 592 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); 593 else 594 *sym = 0; 595 } 596 597 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n) 598 { 599 return container_of(n, struct bpf_ksym, tnode)->start; 600 } 601 602 static __always_inline bool bpf_tree_less(struct latch_tree_node *a, 603 struct latch_tree_node *b) 604 { 605 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b); 606 } 607 608 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) 609 { 610 unsigned long val = (unsigned long)key; 611 const struct bpf_ksym *ksym; 612 613 ksym = container_of(n, struct bpf_ksym, tnode); 614 615 if (val < ksym->start) 616 return -1; 617 if (val >= ksym->end) 618 return 1; 619 620 return 0; 621 } 622 623 static const struct latch_tree_ops bpf_tree_ops = { 624 .less = bpf_tree_less, 625 .comp = bpf_tree_comp, 626 }; 627 628 static DEFINE_SPINLOCK(bpf_lock); 629 static LIST_HEAD(bpf_kallsyms); 630 static struct latch_tree_root bpf_tree __cacheline_aligned; 631 632 void bpf_ksym_add(struct bpf_ksym *ksym) 633 { 634 spin_lock_bh(&bpf_lock); 635 WARN_ON_ONCE(!list_empty(&ksym->lnode)); 636 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms); 637 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 638 spin_unlock_bh(&bpf_lock); 639 } 640 641 static void __bpf_ksym_del(struct bpf_ksym *ksym) 642 { 643 if (list_empty(&ksym->lnode)) 644 return; 645 646 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops); 647 list_del_rcu(&ksym->lnode); 648 } 649 650 void bpf_ksym_del(struct bpf_ksym *ksym) 651 { 652 spin_lock_bh(&bpf_lock); 653 __bpf_ksym_del(ksym); 654 spin_unlock_bh(&bpf_lock); 655 } 656 657 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) 658 { 659 return fp->jited && !bpf_prog_was_classic(fp); 660 } 661 662 void bpf_prog_kallsyms_add(struct bpf_prog *fp) 663 { 664 if (!bpf_prog_kallsyms_candidate(fp) || 665 !bpf_capable()) 666 return; 667 668 bpf_prog_ksym_set_addr(fp); 669 bpf_prog_ksym_set_name(fp); 670 fp->aux->ksym.prog = true; 671 672 bpf_ksym_add(&fp->aux->ksym); 673 } 674 675 void bpf_prog_kallsyms_del(struct bpf_prog *fp) 676 { 677 if (!bpf_prog_kallsyms_candidate(fp)) 678 return; 679 680 bpf_ksym_del(&fp->aux->ksym); 681 } 682 683 static struct bpf_ksym *bpf_ksym_find(unsigned long addr) 684 { 685 struct latch_tree_node *n; 686 687 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); 688 return n ? container_of(n, struct bpf_ksym, tnode) : NULL; 689 } 690 691 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, 692 unsigned long *off, char *sym) 693 { 694 struct bpf_ksym *ksym; 695 char *ret = NULL; 696 697 rcu_read_lock(); 698 ksym = bpf_ksym_find(addr); 699 if (ksym) { 700 unsigned long symbol_start = ksym->start; 701 unsigned long symbol_end = ksym->end; 702 703 strncpy(sym, ksym->name, KSYM_NAME_LEN); 704 705 ret = sym; 706 if (size) 707 *size = symbol_end - symbol_start; 708 if (off) 709 *off = addr - symbol_start; 710 } 711 rcu_read_unlock(); 712 713 return ret; 714 } 715 716 bool is_bpf_text_address(unsigned long addr) 717 { 718 bool ret; 719 720 rcu_read_lock(); 721 ret = bpf_ksym_find(addr) != NULL; 722 rcu_read_unlock(); 723 724 return ret; 725 } 726 727 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) 728 { 729 struct bpf_ksym *ksym = bpf_ksym_find(addr); 730 731 return ksym && ksym->prog ? 732 container_of(ksym, struct bpf_prog_aux, ksym)->prog : 733 NULL; 734 } 735 736 const struct exception_table_entry *search_bpf_extables(unsigned long addr) 737 { 738 const struct exception_table_entry *e = NULL; 739 struct bpf_prog *prog; 740 741 rcu_read_lock(); 742 prog = bpf_prog_ksym_find(addr); 743 if (!prog) 744 goto out; 745 if (!prog->aux->num_exentries) 746 goto out; 747 748 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr); 749 out: 750 rcu_read_unlock(); 751 return e; 752 } 753 754 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 755 char *sym) 756 { 757 struct bpf_ksym *ksym; 758 unsigned int it = 0; 759 int ret = -ERANGE; 760 761 if (!bpf_jit_kallsyms_enabled()) 762 return ret; 763 764 rcu_read_lock(); 765 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) { 766 if (it++ != symnum) 767 continue; 768 769 strncpy(sym, ksym->name, KSYM_NAME_LEN); 770 771 *value = ksym->start; 772 *type = BPF_SYM_ELF_TYPE; 773 774 ret = 0; 775 break; 776 } 777 rcu_read_unlock(); 778 779 return ret; 780 } 781 782 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, 783 struct bpf_jit_poke_descriptor *poke) 784 { 785 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 786 static const u32 poke_tab_max = 1024; 787 u32 slot = prog->aux->size_poke_tab; 788 u32 size = slot + 1; 789 790 if (size > poke_tab_max) 791 return -ENOSPC; 792 if (poke->tailcall_target || poke->tailcall_target_stable || 793 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr) 794 return -EINVAL; 795 796 switch (poke->reason) { 797 case BPF_POKE_REASON_TAIL_CALL: 798 if (!poke->tail_call.map) 799 return -EINVAL; 800 break; 801 default: 802 return -EINVAL; 803 } 804 805 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL); 806 if (!tab) 807 return -ENOMEM; 808 809 memcpy(&tab[slot], poke, sizeof(*poke)); 810 prog->aux->size_poke_tab = size; 811 prog->aux->poke_tab = tab; 812 813 return slot; 814 } 815 816 /* 817 * BPF program pack allocator. 818 * 819 * Most BPF programs are pretty small. Allocating a hole page for each 820 * program is sometime a waste. Many small bpf program also adds pressure 821 * to instruction TLB. To solve this issue, we introduce a BPF program pack 822 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86) 823 * to host BPF programs. 824 */ 825 #define BPF_PROG_CHUNK_SHIFT 6 826 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT) 827 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1)) 828 829 struct bpf_prog_pack { 830 struct list_head list; 831 void *ptr; 832 unsigned long bitmap[]; 833 }; 834 835 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size) 836 { 837 memset(area, 0, size); 838 } 839 840 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE) 841 842 static DEFINE_MUTEX(pack_mutex); 843 static LIST_HEAD(pack_list); 844 845 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with 846 * CONFIG_MMU=n. Use PAGE_SIZE in these cases. 847 */ 848 #ifdef PMD_SIZE 849 #define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes()) 850 #else 851 #define BPF_PROG_PACK_SIZE PAGE_SIZE 852 #endif 853 854 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE) 855 856 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns) 857 { 858 struct bpf_prog_pack *pack; 859 860 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)), 861 GFP_KERNEL); 862 if (!pack) 863 return NULL; 864 pack->ptr = module_alloc(BPF_PROG_PACK_SIZE); 865 if (!pack->ptr) { 866 kfree(pack); 867 return NULL; 868 } 869 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE); 870 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE); 871 list_add_tail(&pack->list, &pack_list); 872 873 set_vm_flush_reset_perms(pack->ptr); 874 set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE); 875 return pack; 876 } 877 878 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns) 879 { 880 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size); 881 struct bpf_prog_pack *pack; 882 unsigned long pos; 883 void *ptr = NULL; 884 885 mutex_lock(&pack_mutex); 886 if (size > BPF_PROG_PACK_SIZE) { 887 size = round_up(size, PAGE_SIZE); 888 ptr = module_alloc(size); 889 if (ptr) { 890 bpf_fill_ill_insns(ptr, size); 891 set_vm_flush_reset_perms(ptr); 892 set_memory_rox((unsigned long)ptr, size / PAGE_SIZE); 893 } 894 goto out; 895 } 896 list_for_each_entry(pack, &pack_list, list) { 897 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, 898 nbits, 0); 899 if (pos < BPF_PROG_CHUNK_COUNT) 900 goto found_free_area; 901 } 902 903 pack = alloc_new_pack(bpf_fill_ill_insns); 904 if (!pack) 905 goto out; 906 907 pos = 0; 908 909 found_free_area: 910 bitmap_set(pack->bitmap, pos, nbits); 911 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT); 912 913 out: 914 mutex_unlock(&pack_mutex); 915 return ptr; 916 } 917 918 void bpf_prog_pack_free(struct bpf_binary_header *hdr) 919 { 920 struct bpf_prog_pack *pack = NULL, *tmp; 921 unsigned int nbits; 922 unsigned long pos; 923 924 mutex_lock(&pack_mutex); 925 if (hdr->size > BPF_PROG_PACK_SIZE) { 926 module_memfree(hdr); 927 goto out; 928 } 929 930 list_for_each_entry(tmp, &pack_list, list) { 931 if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) { 932 pack = tmp; 933 break; 934 } 935 } 936 937 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n")) 938 goto out; 939 940 nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size); 941 pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT; 942 943 WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size), 944 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n"); 945 946 bitmap_clear(pack->bitmap, pos, nbits); 947 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, 948 BPF_PROG_CHUNK_COUNT, 0) == 0) { 949 list_del(&pack->list); 950 module_memfree(pack->ptr); 951 kfree(pack); 952 } 953 out: 954 mutex_unlock(&pack_mutex); 955 } 956 957 static atomic_long_t bpf_jit_current; 958 959 /* Can be overridden by an arch's JIT compiler if it has a custom, 960 * dedicated BPF backend memory area, or if neither of the two 961 * below apply. 962 */ 963 u64 __weak bpf_jit_alloc_exec_limit(void) 964 { 965 #if defined(MODULES_VADDR) 966 return MODULES_END - MODULES_VADDR; 967 #else 968 return VMALLOC_END - VMALLOC_START; 969 #endif 970 } 971 972 static int __init bpf_jit_charge_init(void) 973 { 974 /* Only used as heuristic here to derive limit. */ 975 bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); 976 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, 977 PAGE_SIZE), LONG_MAX); 978 return 0; 979 } 980 pure_initcall(bpf_jit_charge_init); 981 982 int bpf_jit_charge_modmem(u32 size) 983 { 984 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) { 985 if (!bpf_capable()) { 986 atomic_long_sub(size, &bpf_jit_current); 987 return -EPERM; 988 } 989 } 990 991 return 0; 992 } 993 994 void bpf_jit_uncharge_modmem(u32 size) 995 { 996 atomic_long_sub(size, &bpf_jit_current); 997 } 998 999 void *__weak bpf_jit_alloc_exec(unsigned long size) 1000 { 1001 return module_alloc(size); 1002 } 1003 1004 void __weak bpf_jit_free_exec(void *addr) 1005 { 1006 module_memfree(addr); 1007 } 1008 1009 struct bpf_binary_header * 1010 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, 1011 unsigned int alignment, 1012 bpf_jit_fill_hole_t bpf_fill_ill_insns) 1013 { 1014 struct bpf_binary_header *hdr; 1015 u32 size, hole, start; 1016 1017 WARN_ON_ONCE(!is_power_of_2(alignment) || 1018 alignment > BPF_IMAGE_ALIGNMENT); 1019 1020 /* Most of BPF filters are really small, but if some of them 1021 * fill a page, allow at least 128 extra bytes to insert a 1022 * random section of illegal instructions. 1023 */ 1024 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); 1025 1026 if (bpf_jit_charge_modmem(size)) 1027 return NULL; 1028 hdr = bpf_jit_alloc_exec(size); 1029 if (!hdr) { 1030 bpf_jit_uncharge_modmem(size); 1031 return NULL; 1032 } 1033 1034 /* Fill space with illegal/arch-dep instructions. */ 1035 bpf_fill_ill_insns(hdr, size); 1036 1037 hdr->size = size; 1038 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), 1039 PAGE_SIZE - sizeof(*hdr)); 1040 start = get_random_u32_below(hole) & ~(alignment - 1); 1041 1042 /* Leave a random number of instructions before BPF code. */ 1043 *image_ptr = &hdr->image[start]; 1044 1045 return hdr; 1046 } 1047 1048 void bpf_jit_binary_free(struct bpf_binary_header *hdr) 1049 { 1050 u32 size = hdr->size; 1051 1052 bpf_jit_free_exec(hdr); 1053 bpf_jit_uncharge_modmem(size); 1054 } 1055 1056 /* Allocate jit binary from bpf_prog_pack allocator. 1057 * Since the allocated memory is RO+X, the JIT engine cannot write directly 1058 * to the memory. To solve this problem, a RW buffer is also allocated at 1059 * as the same time. The JIT engine should calculate offsets based on the 1060 * RO memory address, but write JITed program to the RW buffer. Once the 1061 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies 1062 * the JITed program to the RO memory. 1063 */ 1064 struct bpf_binary_header * 1065 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr, 1066 unsigned int alignment, 1067 struct bpf_binary_header **rw_header, 1068 u8 **rw_image, 1069 bpf_jit_fill_hole_t bpf_fill_ill_insns) 1070 { 1071 struct bpf_binary_header *ro_header; 1072 u32 size, hole, start; 1073 1074 WARN_ON_ONCE(!is_power_of_2(alignment) || 1075 alignment > BPF_IMAGE_ALIGNMENT); 1076 1077 /* add 16 bytes for a random section of illegal instructions */ 1078 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE); 1079 1080 if (bpf_jit_charge_modmem(size)) 1081 return NULL; 1082 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns); 1083 if (!ro_header) { 1084 bpf_jit_uncharge_modmem(size); 1085 return NULL; 1086 } 1087 1088 *rw_header = kvmalloc(size, GFP_KERNEL); 1089 if (!*rw_header) { 1090 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size)); 1091 bpf_prog_pack_free(ro_header); 1092 bpf_jit_uncharge_modmem(size); 1093 return NULL; 1094 } 1095 1096 /* Fill space with illegal/arch-dep instructions. */ 1097 bpf_fill_ill_insns(*rw_header, size); 1098 (*rw_header)->size = size; 1099 1100 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)), 1101 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header)); 1102 start = get_random_u32_below(hole) & ~(alignment - 1); 1103 1104 *image_ptr = &ro_header->image[start]; 1105 *rw_image = &(*rw_header)->image[start]; 1106 1107 return ro_header; 1108 } 1109 1110 /* Copy JITed text from rw_header to its final location, the ro_header. */ 1111 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, 1112 struct bpf_binary_header *ro_header, 1113 struct bpf_binary_header *rw_header) 1114 { 1115 void *ptr; 1116 1117 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size); 1118 1119 kvfree(rw_header); 1120 1121 if (IS_ERR(ptr)) { 1122 bpf_prog_pack_free(ro_header); 1123 return PTR_ERR(ptr); 1124 } 1125 return 0; 1126 } 1127 1128 /* bpf_jit_binary_pack_free is called in two different scenarios: 1129 * 1) when the program is freed after; 1130 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize). 1131 * For case 2), we need to free both the RO memory and the RW buffer. 1132 * 1133 * bpf_jit_binary_pack_free requires proper ro_header->size. However, 1134 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size 1135 * must be set with either bpf_jit_binary_pack_finalize (normal path) or 1136 * bpf_arch_text_copy (when jit fails). 1137 */ 1138 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, 1139 struct bpf_binary_header *rw_header) 1140 { 1141 u32 size = ro_header->size; 1142 1143 bpf_prog_pack_free(ro_header); 1144 kvfree(rw_header); 1145 bpf_jit_uncharge_modmem(size); 1146 } 1147 1148 struct bpf_binary_header * 1149 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp) 1150 { 1151 unsigned long real_start = (unsigned long)fp->bpf_func; 1152 unsigned long addr; 1153 1154 addr = real_start & BPF_PROG_CHUNK_MASK; 1155 return (void *)addr; 1156 } 1157 1158 static inline struct bpf_binary_header * 1159 bpf_jit_binary_hdr(const struct bpf_prog *fp) 1160 { 1161 unsigned long real_start = (unsigned long)fp->bpf_func; 1162 unsigned long addr; 1163 1164 addr = real_start & PAGE_MASK; 1165 return (void *)addr; 1166 } 1167 1168 /* This symbol is only overridden by archs that have different 1169 * requirements than the usual eBPF JITs, f.e. when they only 1170 * implement cBPF JIT, do not set images read-only, etc. 1171 */ 1172 void __weak bpf_jit_free(struct bpf_prog *fp) 1173 { 1174 if (fp->jited) { 1175 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); 1176 1177 bpf_jit_binary_free(hdr); 1178 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); 1179 } 1180 1181 bpf_prog_unlock_free(fp); 1182 } 1183 1184 int bpf_jit_get_func_addr(const struct bpf_prog *prog, 1185 const struct bpf_insn *insn, bool extra_pass, 1186 u64 *func_addr, bool *func_addr_fixed) 1187 { 1188 s16 off = insn->off; 1189 s32 imm = insn->imm; 1190 u8 *addr; 1191 int err; 1192 1193 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL; 1194 if (!*func_addr_fixed) { 1195 /* Place-holder address till the last pass has collected 1196 * all addresses for JITed subprograms in which case we 1197 * can pick them up from prog->aux. 1198 */ 1199 if (!extra_pass) 1200 addr = NULL; 1201 else if (prog->aux->func && 1202 off >= 0 && off < prog->aux->func_cnt) 1203 addr = (u8 *)prog->aux->func[off]->bpf_func; 1204 else 1205 return -EINVAL; 1206 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && 1207 bpf_jit_supports_far_kfunc_call()) { 1208 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr); 1209 if (err) 1210 return err; 1211 } else { 1212 /* Address of a BPF helper call. Since part of the core 1213 * kernel, it's always at a fixed location. __bpf_call_base 1214 * and the helper with imm relative to it are both in core 1215 * kernel. 1216 */ 1217 addr = (u8 *)__bpf_call_base + imm; 1218 } 1219 1220 *func_addr = (unsigned long)addr; 1221 return 0; 1222 } 1223 1224 static int bpf_jit_blind_insn(const struct bpf_insn *from, 1225 const struct bpf_insn *aux, 1226 struct bpf_insn *to_buff, 1227 bool emit_zext) 1228 { 1229 struct bpf_insn *to = to_buff; 1230 u32 imm_rnd = get_random_u32(); 1231 s16 off; 1232 1233 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); 1234 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); 1235 1236 /* Constraints on AX register: 1237 * 1238 * AX register is inaccessible from user space. It is mapped in 1239 * all JITs, and used here for constant blinding rewrites. It is 1240 * typically "stateless" meaning its contents are only valid within 1241 * the executed instruction, but not across several instructions. 1242 * There are a few exceptions however which are further detailed 1243 * below. 1244 * 1245 * Constant blinding is only used by JITs, not in the interpreter. 1246 * The interpreter uses AX in some occasions as a local temporary 1247 * register e.g. in DIV or MOD instructions. 1248 * 1249 * In restricted circumstances, the verifier can also use the AX 1250 * register for rewrites as long as they do not interfere with 1251 * the above cases! 1252 */ 1253 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) 1254 goto out; 1255 1256 if (from->imm == 0 && 1257 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || 1258 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { 1259 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); 1260 goto out; 1261 } 1262 1263 switch (from->code) { 1264 case BPF_ALU | BPF_ADD | BPF_K: 1265 case BPF_ALU | BPF_SUB | BPF_K: 1266 case BPF_ALU | BPF_AND | BPF_K: 1267 case BPF_ALU | BPF_OR | BPF_K: 1268 case BPF_ALU | BPF_XOR | BPF_K: 1269 case BPF_ALU | BPF_MUL | BPF_K: 1270 case BPF_ALU | BPF_MOV | BPF_K: 1271 case BPF_ALU | BPF_DIV | BPF_K: 1272 case BPF_ALU | BPF_MOD | BPF_K: 1273 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1274 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1275 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX); 1276 break; 1277 1278 case BPF_ALU64 | BPF_ADD | BPF_K: 1279 case BPF_ALU64 | BPF_SUB | BPF_K: 1280 case BPF_ALU64 | BPF_AND | BPF_K: 1281 case BPF_ALU64 | BPF_OR | BPF_K: 1282 case BPF_ALU64 | BPF_XOR | BPF_K: 1283 case BPF_ALU64 | BPF_MUL | BPF_K: 1284 case BPF_ALU64 | BPF_MOV | BPF_K: 1285 case BPF_ALU64 | BPF_DIV | BPF_K: 1286 case BPF_ALU64 | BPF_MOD | BPF_K: 1287 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1288 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1289 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX); 1290 break; 1291 1292 case BPF_JMP | BPF_JEQ | BPF_K: 1293 case BPF_JMP | BPF_JNE | BPF_K: 1294 case BPF_JMP | BPF_JGT | BPF_K: 1295 case BPF_JMP | BPF_JLT | BPF_K: 1296 case BPF_JMP | BPF_JGE | BPF_K: 1297 case BPF_JMP | BPF_JLE | BPF_K: 1298 case BPF_JMP | BPF_JSGT | BPF_K: 1299 case BPF_JMP | BPF_JSLT | BPF_K: 1300 case BPF_JMP | BPF_JSGE | BPF_K: 1301 case BPF_JMP | BPF_JSLE | BPF_K: 1302 case BPF_JMP | BPF_JSET | BPF_K: 1303 /* Accommodate for extra offset in case of a backjump. */ 1304 off = from->off; 1305 if (off < 0) 1306 off -= 2; 1307 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1308 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1309 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); 1310 break; 1311 1312 case BPF_JMP32 | BPF_JEQ | BPF_K: 1313 case BPF_JMP32 | BPF_JNE | BPF_K: 1314 case BPF_JMP32 | BPF_JGT | BPF_K: 1315 case BPF_JMP32 | BPF_JLT | BPF_K: 1316 case BPF_JMP32 | BPF_JGE | BPF_K: 1317 case BPF_JMP32 | BPF_JLE | BPF_K: 1318 case BPF_JMP32 | BPF_JSGT | BPF_K: 1319 case BPF_JMP32 | BPF_JSLT | BPF_K: 1320 case BPF_JMP32 | BPF_JSGE | BPF_K: 1321 case BPF_JMP32 | BPF_JSLE | BPF_K: 1322 case BPF_JMP32 | BPF_JSET | BPF_K: 1323 /* Accommodate for extra offset in case of a backjump. */ 1324 off = from->off; 1325 if (off < 0) 1326 off -= 2; 1327 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1328 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1329 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, 1330 off); 1331 break; 1332 1333 case BPF_LD | BPF_IMM | BPF_DW: 1334 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); 1335 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1336 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 1337 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); 1338 break; 1339 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ 1340 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1341 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1342 if (emit_zext) 1343 *to++ = BPF_ZEXT_REG(BPF_REG_AX); 1344 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1345 break; 1346 1347 case BPF_ST | BPF_MEM | BPF_DW: 1348 case BPF_ST | BPF_MEM | BPF_W: 1349 case BPF_ST | BPF_MEM | BPF_H: 1350 case BPF_ST | BPF_MEM | BPF_B: 1351 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); 1352 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1353 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1354 break; 1355 } 1356 out: 1357 return to - to_buff; 1358 } 1359 1360 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, 1361 gfp_t gfp_extra_flags) 1362 { 1363 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; 1364 struct bpf_prog *fp; 1365 1366 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); 1367 if (fp != NULL) { 1368 /* aux->prog still points to the fp_other one, so 1369 * when promoting the clone to the real program, 1370 * this still needs to be adapted. 1371 */ 1372 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); 1373 } 1374 1375 return fp; 1376 } 1377 1378 static void bpf_prog_clone_free(struct bpf_prog *fp) 1379 { 1380 /* aux was stolen by the other clone, so we cannot free 1381 * it from this path! It will be freed eventually by the 1382 * other program on release. 1383 * 1384 * At this point, we don't need a deferred release since 1385 * clone is guaranteed to not be locked. 1386 */ 1387 fp->aux = NULL; 1388 fp->stats = NULL; 1389 fp->active = NULL; 1390 __bpf_prog_free(fp); 1391 } 1392 1393 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) 1394 { 1395 /* We have to repoint aux->prog to self, as we don't 1396 * know whether fp here is the clone or the original. 1397 */ 1398 fp->aux->prog = fp; 1399 bpf_prog_clone_free(fp_other); 1400 } 1401 1402 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) 1403 { 1404 struct bpf_insn insn_buff[16], aux[2]; 1405 struct bpf_prog *clone, *tmp; 1406 int insn_delta, insn_cnt; 1407 struct bpf_insn *insn; 1408 int i, rewritten; 1409 1410 if (!prog->blinding_requested || prog->blinded) 1411 return prog; 1412 1413 clone = bpf_prog_clone_create(prog, GFP_USER); 1414 if (!clone) 1415 return ERR_PTR(-ENOMEM); 1416 1417 insn_cnt = clone->len; 1418 insn = clone->insnsi; 1419 1420 for (i = 0; i < insn_cnt; i++, insn++) { 1421 if (bpf_pseudo_func(insn)) { 1422 /* ld_imm64 with an address of bpf subprog is not 1423 * a user controlled constant. Don't randomize it, 1424 * since it will conflict with jit_subprogs() logic. 1425 */ 1426 insn++; 1427 i++; 1428 continue; 1429 } 1430 1431 /* We temporarily need to hold the original ld64 insn 1432 * so that we can still access the first part in the 1433 * second blinding run. 1434 */ 1435 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && 1436 insn[1].code == 0) 1437 memcpy(aux, insn, sizeof(aux)); 1438 1439 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, 1440 clone->aux->verifier_zext); 1441 if (!rewritten) 1442 continue; 1443 1444 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); 1445 if (IS_ERR(tmp)) { 1446 /* Patching may have repointed aux->prog during 1447 * realloc from the original one, so we need to 1448 * fix it up here on error. 1449 */ 1450 bpf_jit_prog_release_other(prog, clone); 1451 return tmp; 1452 } 1453 1454 clone = tmp; 1455 insn_delta = rewritten - 1; 1456 1457 /* Walk new program and skip insns we just inserted. */ 1458 insn = clone->insnsi + i + insn_delta; 1459 insn_cnt += insn_delta; 1460 i += insn_delta; 1461 } 1462 1463 clone->blinded = 1; 1464 return clone; 1465 } 1466 #endif /* CONFIG_BPF_JIT */ 1467 1468 /* Base function for offset calculation. Needs to go into .text section, 1469 * therefore keeping it non-static as well; will also be used by JITs 1470 * anyway later on, so do not let the compiler omit it. This also needs 1471 * to go into kallsyms for correlation from e.g. bpftool, so naming 1472 * must not change. 1473 */ 1474 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1475 { 1476 return 0; 1477 } 1478 EXPORT_SYMBOL_GPL(__bpf_call_base); 1479 1480 /* All UAPI available opcodes. */ 1481 #define BPF_INSN_MAP(INSN_2, INSN_3) \ 1482 /* 32 bit ALU operations. */ \ 1483 /* Register based. */ \ 1484 INSN_3(ALU, ADD, X), \ 1485 INSN_3(ALU, SUB, X), \ 1486 INSN_3(ALU, AND, X), \ 1487 INSN_3(ALU, OR, X), \ 1488 INSN_3(ALU, LSH, X), \ 1489 INSN_3(ALU, RSH, X), \ 1490 INSN_3(ALU, XOR, X), \ 1491 INSN_3(ALU, MUL, X), \ 1492 INSN_3(ALU, MOV, X), \ 1493 INSN_3(ALU, ARSH, X), \ 1494 INSN_3(ALU, DIV, X), \ 1495 INSN_3(ALU, MOD, X), \ 1496 INSN_2(ALU, NEG), \ 1497 INSN_3(ALU, END, TO_BE), \ 1498 INSN_3(ALU, END, TO_LE), \ 1499 /* Immediate based. */ \ 1500 INSN_3(ALU, ADD, K), \ 1501 INSN_3(ALU, SUB, K), \ 1502 INSN_3(ALU, AND, K), \ 1503 INSN_3(ALU, OR, K), \ 1504 INSN_3(ALU, LSH, K), \ 1505 INSN_3(ALU, RSH, K), \ 1506 INSN_3(ALU, XOR, K), \ 1507 INSN_3(ALU, MUL, K), \ 1508 INSN_3(ALU, MOV, K), \ 1509 INSN_3(ALU, ARSH, K), \ 1510 INSN_3(ALU, DIV, K), \ 1511 INSN_3(ALU, MOD, K), \ 1512 /* 64 bit ALU operations. */ \ 1513 /* Register based. */ \ 1514 INSN_3(ALU64, ADD, X), \ 1515 INSN_3(ALU64, SUB, X), \ 1516 INSN_3(ALU64, AND, X), \ 1517 INSN_3(ALU64, OR, X), \ 1518 INSN_3(ALU64, LSH, X), \ 1519 INSN_3(ALU64, RSH, X), \ 1520 INSN_3(ALU64, XOR, X), \ 1521 INSN_3(ALU64, MUL, X), \ 1522 INSN_3(ALU64, MOV, X), \ 1523 INSN_3(ALU64, ARSH, X), \ 1524 INSN_3(ALU64, DIV, X), \ 1525 INSN_3(ALU64, MOD, X), \ 1526 INSN_2(ALU64, NEG), \ 1527 INSN_3(ALU64, END, TO_LE), \ 1528 /* Immediate based. */ \ 1529 INSN_3(ALU64, ADD, K), \ 1530 INSN_3(ALU64, SUB, K), \ 1531 INSN_3(ALU64, AND, K), \ 1532 INSN_3(ALU64, OR, K), \ 1533 INSN_3(ALU64, LSH, K), \ 1534 INSN_3(ALU64, RSH, K), \ 1535 INSN_3(ALU64, XOR, K), \ 1536 INSN_3(ALU64, MUL, K), \ 1537 INSN_3(ALU64, MOV, K), \ 1538 INSN_3(ALU64, ARSH, K), \ 1539 INSN_3(ALU64, DIV, K), \ 1540 INSN_3(ALU64, MOD, K), \ 1541 /* Call instruction. */ \ 1542 INSN_2(JMP, CALL), \ 1543 /* Exit instruction. */ \ 1544 INSN_2(JMP, EXIT), \ 1545 /* 32-bit Jump instructions. */ \ 1546 /* Register based. */ \ 1547 INSN_3(JMP32, JEQ, X), \ 1548 INSN_3(JMP32, JNE, X), \ 1549 INSN_3(JMP32, JGT, X), \ 1550 INSN_3(JMP32, JLT, X), \ 1551 INSN_3(JMP32, JGE, X), \ 1552 INSN_3(JMP32, JLE, X), \ 1553 INSN_3(JMP32, JSGT, X), \ 1554 INSN_3(JMP32, JSLT, X), \ 1555 INSN_3(JMP32, JSGE, X), \ 1556 INSN_3(JMP32, JSLE, X), \ 1557 INSN_3(JMP32, JSET, X), \ 1558 /* Immediate based. */ \ 1559 INSN_3(JMP32, JEQ, K), \ 1560 INSN_3(JMP32, JNE, K), \ 1561 INSN_3(JMP32, JGT, K), \ 1562 INSN_3(JMP32, JLT, K), \ 1563 INSN_3(JMP32, JGE, K), \ 1564 INSN_3(JMP32, JLE, K), \ 1565 INSN_3(JMP32, JSGT, K), \ 1566 INSN_3(JMP32, JSLT, K), \ 1567 INSN_3(JMP32, JSGE, K), \ 1568 INSN_3(JMP32, JSLE, K), \ 1569 INSN_3(JMP32, JSET, K), \ 1570 /* Jump instructions. */ \ 1571 /* Register based. */ \ 1572 INSN_3(JMP, JEQ, X), \ 1573 INSN_3(JMP, JNE, X), \ 1574 INSN_3(JMP, JGT, X), \ 1575 INSN_3(JMP, JLT, X), \ 1576 INSN_3(JMP, JGE, X), \ 1577 INSN_3(JMP, JLE, X), \ 1578 INSN_3(JMP, JSGT, X), \ 1579 INSN_3(JMP, JSLT, X), \ 1580 INSN_3(JMP, JSGE, X), \ 1581 INSN_3(JMP, JSLE, X), \ 1582 INSN_3(JMP, JSET, X), \ 1583 /* Immediate based. */ \ 1584 INSN_3(JMP, JEQ, K), \ 1585 INSN_3(JMP, JNE, K), \ 1586 INSN_3(JMP, JGT, K), \ 1587 INSN_3(JMP, JLT, K), \ 1588 INSN_3(JMP, JGE, K), \ 1589 INSN_3(JMP, JLE, K), \ 1590 INSN_3(JMP, JSGT, K), \ 1591 INSN_3(JMP, JSLT, K), \ 1592 INSN_3(JMP, JSGE, K), \ 1593 INSN_3(JMP, JSLE, K), \ 1594 INSN_3(JMP, JSET, K), \ 1595 INSN_2(JMP, JA), \ 1596 /* Store instructions. */ \ 1597 /* Register based. */ \ 1598 INSN_3(STX, MEM, B), \ 1599 INSN_3(STX, MEM, H), \ 1600 INSN_3(STX, MEM, W), \ 1601 INSN_3(STX, MEM, DW), \ 1602 INSN_3(STX, ATOMIC, W), \ 1603 INSN_3(STX, ATOMIC, DW), \ 1604 /* Immediate based. */ \ 1605 INSN_3(ST, MEM, B), \ 1606 INSN_3(ST, MEM, H), \ 1607 INSN_3(ST, MEM, W), \ 1608 INSN_3(ST, MEM, DW), \ 1609 /* Load instructions. */ \ 1610 /* Register based. */ \ 1611 INSN_3(LDX, MEM, B), \ 1612 INSN_3(LDX, MEM, H), \ 1613 INSN_3(LDX, MEM, W), \ 1614 INSN_3(LDX, MEM, DW), \ 1615 INSN_3(LDX, MEMSX, B), \ 1616 INSN_3(LDX, MEMSX, H), \ 1617 INSN_3(LDX, MEMSX, W), \ 1618 /* Immediate based. */ \ 1619 INSN_3(LD, IMM, DW) 1620 1621 bool bpf_opcode_in_insntable(u8 code) 1622 { 1623 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true 1624 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true 1625 static const bool public_insntable[256] = { 1626 [0 ... 255] = false, 1627 /* Now overwrite non-defaults ... */ 1628 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL), 1629 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */ 1630 [BPF_LD | BPF_ABS | BPF_B] = true, 1631 [BPF_LD | BPF_ABS | BPF_H] = true, 1632 [BPF_LD | BPF_ABS | BPF_W] = true, 1633 [BPF_LD | BPF_IND | BPF_B] = true, 1634 [BPF_LD | BPF_IND | BPF_H] = true, 1635 [BPF_LD | BPF_IND | BPF_W] = true, 1636 }; 1637 #undef BPF_INSN_3_TBL 1638 #undef BPF_INSN_2_TBL 1639 return public_insntable[code]; 1640 } 1641 1642 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1643 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) 1644 { 1645 memset(dst, 0, size); 1646 return -EFAULT; 1647 } 1648 1649 /** 1650 * ___bpf_prog_run - run eBPF program on a given context 1651 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers 1652 * @insn: is the array of eBPF instructions 1653 * 1654 * Decode and execute eBPF instructions. 1655 * 1656 * Return: whatever value is in %BPF_R0 at program exit 1657 */ 1658 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) 1659 { 1660 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y 1661 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z 1662 static const void * const jumptable[256] __annotate_jump_table = { 1663 [0 ... 255] = &&default_label, 1664 /* Now overwrite non-defaults ... */ 1665 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), 1666 /* Non-UAPI available opcodes. */ 1667 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, 1668 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, 1669 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC, 1670 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, 1671 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, 1672 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, 1673 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW, 1674 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B, 1675 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H, 1676 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W, 1677 }; 1678 #undef BPF_INSN_3_LBL 1679 #undef BPF_INSN_2_LBL 1680 u32 tail_call_cnt = 0; 1681 1682 #define CONT ({ insn++; goto select_insn; }) 1683 #define CONT_JMP ({ insn++; goto select_insn; }) 1684 1685 select_insn: 1686 goto *jumptable[insn->code]; 1687 1688 /* Explicitly mask the register-based shift amounts with 63 or 31 1689 * to avoid undefined behavior. Normally this won't affect the 1690 * generated code, for example, in case of native 64 bit archs such 1691 * as x86-64 or arm64, the compiler is optimizing the AND away for 1692 * the interpreter. In case of JITs, each of the JIT backends compiles 1693 * the BPF shift operations to machine instructions which produce 1694 * implementation-defined results in such a case; the resulting 1695 * contents of the register may be arbitrary, but program behaviour 1696 * as a whole remains defined. In other words, in case of JIT backends, 1697 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation. 1698 */ 1699 /* ALU (shifts) */ 1700 #define SHT(OPCODE, OP) \ 1701 ALU64_##OPCODE##_X: \ 1702 DST = DST OP (SRC & 63); \ 1703 CONT; \ 1704 ALU_##OPCODE##_X: \ 1705 DST = (u32) DST OP ((u32) SRC & 31); \ 1706 CONT; \ 1707 ALU64_##OPCODE##_K: \ 1708 DST = DST OP IMM; \ 1709 CONT; \ 1710 ALU_##OPCODE##_K: \ 1711 DST = (u32) DST OP (u32) IMM; \ 1712 CONT; 1713 /* ALU (rest) */ 1714 #define ALU(OPCODE, OP) \ 1715 ALU64_##OPCODE##_X: \ 1716 DST = DST OP SRC; \ 1717 CONT; \ 1718 ALU_##OPCODE##_X: \ 1719 DST = (u32) DST OP (u32) SRC; \ 1720 CONT; \ 1721 ALU64_##OPCODE##_K: \ 1722 DST = DST OP IMM; \ 1723 CONT; \ 1724 ALU_##OPCODE##_K: \ 1725 DST = (u32) DST OP (u32) IMM; \ 1726 CONT; 1727 ALU(ADD, +) 1728 ALU(SUB, -) 1729 ALU(AND, &) 1730 ALU(OR, |) 1731 ALU(XOR, ^) 1732 ALU(MUL, *) 1733 SHT(LSH, <<) 1734 SHT(RSH, >>) 1735 #undef SHT 1736 #undef ALU 1737 ALU_NEG: 1738 DST = (u32) -DST; 1739 CONT; 1740 ALU64_NEG: 1741 DST = -DST; 1742 CONT; 1743 ALU_MOV_X: 1744 switch (OFF) { 1745 case 0: 1746 DST = (u32) SRC; 1747 break; 1748 case 8: 1749 DST = (u32)(s8) SRC; 1750 break; 1751 case 16: 1752 DST = (u32)(s16) SRC; 1753 break; 1754 } 1755 CONT; 1756 ALU_MOV_K: 1757 DST = (u32) IMM; 1758 CONT; 1759 ALU64_MOV_X: 1760 switch (OFF) { 1761 case 0: 1762 DST = SRC; 1763 break; 1764 case 8: 1765 DST = (s8) SRC; 1766 break; 1767 case 16: 1768 DST = (s16) SRC; 1769 break; 1770 case 32: 1771 DST = (s32) SRC; 1772 break; 1773 } 1774 CONT; 1775 ALU64_MOV_K: 1776 DST = IMM; 1777 CONT; 1778 LD_IMM_DW: 1779 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; 1780 insn++; 1781 CONT; 1782 ALU_ARSH_X: 1783 DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); 1784 CONT; 1785 ALU_ARSH_K: 1786 DST = (u64) (u32) (((s32) DST) >> IMM); 1787 CONT; 1788 ALU64_ARSH_X: 1789 (*(s64 *) &DST) >>= (SRC & 63); 1790 CONT; 1791 ALU64_ARSH_K: 1792 (*(s64 *) &DST) >>= IMM; 1793 CONT; 1794 ALU64_MOD_X: 1795 div64_u64_rem(DST, SRC, &AX); 1796 DST = AX; 1797 CONT; 1798 ALU_MOD_X: 1799 AX = (u32) DST; 1800 DST = do_div(AX, (u32) SRC); 1801 CONT; 1802 ALU64_MOD_K: 1803 div64_u64_rem(DST, IMM, &AX); 1804 DST = AX; 1805 CONT; 1806 ALU_MOD_K: 1807 AX = (u32) DST; 1808 DST = do_div(AX, (u32) IMM); 1809 CONT; 1810 ALU64_DIV_X: 1811 DST = div64_u64(DST, SRC); 1812 CONT; 1813 ALU_DIV_X: 1814 AX = (u32) DST; 1815 do_div(AX, (u32) SRC); 1816 DST = (u32) AX; 1817 CONT; 1818 ALU64_DIV_K: 1819 DST = div64_u64(DST, IMM); 1820 CONT; 1821 ALU_DIV_K: 1822 AX = (u32) DST; 1823 do_div(AX, (u32) IMM); 1824 DST = (u32) AX; 1825 CONT; 1826 ALU_END_TO_BE: 1827 switch (IMM) { 1828 case 16: 1829 DST = (__force u16) cpu_to_be16(DST); 1830 break; 1831 case 32: 1832 DST = (__force u32) cpu_to_be32(DST); 1833 break; 1834 case 64: 1835 DST = (__force u64) cpu_to_be64(DST); 1836 break; 1837 } 1838 CONT; 1839 ALU_END_TO_LE: 1840 switch (IMM) { 1841 case 16: 1842 DST = (__force u16) cpu_to_le16(DST); 1843 break; 1844 case 32: 1845 DST = (__force u32) cpu_to_le32(DST); 1846 break; 1847 case 64: 1848 DST = (__force u64) cpu_to_le64(DST); 1849 break; 1850 } 1851 CONT; 1852 ALU64_END_TO_LE: 1853 switch (IMM) { 1854 case 16: 1855 DST = (__force u16) __swab16(DST); 1856 break; 1857 case 32: 1858 DST = (__force u32) __swab32(DST); 1859 break; 1860 case 64: 1861 DST = (__force u64) __swab64(DST); 1862 break; 1863 } 1864 CONT; 1865 1866 /* CALL */ 1867 JMP_CALL: 1868 /* Function call scratches BPF_R1-BPF_R5 registers, 1869 * preserves BPF_R6-BPF_R9, and stores return value 1870 * into BPF_R0. 1871 */ 1872 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, 1873 BPF_R4, BPF_R5); 1874 CONT; 1875 1876 JMP_CALL_ARGS: 1877 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, 1878 BPF_R3, BPF_R4, 1879 BPF_R5, 1880 insn + insn->off + 1); 1881 CONT; 1882 1883 JMP_TAIL_CALL: { 1884 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 1885 struct bpf_array *array = container_of(map, struct bpf_array, map); 1886 struct bpf_prog *prog; 1887 u32 index = BPF_R3; 1888 1889 if (unlikely(index >= array->map.max_entries)) 1890 goto out; 1891 1892 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT)) 1893 goto out; 1894 1895 tail_call_cnt++; 1896 1897 prog = READ_ONCE(array->ptrs[index]); 1898 if (!prog) 1899 goto out; 1900 1901 /* ARG1 at this point is guaranteed to point to CTX from 1902 * the verifier side due to the fact that the tail call is 1903 * handled like a helper, that is, bpf_tail_call_proto, 1904 * where arg1_type is ARG_PTR_TO_CTX. 1905 */ 1906 insn = prog->insnsi; 1907 goto select_insn; 1908 out: 1909 CONT; 1910 } 1911 JMP_JA: 1912 insn += insn->off; 1913 CONT; 1914 JMP_EXIT: 1915 return BPF_R0; 1916 /* JMP */ 1917 #define COND_JMP(SIGN, OPCODE, CMP_OP) \ 1918 JMP_##OPCODE##_X: \ 1919 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ 1920 insn += insn->off; \ 1921 CONT_JMP; \ 1922 } \ 1923 CONT; \ 1924 JMP32_##OPCODE##_X: \ 1925 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ 1926 insn += insn->off; \ 1927 CONT_JMP; \ 1928 } \ 1929 CONT; \ 1930 JMP_##OPCODE##_K: \ 1931 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ 1932 insn += insn->off; \ 1933 CONT_JMP; \ 1934 } \ 1935 CONT; \ 1936 JMP32_##OPCODE##_K: \ 1937 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ 1938 insn += insn->off; \ 1939 CONT_JMP; \ 1940 } \ 1941 CONT; 1942 COND_JMP(u, JEQ, ==) 1943 COND_JMP(u, JNE, !=) 1944 COND_JMP(u, JGT, >) 1945 COND_JMP(u, JLT, <) 1946 COND_JMP(u, JGE, >=) 1947 COND_JMP(u, JLE, <=) 1948 COND_JMP(u, JSET, &) 1949 COND_JMP(s, JSGT, >) 1950 COND_JMP(s, JSLT, <) 1951 COND_JMP(s, JSGE, >=) 1952 COND_JMP(s, JSLE, <=) 1953 #undef COND_JMP 1954 /* ST, STX and LDX*/ 1955 ST_NOSPEC: 1956 /* Speculation barrier for mitigating Speculative Store Bypass. 1957 * In case of arm64, we rely on the firmware mitigation as 1958 * controlled via the ssbd kernel parameter. Whenever the 1959 * mitigation is enabled, it works for all of the kernel code 1960 * with no need to provide any additional instructions here. 1961 * In case of x86, we use 'lfence' insn for mitigation. We 1962 * reuse preexisting logic from Spectre v1 mitigation that 1963 * happens to produce the required code on x86 for v4 as well. 1964 */ 1965 barrier_nospec(); 1966 CONT; 1967 #define LDST(SIZEOP, SIZE) \ 1968 STX_MEM_##SIZEOP: \ 1969 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ 1970 CONT; \ 1971 ST_MEM_##SIZEOP: \ 1972 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ 1973 CONT; \ 1974 LDX_MEM_##SIZEOP: \ 1975 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 1976 CONT; \ 1977 LDX_PROBE_MEM_##SIZEOP: \ 1978 bpf_probe_read_kernel(&DST, sizeof(SIZE), \ 1979 (const void *)(long) (SRC + insn->off)); \ 1980 DST = *((SIZE *)&DST); \ 1981 CONT; 1982 1983 LDST(B, u8) 1984 LDST(H, u16) 1985 LDST(W, u32) 1986 LDST(DW, u64) 1987 #undef LDST 1988 1989 #define LDSX(SIZEOP, SIZE) \ 1990 LDX_MEMSX_##SIZEOP: \ 1991 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ 1992 CONT; \ 1993 LDX_PROBE_MEMSX_##SIZEOP: \ 1994 bpf_probe_read_kernel(&DST, sizeof(SIZE), \ 1995 (const void *)(long) (SRC + insn->off)); \ 1996 DST = *((SIZE *)&DST); \ 1997 CONT; 1998 1999 LDSX(B, s8) 2000 LDSX(H, s16) 2001 LDSX(W, s32) 2002 #undef LDSX 2003 2004 #define ATOMIC_ALU_OP(BOP, KOP) \ 2005 case BOP: \ 2006 if (BPF_SIZE(insn->code) == BPF_W) \ 2007 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \ 2008 (DST + insn->off)); \ 2009 else \ 2010 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \ 2011 (DST + insn->off)); \ 2012 break; \ 2013 case BOP | BPF_FETCH: \ 2014 if (BPF_SIZE(insn->code) == BPF_W) \ 2015 SRC = (u32) atomic_fetch_##KOP( \ 2016 (u32) SRC, \ 2017 (atomic_t *)(unsigned long) (DST + insn->off)); \ 2018 else \ 2019 SRC = (u64) atomic64_fetch_##KOP( \ 2020 (u64) SRC, \ 2021 (atomic64_t *)(unsigned long) (DST + insn->off)); \ 2022 break; 2023 2024 STX_ATOMIC_DW: 2025 STX_ATOMIC_W: 2026 switch (IMM) { 2027 ATOMIC_ALU_OP(BPF_ADD, add) 2028 ATOMIC_ALU_OP(BPF_AND, and) 2029 ATOMIC_ALU_OP(BPF_OR, or) 2030 ATOMIC_ALU_OP(BPF_XOR, xor) 2031 #undef ATOMIC_ALU_OP 2032 2033 case BPF_XCHG: 2034 if (BPF_SIZE(insn->code) == BPF_W) 2035 SRC = (u32) atomic_xchg( 2036 (atomic_t *)(unsigned long) (DST + insn->off), 2037 (u32) SRC); 2038 else 2039 SRC = (u64) atomic64_xchg( 2040 (atomic64_t *)(unsigned long) (DST + insn->off), 2041 (u64) SRC); 2042 break; 2043 case BPF_CMPXCHG: 2044 if (BPF_SIZE(insn->code) == BPF_W) 2045 BPF_R0 = (u32) atomic_cmpxchg( 2046 (atomic_t *)(unsigned long) (DST + insn->off), 2047 (u32) BPF_R0, (u32) SRC); 2048 else 2049 BPF_R0 = (u64) atomic64_cmpxchg( 2050 (atomic64_t *)(unsigned long) (DST + insn->off), 2051 (u64) BPF_R0, (u64) SRC); 2052 break; 2053 2054 default: 2055 goto default_label; 2056 } 2057 CONT; 2058 2059 default_label: 2060 /* If we ever reach this, we have a bug somewhere. Die hard here 2061 * instead of just returning 0; we could be somewhere in a subprog, 2062 * so execution could continue otherwise which we do /not/ want. 2063 * 2064 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable(). 2065 */ 2066 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n", 2067 insn->code, insn->imm); 2068 BUG_ON(1); 2069 return 0; 2070 } 2071 2072 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size 2073 #define DEFINE_BPF_PROG_RUN(stack_size) \ 2074 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ 2075 { \ 2076 u64 stack[stack_size / sizeof(u64)]; \ 2077 u64 regs[MAX_BPF_EXT_REG] = {}; \ 2078 \ 2079 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2080 ARG1 = (u64) (unsigned long) ctx; \ 2081 return ___bpf_prog_run(regs, insn); \ 2082 } 2083 2084 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size 2085 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \ 2086 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ 2087 const struct bpf_insn *insn) \ 2088 { \ 2089 u64 stack[stack_size / sizeof(u64)]; \ 2090 u64 regs[MAX_BPF_EXT_REG]; \ 2091 \ 2092 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ 2093 BPF_R1 = r1; \ 2094 BPF_R2 = r2; \ 2095 BPF_R3 = r3; \ 2096 BPF_R4 = r4; \ 2097 BPF_R5 = r5; \ 2098 return ___bpf_prog_run(regs, insn); \ 2099 } 2100 2101 #define EVAL1(FN, X) FN(X) 2102 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y) 2103 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y) 2104 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y) 2105 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y) 2106 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y) 2107 2108 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192); 2109 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384); 2110 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512); 2111 2112 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192); 2113 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384); 2114 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512); 2115 2116 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size), 2117 2118 static unsigned int (*interpreters[])(const void *ctx, 2119 const struct bpf_insn *insn) = { 2120 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2121 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2122 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2123 }; 2124 #undef PROG_NAME_LIST 2125 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size), 2126 static __maybe_unused 2127 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, 2128 const struct bpf_insn *insn) = { 2129 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) 2130 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) 2131 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 2132 }; 2133 #undef PROG_NAME_LIST 2134 2135 #ifdef CONFIG_BPF_SYSCALL 2136 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) 2137 { 2138 stack_depth = max_t(u32, stack_depth, 1); 2139 insn->off = (s16) insn->imm; 2140 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - 2141 __bpf_call_base_args; 2142 insn->code = BPF_JMP | BPF_CALL_ARGS; 2143 } 2144 #endif 2145 #else 2146 static unsigned int __bpf_prog_ret0_warn(const void *ctx, 2147 const struct bpf_insn *insn) 2148 { 2149 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON 2150 * is not working properly, so warn about it! 2151 */ 2152 WARN_ON_ONCE(1); 2153 return 0; 2154 } 2155 #endif 2156 2157 bool bpf_prog_map_compatible(struct bpf_map *map, 2158 const struct bpf_prog *fp) 2159 { 2160 enum bpf_prog_type prog_type = resolve_prog_type(fp); 2161 bool ret; 2162 2163 if (fp->kprobe_override) 2164 return false; 2165 2166 /* XDP programs inserted into maps are not guaranteed to run on 2167 * a particular netdev (and can run outside driver context entirely 2168 * in the case of devmap and cpumap). Until device checks 2169 * are implemented, prohibit adding dev-bound programs to program maps. 2170 */ 2171 if (bpf_prog_is_dev_bound(fp->aux)) 2172 return false; 2173 2174 spin_lock(&map->owner.lock); 2175 if (!map->owner.type) { 2176 /* There's no owner yet where we could check for 2177 * compatibility. 2178 */ 2179 map->owner.type = prog_type; 2180 map->owner.jited = fp->jited; 2181 map->owner.xdp_has_frags = fp->aux->xdp_has_frags; 2182 ret = true; 2183 } else { 2184 ret = map->owner.type == prog_type && 2185 map->owner.jited == fp->jited && 2186 map->owner.xdp_has_frags == fp->aux->xdp_has_frags; 2187 } 2188 spin_unlock(&map->owner.lock); 2189 2190 return ret; 2191 } 2192 2193 static int bpf_check_tail_call(const struct bpf_prog *fp) 2194 { 2195 struct bpf_prog_aux *aux = fp->aux; 2196 int i, ret = 0; 2197 2198 mutex_lock(&aux->used_maps_mutex); 2199 for (i = 0; i < aux->used_map_cnt; i++) { 2200 struct bpf_map *map = aux->used_maps[i]; 2201 2202 if (!map_type_contains_progs(map)) 2203 continue; 2204 2205 if (!bpf_prog_map_compatible(map, fp)) { 2206 ret = -EINVAL; 2207 goto out; 2208 } 2209 } 2210 2211 out: 2212 mutex_unlock(&aux->used_maps_mutex); 2213 return ret; 2214 } 2215 2216 static void bpf_prog_select_func(struct bpf_prog *fp) 2217 { 2218 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 2219 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 2220 2221 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 2222 #else 2223 fp->bpf_func = __bpf_prog_ret0_warn; 2224 #endif 2225 } 2226 2227 /** 2228 * bpf_prog_select_runtime - select exec runtime for BPF program 2229 * @fp: bpf_prog populated with BPF program 2230 * @err: pointer to error variable 2231 * 2232 * Try to JIT eBPF program, if JIT is not available, use interpreter. 2233 * The BPF program will be executed via bpf_prog_run() function. 2234 * 2235 * Return: the &fp argument along with &err set to 0 for success or 2236 * a negative errno code on failure 2237 */ 2238 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 2239 { 2240 /* In case of BPF to BPF calls, verifier did all the prep 2241 * work with regards to JITing, etc. 2242 */ 2243 bool jit_needed = false; 2244 2245 if (fp->bpf_func) 2246 goto finalize; 2247 2248 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) || 2249 bpf_prog_has_kfunc_call(fp)) 2250 jit_needed = true; 2251 2252 bpf_prog_select_func(fp); 2253 2254 /* eBPF JITs can rewrite the program in case constant 2255 * blinding is active. However, in case of error during 2256 * blinding, bpf_int_jit_compile() must always return a 2257 * valid program, which in this case would simply not 2258 * be JITed, but falls back to the interpreter. 2259 */ 2260 if (!bpf_prog_is_offloaded(fp->aux)) { 2261 *err = bpf_prog_alloc_jited_linfo(fp); 2262 if (*err) 2263 return fp; 2264 2265 fp = bpf_int_jit_compile(fp); 2266 bpf_prog_jit_attempt_done(fp); 2267 if (!fp->jited && jit_needed) { 2268 *err = -ENOTSUPP; 2269 return fp; 2270 } 2271 } else { 2272 *err = bpf_prog_offload_compile(fp); 2273 if (*err) 2274 return fp; 2275 } 2276 2277 finalize: 2278 bpf_prog_lock_ro(fp); 2279 2280 /* The tail call compatibility check can only be done at 2281 * this late stage as we need to determine, if we deal 2282 * with JITed or non JITed program concatenations and not 2283 * all eBPF JITs might immediately support all features. 2284 */ 2285 *err = bpf_check_tail_call(fp); 2286 2287 return fp; 2288 } 2289 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 2290 2291 static unsigned int __bpf_prog_ret1(const void *ctx, 2292 const struct bpf_insn *insn) 2293 { 2294 return 1; 2295 } 2296 2297 static struct bpf_prog_dummy { 2298 struct bpf_prog prog; 2299 } dummy_bpf_prog = { 2300 .prog = { 2301 .bpf_func = __bpf_prog_ret1, 2302 }, 2303 }; 2304 2305 struct bpf_empty_prog_array bpf_empty_prog_array = { 2306 .null_prog = NULL, 2307 }; 2308 EXPORT_SYMBOL(bpf_empty_prog_array); 2309 2310 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) 2311 { 2312 if (prog_cnt) 2313 return kzalloc(sizeof(struct bpf_prog_array) + 2314 sizeof(struct bpf_prog_array_item) * 2315 (prog_cnt + 1), 2316 flags); 2317 2318 return &bpf_empty_prog_array.hdr; 2319 } 2320 2321 void bpf_prog_array_free(struct bpf_prog_array *progs) 2322 { 2323 if (!progs || progs == &bpf_empty_prog_array.hdr) 2324 return; 2325 kfree_rcu(progs, rcu); 2326 } 2327 2328 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu) 2329 { 2330 struct bpf_prog_array *progs; 2331 2332 /* If RCU Tasks Trace grace period implies RCU grace period, there is 2333 * no need to call kfree_rcu(), just call kfree() directly. 2334 */ 2335 progs = container_of(rcu, struct bpf_prog_array, rcu); 2336 if (rcu_trace_implies_rcu_gp()) 2337 kfree(progs); 2338 else 2339 kfree_rcu(progs, rcu); 2340 } 2341 2342 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs) 2343 { 2344 if (!progs || progs == &bpf_empty_prog_array.hdr) 2345 return; 2346 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb); 2347 } 2348 2349 int bpf_prog_array_length(struct bpf_prog_array *array) 2350 { 2351 struct bpf_prog_array_item *item; 2352 u32 cnt = 0; 2353 2354 for (item = array->items; item->prog; item++) 2355 if (item->prog != &dummy_bpf_prog.prog) 2356 cnt++; 2357 return cnt; 2358 } 2359 2360 bool bpf_prog_array_is_empty(struct bpf_prog_array *array) 2361 { 2362 struct bpf_prog_array_item *item; 2363 2364 for (item = array->items; item->prog; item++) 2365 if (item->prog != &dummy_bpf_prog.prog) 2366 return false; 2367 return true; 2368 } 2369 2370 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, 2371 u32 *prog_ids, 2372 u32 request_cnt) 2373 { 2374 struct bpf_prog_array_item *item; 2375 int i = 0; 2376 2377 for (item = array->items; item->prog; item++) { 2378 if (item->prog == &dummy_bpf_prog.prog) 2379 continue; 2380 prog_ids[i] = item->prog->aux->id; 2381 if (++i == request_cnt) { 2382 item++; 2383 break; 2384 } 2385 } 2386 2387 return !!(item->prog); 2388 } 2389 2390 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, 2391 __u32 __user *prog_ids, u32 cnt) 2392 { 2393 unsigned long err = 0; 2394 bool nospc; 2395 u32 *ids; 2396 2397 /* users of this function are doing: 2398 * cnt = bpf_prog_array_length(); 2399 * if (cnt > 0) 2400 * bpf_prog_array_copy_to_user(..., cnt); 2401 * so below kcalloc doesn't need extra cnt > 0 check. 2402 */ 2403 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); 2404 if (!ids) 2405 return -ENOMEM; 2406 nospc = bpf_prog_array_copy_core(array, ids, cnt); 2407 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 2408 kfree(ids); 2409 if (err) 2410 return -EFAULT; 2411 if (nospc) 2412 return -ENOSPC; 2413 return 0; 2414 } 2415 2416 void bpf_prog_array_delete_safe(struct bpf_prog_array *array, 2417 struct bpf_prog *old_prog) 2418 { 2419 struct bpf_prog_array_item *item; 2420 2421 for (item = array->items; item->prog; item++) 2422 if (item->prog == old_prog) { 2423 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); 2424 break; 2425 } 2426 } 2427 2428 /** 2429 * bpf_prog_array_delete_safe_at() - Replaces the program at the given 2430 * index into the program array with 2431 * a dummy no-op program. 2432 * @array: a bpf_prog_array 2433 * @index: the index of the program to replace 2434 * 2435 * Skips over dummy programs, by not counting them, when calculating 2436 * the position of the program to replace. 2437 * 2438 * Return: 2439 * * 0 - Success 2440 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2441 * * -ENOENT - Index out of range 2442 */ 2443 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index) 2444 { 2445 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog); 2446 } 2447 2448 /** 2449 * bpf_prog_array_update_at() - Updates the program at the given index 2450 * into the program array. 2451 * @array: a bpf_prog_array 2452 * @index: the index of the program to update 2453 * @prog: the program to insert into the array 2454 * 2455 * Skips over dummy programs, by not counting them, when calculating 2456 * the position of the program to update. 2457 * 2458 * Return: 2459 * * 0 - Success 2460 * * -EINVAL - Invalid index value. Must be a non-negative integer. 2461 * * -ENOENT - Index out of range 2462 */ 2463 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, 2464 struct bpf_prog *prog) 2465 { 2466 struct bpf_prog_array_item *item; 2467 2468 if (unlikely(index < 0)) 2469 return -EINVAL; 2470 2471 for (item = array->items; item->prog; item++) { 2472 if (item->prog == &dummy_bpf_prog.prog) 2473 continue; 2474 if (!index) { 2475 WRITE_ONCE(item->prog, prog); 2476 return 0; 2477 } 2478 index--; 2479 } 2480 return -ENOENT; 2481 } 2482 2483 int bpf_prog_array_copy(struct bpf_prog_array *old_array, 2484 struct bpf_prog *exclude_prog, 2485 struct bpf_prog *include_prog, 2486 u64 bpf_cookie, 2487 struct bpf_prog_array **new_array) 2488 { 2489 int new_prog_cnt, carry_prog_cnt = 0; 2490 struct bpf_prog_array_item *existing, *new; 2491 struct bpf_prog_array *array; 2492 bool found_exclude = false; 2493 2494 /* Figure out how many existing progs we need to carry over to 2495 * the new array. 2496 */ 2497 if (old_array) { 2498 existing = old_array->items; 2499 for (; existing->prog; existing++) { 2500 if (existing->prog == exclude_prog) { 2501 found_exclude = true; 2502 continue; 2503 } 2504 if (existing->prog != &dummy_bpf_prog.prog) 2505 carry_prog_cnt++; 2506 if (existing->prog == include_prog) 2507 return -EEXIST; 2508 } 2509 } 2510 2511 if (exclude_prog && !found_exclude) 2512 return -ENOENT; 2513 2514 /* How many progs (not NULL) will be in the new array? */ 2515 new_prog_cnt = carry_prog_cnt; 2516 if (include_prog) 2517 new_prog_cnt += 1; 2518 2519 /* Do we have any prog (not NULL) in the new array? */ 2520 if (!new_prog_cnt) { 2521 *new_array = NULL; 2522 return 0; 2523 } 2524 2525 /* +1 as the end of prog_array is marked with NULL */ 2526 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL); 2527 if (!array) 2528 return -ENOMEM; 2529 new = array->items; 2530 2531 /* Fill in the new prog array */ 2532 if (carry_prog_cnt) { 2533 existing = old_array->items; 2534 for (; existing->prog; existing++) { 2535 if (existing->prog == exclude_prog || 2536 existing->prog == &dummy_bpf_prog.prog) 2537 continue; 2538 2539 new->prog = existing->prog; 2540 new->bpf_cookie = existing->bpf_cookie; 2541 new++; 2542 } 2543 } 2544 if (include_prog) { 2545 new->prog = include_prog; 2546 new->bpf_cookie = bpf_cookie; 2547 new++; 2548 } 2549 new->prog = NULL; 2550 *new_array = array; 2551 return 0; 2552 } 2553 2554 int bpf_prog_array_copy_info(struct bpf_prog_array *array, 2555 u32 *prog_ids, u32 request_cnt, 2556 u32 *prog_cnt) 2557 { 2558 u32 cnt = 0; 2559 2560 if (array) 2561 cnt = bpf_prog_array_length(array); 2562 2563 *prog_cnt = cnt; 2564 2565 /* return early if user requested only program count or nothing to copy */ 2566 if (!request_cnt || !cnt) 2567 return 0; 2568 2569 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ 2570 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC 2571 : 0; 2572 } 2573 2574 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 2575 struct bpf_map **used_maps, u32 len) 2576 { 2577 struct bpf_map *map; 2578 u32 i; 2579 2580 for (i = 0; i < len; i++) { 2581 map = used_maps[i]; 2582 if (map->ops->map_poke_untrack) 2583 map->ops->map_poke_untrack(map, aux); 2584 bpf_map_put(map); 2585 } 2586 } 2587 2588 static void bpf_free_used_maps(struct bpf_prog_aux *aux) 2589 { 2590 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt); 2591 kfree(aux->used_maps); 2592 } 2593 2594 void __bpf_free_used_btfs(struct bpf_prog_aux *aux, 2595 struct btf_mod_pair *used_btfs, u32 len) 2596 { 2597 #ifdef CONFIG_BPF_SYSCALL 2598 struct btf_mod_pair *btf_mod; 2599 u32 i; 2600 2601 for (i = 0; i < len; i++) { 2602 btf_mod = &used_btfs[i]; 2603 if (btf_mod->module) 2604 module_put(btf_mod->module); 2605 btf_put(btf_mod->btf); 2606 } 2607 #endif 2608 } 2609 2610 static void bpf_free_used_btfs(struct bpf_prog_aux *aux) 2611 { 2612 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt); 2613 kfree(aux->used_btfs); 2614 } 2615 2616 static void bpf_prog_free_deferred(struct work_struct *work) 2617 { 2618 struct bpf_prog_aux *aux; 2619 int i; 2620 2621 aux = container_of(work, struct bpf_prog_aux, work); 2622 #ifdef CONFIG_BPF_SYSCALL 2623 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab); 2624 #endif 2625 #ifdef CONFIG_CGROUP_BPF 2626 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID) 2627 bpf_cgroup_atype_put(aux->cgroup_atype); 2628 #endif 2629 bpf_free_used_maps(aux); 2630 bpf_free_used_btfs(aux); 2631 if (bpf_prog_is_dev_bound(aux)) 2632 bpf_prog_dev_bound_destroy(aux->prog); 2633 #ifdef CONFIG_PERF_EVENTS 2634 if (aux->prog->has_callchain_buf) 2635 put_callchain_buffers(); 2636 #endif 2637 if (aux->dst_trampoline) 2638 bpf_trampoline_put(aux->dst_trampoline); 2639 for (i = 0; i < aux->func_cnt; i++) { 2640 /* We can just unlink the subprog poke descriptor table as 2641 * it was originally linked to the main program and is also 2642 * released along with it. 2643 */ 2644 aux->func[i]->aux->poke_tab = NULL; 2645 bpf_jit_free(aux->func[i]); 2646 } 2647 if (aux->func_cnt) { 2648 kfree(aux->func); 2649 bpf_prog_unlock_free(aux->prog); 2650 } else { 2651 bpf_jit_free(aux->prog); 2652 } 2653 } 2654 2655 void bpf_prog_free(struct bpf_prog *fp) 2656 { 2657 struct bpf_prog_aux *aux = fp->aux; 2658 2659 if (aux->dst_prog) 2660 bpf_prog_put(aux->dst_prog); 2661 INIT_WORK(&aux->work, bpf_prog_free_deferred); 2662 schedule_work(&aux->work); 2663 } 2664 EXPORT_SYMBOL_GPL(bpf_prog_free); 2665 2666 /* RNG for unpriviledged user space with separated state from prandom_u32(). */ 2667 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state); 2668 2669 void bpf_user_rnd_init_once(void) 2670 { 2671 prandom_init_once(&bpf_user_rnd_state); 2672 } 2673 2674 BPF_CALL_0(bpf_user_rnd_u32) 2675 { 2676 /* Should someone ever have the rather unwise idea to use some 2677 * of the registers passed into this function, then note that 2678 * this function is called from native eBPF and classic-to-eBPF 2679 * transformations. Register assignments from both sides are 2680 * different, f.e. classic always sets fn(ctx, A, X) here. 2681 */ 2682 struct rnd_state *state; 2683 u32 res; 2684 2685 state = &get_cpu_var(bpf_user_rnd_state); 2686 res = prandom_u32_state(state); 2687 put_cpu_var(bpf_user_rnd_state); 2688 2689 return res; 2690 } 2691 2692 BPF_CALL_0(bpf_get_raw_cpu_id) 2693 { 2694 return raw_smp_processor_id(); 2695 } 2696 2697 /* Weak definitions of helper functions in case we don't have bpf syscall. */ 2698 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; 2699 const struct bpf_func_proto bpf_map_update_elem_proto __weak; 2700 const struct bpf_func_proto bpf_map_delete_elem_proto __weak; 2701 const struct bpf_func_proto bpf_map_push_elem_proto __weak; 2702 const struct bpf_func_proto bpf_map_pop_elem_proto __weak; 2703 const struct bpf_func_proto bpf_map_peek_elem_proto __weak; 2704 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak; 2705 const struct bpf_func_proto bpf_spin_lock_proto __weak; 2706 const struct bpf_func_proto bpf_spin_unlock_proto __weak; 2707 const struct bpf_func_proto bpf_jiffies64_proto __weak; 2708 2709 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; 2710 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; 2711 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak; 2712 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; 2713 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak; 2714 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak; 2715 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak; 2716 2717 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; 2718 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; 2719 const struct bpf_func_proto bpf_get_current_comm_proto __weak; 2720 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; 2721 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak; 2722 const struct bpf_func_proto bpf_get_local_storage_proto __weak; 2723 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak; 2724 const struct bpf_func_proto bpf_snprintf_btf_proto __weak; 2725 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak; 2726 const struct bpf_func_proto bpf_set_retval_proto __weak; 2727 const struct bpf_func_proto bpf_get_retval_proto __weak; 2728 2729 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) 2730 { 2731 return NULL; 2732 } 2733 2734 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void) 2735 { 2736 return NULL; 2737 } 2738 2739 u64 __weak 2740 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 2741 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 2742 { 2743 return -ENOTSUPP; 2744 } 2745 EXPORT_SYMBOL_GPL(bpf_event_output); 2746 2747 /* Always built-in helper functions. */ 2748 const struct bpf_func_proto bpf_tail_call_proto = { 2749 .func = NULL, 2750 .gpl_only = false, 2751 .ret_type = RET_VOID, 2752 .arg1_type = ARG_PTR_TO_CTX, 2753 .arg2_type = ARG_CONST_MAP_PTR, 2754 .arg3_type = ARG_ANYTHING, 2755 }; 2756 2757 /* Stub for JITs that only support cBPF. eBPF programs are interpreted. 2758 * It is encouraged to implement bpf_int_jit_compile() instead, so that 2759 * eBPF and implicitly also cBPF can get JITed! 2760 */ 2761 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog) 2762 { 2763 return prog; 2764 } 2765 2766 /* Stub for JITs that support eBPF. All cBPF code gets transformed into 2767 * eBPF by the kernel and is later compiled by bpf_int_jit_compile(). 2768 */ 2769 void __weak bpf_jit_compile(struct bpf_prog *prog) 2770 { 2771 } 2772 2773 bool __weak bpf_helper_changes_pkt_data(void *func) 2774 { 2775 return false; 2776 } 2777 2778 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage 2779 * analysis code and wants explicit zero extension inserted by verifier. 2780 * Otherwise, return FALSE. 2781 * 2782 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if 2783 * you don't override this. JITs that don't want these extra insns can detect 2784 * them using insn_is_zext. 2785 */ 2786 bool __weak bpf_jit_needs_zext(void) 2787 { 2788 return false; 2789 } 2790 2791 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */ 2792 bool __weak bpf_jit_supports_subprog_tailcalls(void) 2793 { 2794 return false; 2795 } 2796 2797 bool __weak bpf_jit_supports_kfunc_call(void) 2798 { 2799 return false; 2800 } 2801 2802 bool __weak bpf_jit_supports_far_kfunc_call(void) 2803 { 2804 return false; 2805 } 2806 2807 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call 2808 * skb_copy_bits(), so provide a weak definition of it for NET-less config. 2809 */ 2810 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, 2811 int len) 2812 { 2813 return -EFAULT; 2814 } 2815 2816 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 2817 void *addr1, void *addr2) 2818 { 2819 return -ENOTSUPP; 2820 } 2821 2822 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len) 2823 { 2824 return ERR_PTR(-ENOTSUPP); 2825 } 2826 2827 int __weak bpf_arch_text_invalidate(void *dst, size_t len) 2828 { 2829 return -ENOTSUPP; 2830 } 2831 2832 #ifdef CONFIG_BPF_SYSCALL 2833 static int __init bpf_global_ma_init(void) 2834 { 2835 int ret; 2836 2837 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false); 2838 bpf_global_ma_set = !ret; 2839 return ret; 2840 } 2841 late_initcall(bpf_global_ma_init); 2842 #endif 2843 2844 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); 2845 EXPORT_SYMBOL(bpf_stats_enabled_key); 2846 2847 /* All definitions of tracepoints related to BPF. */ 2848 #define CREATE_TRACE_POINTS 2849 #include <linux/bpf_trace.h> 2850 2851 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); 2852 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx); 2853