/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm
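
/* Note on the helper below: classic BPF allows "negative" load offsets as
 * a way to address packet headers directly. An offset k in the range
 * [SKF_NET_OFF, 0) is taken relative to the network header, and k in
 * [SKF_LL_OFF, SKF_NET_OFF) relative to the link layer (MAC) header.
 * Anything that would fall outside of the linear skb data yields NULL so
 * the filter bails out with 0.
 */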

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
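
/* The program tag computed below is a SHA-1 digest over the instruction
 * image, with the padding (the 0x80 terminator and the bit length in the
 * final block) done by hand further down. Map fds embedded in
 * BPF_LD_IMM64 pseudo instructions are zeroed out first, since the same
 * program loaded twice would otherwise hash differently purely because of
 * fd numbering. The result lands in fp->tag and shows up, e.g., in the
 * bpf_prog_<tag> kallsyms name.
 */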

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
}
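
/* When bpf_patch_insn_single() below grows the program by delta insns at
 * position pos, jump offsets spanning the patched region need fixing up:
 * roughly, a jump sitting before pos whose target lies behind it grows by
 * delta, while a jump sitting behind the inserted insns whose target stays
 * in the front part gets delta subtracted. E.g. with delta == 1, a
 * "ja +2" at insn 3 targeting insn 6 becomes "ja +3" once a new insn is
 * inserted at position 5.
 */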

static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_is_jmp_and_has_target(insn))
			continue;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}

#ifdef CONFIG_BPF_JIT
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
	*sym = 0;
}

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

int bpf_jit_kallsyms __read_mostly;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}
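
/* The lookup side below is meant to be cheap and lockless: writers
 * serialize on bpf_lock, while readers such as __bpf_address_lookup()
 * and is_bpf_text_address() only take rcu_read_lock() and rely on the
 * latch tree, which tolerates concurrent updates. That matters since
 * these helpers may be called from kallsyms and stack dumping paths.
 */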

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(aux->prog, sym);

		*value = symbol_start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}
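
/* JIT image layout produced by bpf_jit_binary_alloc() below: a
 * struct bpf_binary_header followed by the image itself, rounded up to
 * full pages. The slack is pre-filled with architecture-specific illegal
 * instructions and the JITed code starts at a randomized, suitably
 * aligned offset inside that hole, which makes guessing the exact start
 * address of the emitted code a little harder.
 */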

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_harden __read_mostly;
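
/* Constant blinding: when bpf_jit_harden is active, immediates that user
 * space controls must not end up verbatim in the JIT image, where they
 * could be abused to hide useful instruction sequences. Each affected
 * insn is rewritten to go through the auxiliary register BPF_REG_AX,
 * roughly:
 *
 *	dst = dst + imm
 *
 * becomes
 *
 *	AX  = imm ^ rnd
 *	AX ^= rnd
 *	dst = dst + AX
 *
 * with rnd being a fresh random value, so the original constant is only
 * ever recombined at run time.
 */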

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled())
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
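
/* The interpreter below is a threaded interpreter: the jumptable maps
 * every opcode byte to a GCC computed-goto label, the CONT macro advances
 * to the next insn and dispatches through the table again, and any opcode
 * without a handler lands on default_label. This avoids one big switch
 * statement and keeps dispatch down to an indirect jump per insn.
 */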

/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;
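
	/* The tail call below replaces the current program with the one
	 * found in the prog array slot selected by BPF_R3 without ever
	 * returning: register state and stack are reused and only the
	 * instruction pointer is rewound to the new program's first insn.
	 * tail_call_cnt bounds the chain length via MAX_TAIL_CALL_CNT, so
	 * programs tail-calling each other cannot loop forever.
	 */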

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u64 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
		 * appearing in the programs where ctx == skb
		 * (see may_access_skb() in the verifier). All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
		 * verifier will check that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */
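
/* Programs stored in a prog array used for tail calls must all agree on
 * the program type and on whether they are JITed: the first program added
 * claims ownership of the array (owner_prog_type/owner_jited), and later
 * additions are rejected unless they match. This keeps a tail call from
 * jumping between, e.g., a JITed image and the interpreter.
 */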

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	fp = bpf_int_jit_compile(fp);
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);

EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);