/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/capability.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/sockptr.h>
#include <crypto/sha1.h>
#include <linux/u64_stats_sync.h>

#include <net/sch_generic.h>

#include <asm/byteorder.h>
#include <uapi/linux/filter.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;
struct ctl_table;
struct ctl_table_header;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
#define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
#define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
#define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0

/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM	0x20

/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
#define BPF_PROBE_MEMSX	0x40

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS	0xe0

/* unused opcode to mark speculation barrier for mitigating
 * Speculative Store Bypass
 */
#define BPF_NOSPEC	0xc0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE	't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

#define BPF_ALU64_REG(OP, DST, SRC) \
	BPF_ALU64_REG_OFF(OP, DST, SRC, 0)

#define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	BPF_ALU32_REG_OFF(OP, DST, SRC, 0)

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
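
/* Illustrative only: the initializers above expand to struct bpf_insn
 * compound literals, so internally generated instruction sequences can
 * be written as flat array elements, e.g.:
 *
 *	BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),	// r2 = r1
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),	// r2 += 8
 */
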
/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = DST, \
		.off = 0, \
		.imm = 1 })

static inline bool insn_is_zext(const struct bpf_insn *insn)
{
	return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
		.dst_reg = 0, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = OP })
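
/* Illustrative only: the encodings BPF_ATOMIC_OP() produces for two common
 * cases, assuming dst_reg points to a 64-bit counter in a map value or on
 * the stack:
 *
 *	// lock *(u64 *)(R1 + 0) += R2
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0)
 *	// R2 = atomic_fetch_add((u64 *)(R1 + 0), R2)
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_2, 0)
 */
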
/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_JA, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = 0 })

/* Relative call */

#define BPF_CALL_REL(TGT) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = BPF_PSEUDO_CALL, \
		.off = 0, \
		.imm = TGT })

/* Convert function address to BPF immediate */

#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)

#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = BPF_CALL_IMM(FUNC) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn) { \
		.code = CODE, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = IMM })

/* Program exit */

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })

/* Speculation barrier */

#define BPF_ST_NOSPEC() \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_NOSPEC, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })

/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K) \
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF) \
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
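
/* Illustrative only: the classic forms above wrap the uapi BPF_STMT()/
 * BPF_JUMP() initializers in a sock_filter cast so they can be assigned
 * directly, e.g. an "accept IPv4, drop everything else" filter:
 *
 *	filter[0] = __BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12);		// A = ethertype
 *	filter[1] = __BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1);
 *	filter[2] = __BPF_STMT(BPF_RET | BPF_K, 0xffff);		// accept
 *	filter[3] = __BPF_STMT(BPF_RET | BPF_K, 0);			// drop
 *
 * (0x0800 stands in for ETH_P_IP here.)
 */
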
#define bytes_to_bpf_size(bytes) \
({ \
	int bpf_size = -EINVAL; \
\
	if (bytes == sizeof(u8)) \
		bpf_size = BPF_B; \
	else if (bytes == sizeof(u16)) \
		bpf_size = BPF_H; \
	else if (bytes == sizeof(u32)) \
		bpf_size = BPF_W; \
	else if (bytes == sizeof(u64)) \
		bpf_size = BPF_DW; \
\
	bpf_size; \
})

#define bpf_size_to_bytes(bpf_size) \
({ \
	int bytes = -EINVAL; \
\
	if (bpf_size == BPF_B) \
		bytes = sizeof(u8); \
	else if (bpf_size == BPF_H) \
		bytes = sizeof(u16); \
	else if (bpf_size == BPF_W) \
		bytes = sizeof(u32); \
	else if (bpf_size == BPF_DW) \
		bytes = sizeof(u64); \
\
	bytes; \
})

#define BPF_SIZEOF(type) \
({ \
	const int __size = bytes_to_bpf_size(sizeof(type)); \
	BUILD_BUG_ON(__size < 0); \
	__size; \
})

#define BPF_FIELD_SIZEOF(type, field) \
({ \
	const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
	BUILD_BUG_ON(__size < 0); \
	__size; \
})

#define BPF_LDST_BYTES(insn) \
({ \
	const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
	WARN_ON(__size < 0); \
	__size; \
})

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a) \
	(__force t) \
	(__force \
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n) \
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...) \
	static __always_inline \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
	{ \
		return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	} \
	static __always_inline \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
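
/* Illustrative only: BPF_CALL_x() is how in-kernel helper bodies are spelled
 * so that all argument registers are passed as u64 and cast back to their
 * native types. A hypothetical two-argument helper (the name is made up for
 * illustration, it is not an existing helper) would look like:
 *
 *	BPF_CALL_2(bpf_example_sum, u64, a, u64, b)
 *	{
 *		return a + b;
 *	}
 */
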
#define bpf_ctx_range(TYPE, MEMBER) \
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
#if BITS_PER_LONG == 64
# define bpf_ctx_range_ptr(TYPE, MEMBER) \
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#else
# define bpf_ctx_range_ptr(TYPE, MEMBER) \
	offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
#endif /* BITS_PER_LONG == 64 */

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
	({ \
		BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \
		*(PTR_SIZE) = (SIZE); \
		offsetof(TYPE, MEMBER); \
	})

/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

/* Some arches need doubleword alignment for their instructions and/or data */
#define BPF_IMAGE_ALIGNMENT 8

struct bpf_binary_header {
	u32 size;
	u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};

struct bpf_prog_stats {
	u64_stats_t cnt;
	u64_stats_t nsecs;
	u64_stats_t misses;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct sk_filter {
	refcount_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);

extern struct mutex nf_conn_btf_access_lock;
extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
				     const struct bpf_reg_state *reg,
				     int off, int size);

typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
					  const struct bpf_insn *insnsi,
					  unsigned int (*bpf_func)(const void *,
								   const struct bpf_insn *));

static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
					  const void *ctx,
					  bpf_dispatcher_fn dfunc)
{
	u32 ret;

	cant_migrate();
	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		struct bpf_prog_stats *stats;
		u64 start = sched_clock();
		unsigned long flags;

		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	} else {
		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
	}
	return ret;
}

static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
{
	return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
}

/*
 * Use in preemptible and therefore migratable context to make sure that
 * the execution of the BPF program runs on one CPU.
 *
 * This uses migrate_disable/enable() explicitly to document that the
 * invocation of a BPF program does not require reentrancy protection
 * against a BPF program which is invoked from a preempting task.
 */
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
					  const void *ctx)
{
	u32 ret;

	migrate_disable();
	ret = bpf_prog_run(prog, ctx);
	migrate_enable();
	return ret;
}

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_meta;
	void *data_end;
};

struct bpf_nh_params {
	u32 nh_family;
	union {
		u32 ipv4_nh;
		struct in6_addr ipv6_nh;
	};
};

struct bpf_redirect_info {
	u64 tgt_index;
	void *tgt_value;
	struct bpf_map *map;
	u32 flags;
	u32 kern_flags;
	u32 map_id;
	enum bpf_map_type map_type;
	struct bpf_nh_params nh;
};

DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);

/* flags for bpf_redirect_info kern_flags */
#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that cb[] area can be written to when BPF program is
 * invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	cb->data_meta = skb->data - skb_metadata_len(skb);
	cb->data_end  = skb->data + skb_headlen(skb);
}

/* Similar to bpf_compute_data_pointers(), except that it saves the
 * original data_end so that it can be restored later.
 */
static inline void bpf_compute_and_save_data_end(
	struct sk_buff *skb, void **saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	*saved_data_end = cb->data_end;
	cb->data_end = skb->data + skb_headlen(skb);
}

/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
	struct sk_buff *skb, void *saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	cb->data_end = saved_data_end;
}
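
/* Illustrative only: callers that cannot guarantee skb->cb[] is scratch
 * memory (e.g. the skb still carries protocol-private state) are expected
 * to bracket the program invocation roughly like:
 *
 *	void *saved_data_end;
 *
 *	bpf_compute_and_save_data_end(skb, &saved_data_end);
 *	ret = bpf_prog_run_save_cb(prog, skb);
 *	bpf_restore_data_end(skb, saved_data_end);
 */
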
static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
		     sizeof_field(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
					 const void *ctx)
{
	const struct sk_buff *skb = ctx;
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = bpf_prog_run(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u32 res;

	migrate_disable();
	res = __bpf_prog_run_save_cb(prog, skb);
	migrate_enable();
	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u32 res;

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	res = bpf_prog_run_pin_on_cpu(prog, skb);
	return res;
}

DECLARE_BPF_DISPATCHER(xdp)

DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);

u32 xdp_master_redirect(struct xdp_buff *xdp);

void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
	return round_up(bpf_prog_insn_size(prog) +
			sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
	const u32 size_machine = sizeof(unsigned long);

	if (size > size_machine && size % size_machine == 0)
		size = size_machine;

	return size;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
	return size <= size_default && (size & (size - 1)) == 0;
}

static inline u8
bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
{
	u8 access_off = off & (size_default - 1);

#ifdef __LITTLE_ENDIAN
	return access_off;
#else
	return size_default - (access_off + size);
#endif
}

#define bpf_ctx_wide_access_ok(off, size, type, field) \
	(size == sizeof(__u64) && \
	off >= offsetof(type, field) && \
	off + sizeof(__u64) <= offsetofend(type, field) && \
	off % sizeof(__u64) == 0)

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	if (!fp->jited) {
		set_vm_flush_reset_perms(fp);
		set_memory_ro((unsigned long)fp, fp->pages);
	}
#endif
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	set_vm_flush_reset_perms(hdr);
	set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

bool bpf_opcode_in_insntable(u8 code);

void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
void bpf_prog_jit_attempt_done(struct bpf_prog *prog);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
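
/* Illustrative only: some in-kernel users of classic filters build a
 * sock_fprog_kern and let bpf_prog_create() convert it into an eBPF
 * program internally, roughly:
 *
 *	static struct sock_filter accept_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept the packet
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(accept_all),
 *		.filter	= accept_all,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 *	...
 *	bpf_prog_destroy(prog);
 */
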
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
	 (void *)__bpf_call_base)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_helper_changes_pkt_data(void *func);

static inline bool bpf_dump_raw_ok(const struct cred *cred)
{
	/* Reconstruction of call-sites is dependent on kallsyms,
	 * thus the dump is subject to the same restriction.
	 */
	return kallsyms_show_value(cred);
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);

void bpf_clear_redirect_map(struct bpf_map *map);

static inline bool xdp_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_set_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_clear_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}

static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
				 unsigned int pktlen)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (pktlen > len)
		return -EMSGSIZE;

	return 0;
}

/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
 * same cpu context. Further for best results no more than a single map
 * for the do_redirect/do_flush pair should be used. This limitation is
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
int xdp_do_redirect_frame(struct net_device *dev,
			  struct xdp_buff *xdp,
			  struct xdp_frame *xdpf,
			  struct bpf_prog *prog);
void xdp_do_flush(void);
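
/* Illustrative only: a driver's NAPI poll loop is expected to use the pair
 * above roughly as follows (error handling omitted):
 *
 *	for each received frame {
 *		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *		if (act == XDP_REDIRECT)
 *			xdp_do_redirect(dev, &xdp, xdp_prog);
 *	}
 *	xdp_do_flush();		// once, at the end of the poll cycle
 */
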
/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
 * it is no longer only flushing maps. Keep this define for compatibility
 * until all drivers are updated - do not use xdp_do_flush_map() in new code!
 */
#define xdp_do_flush_map xdp_do_flush

void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);

#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  struct sock *migrating_sk,
				  u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
		     struct bpf_prog *prog, struct sk_buff *skb,
		     struct sock *migrating_sk,
		     u32 hash)
{
	return NULL;
}
#endif

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
extern long bpf_jit_limit_max;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
u64 bpf_jit_alloc_exec_limit(void);
void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_prog_pack_free(struct bpf_binary_header *hdr);

static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym.lnode) ||
	       fp->aux->ksym.lnode.prev == LIST_POISON2;
}

struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_hdr,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns);
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
				 struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header);
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header);

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke);

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
	return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return fp->jited && bpf_jit_is_ebpf();
}
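
/* Illustrative only: arch JITs typically guard the bpf_jit_dump() helper
 * above behind the debugging value of the bpf_jit_enable sysctl, along the
 * lines of:
 *
 *	if (bpf_jit_enable > 1)
 *		bpf_jit_dump(prog->len, image_size, pass, image);
 */
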
static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!prog->jit_requested)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && bpf_capable())
		return false;

	return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	/* There are a couple of corner cases where kallsyms should
	 * not be enabled f.e. on hardening.
	 */
	if (bpf_jit_harden)
		return false;
	if (!bpf_jit_kallsyms)
		return false;
	if (bpf_jit_kallsyms == 1)
		return true;

	return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	const char *ret = __bpf_address_lookup(addr, size, off, sym);

	if (ret && modname)
		*modname = NULL;
	return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
	return false;
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
	return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}

static inline int
bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
			    struct bpf_jit_poke_descriptor *poke)
{
	return -ENOTSUPP;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
		     unsigned long *off, char *sym)
{
	return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
	return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
				  char *type, char *sym)
{
	return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}

#endif /* CONFIG_BPF_JIT */

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE: \
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		fallthrough;
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

struct bpf_sock_addr_kern {
	struct sock *sk;
	struct sockaddr *uaddr;
	/* Temporary "register" to make indirect stores to nested structures
	 * defined above. We need three registers to make such a store, but
	 * only two (src and dst) are available at convert_ctx_access time
	 */
	u64 tmp_reg;
	void *t_ctx;	/* Attach type specific context. */
};

struct bpf_sock_ops_kern {
	struct sock *sk;
	union {
		u32 args[4];
		u32 reply;
		u32 replylong[4];
	};
	struct sk_buff *syn_skb;
	struct sk_buff *skb;
	void *skb_data_end;
	u8 op;
	u8 is_fullsock;
	u8 remaining_opt_len;
	u64 temp;			/* temp and everything after is not
					 * initialized to 0 before calling
					 * the BPF program. New fields that
					 * should be initialized to 0 should
					 * be inserted before temp.
					 * temp is scratch storage used by
					 * sock_ops_convert_ctx_access
					 * as temporary storage of a register.
					 */
};

struct bpf_sysctl_kern {
	struct ctl_table_header *head;
	struct ctl_table *table;
	void *cur_val;
	size_t cur_len;
	void *new_val;
	size_t new_len;
	int new_updated;
	int write;
	loff_t *ppos;
	/* Temporary "register" for indirect stores to ppos. */
	u64 tmp_reg;
};

#define BPF_SOCKOPT_KERN_BUF_SIZE	32
struct bpf_sockopt_buf {
	u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
};

struct bpf_sockopt_kern {
	struct sock	*sk;
	u8		*optval;
	u8		*optval_end;
	s32		level;
	s32		optname;
	s32		optlen;
	/* for retval in struct bpf_cg_run_ctx */
	struct task_struct *current_task;
	/* Temporary "register" for indirect stores to ppos. */
	u64 tmp_reg;
};

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);

struct bpf_sk_lookup_kern {
	u16		family;
	u16		protocol;
	__be16		sport;
	u16		dport;
	struct {
		__be32 saddr;
		__be32 daddr;
	} v4;
	struct {
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
	} v6;
	struct sock	*selected_sk;
	u32		ingress_ifindex;
	bool		no_reuseport;
};

extern struct static_key_false bpf_sk_lookup_enabled;

/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
 *
 * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
 * SK_DROP. Their meaning is as follows:
 *
 *  SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
 *  SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
 *  SK_DROP                           : terminate lookup with -ECONNREFUSED
 *
 * This macro aggregates return values and selected sockets from
 * multiple BPF programs according to following rules in order:
 *
 *  1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
 *     macro result is SK_PASS and last ctx.selected_sk is used.
 *  2. If any program returned SK_DROP return value,
 *     macro result is SK_DROP.
 *  3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
 *
 * Caller must ensure that the prog array is non-NULL, and that the
 * array as well as the programs it contains remain valid.
 */
#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \
	({ \
		struct bpf_sk_lookup_kern *_ctx = &(ctx); \
		struct bpf_prog_array_item *_item; \
		struct sock *_selected_sk = NULL; \
		bool _no_reuseport = false; \
		struct bpf_prog *_prog; \
		bool _all_pass = true; \
		u32 _ret; \
\
		migrate_disable(); \
		_item = &(array)->items[0]; \
		while ((_prog = READ_ONCE(_item->prog))) { \
			/* restore most recent selection */ \
			_ctx->selected_sk = _selected_sk; \
			_ctx->no_reuseport = _no_reuseport; \
\
			_ret = func(_prog, _ctx); \
			if (_ret == SK_PASS && _ctx->selected_sk) { \
				/* remember last non-NULL socket */ \
				_selected_sk = _ctx->selected_sk; \
				_no_reuseport = _ctx->no_reuseport; \
			} else if (_ret == SK_DROP && _all_pass) { \
				_all_pass = false; \
			} \
			_item++; \
		} \
		_ctx->selected_sk = _selected_sk; \
		_ctx->no_reuseport = _no_reuseport; \
		migrate_enable(); \
		_all_pass || _selected_sk ? SK_PASS : SK_DROP; \
	})

static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
					const __be32 saddr, const __be16 sport,
					const __be32 daddr, const u16 dport,
					const int ifindex, struct sock **psk)
{
	struct bpf_prog_array *run_array;
	struct sock *selected_sk = NULL;
	bool no_reuseport = false;

	rcu_read_lock();
	run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
	if (run_array) {
		struct bpf_sk_lookup_kern ctx = {
			.family		= AF_INET,
			.protocol	= protocol,
			.v4.saddr	= saddr,
			.v4.daddr	= daddr,
			.sport		= sport,
			.dport		= dport,
			.ingress_ifindex	= ifindex,
		};
		u32 act;

		act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
		if (act == SK_PASS) {
			selected_sk = ctx.selected_sk;
			no_reuseport = ctx.no_reuseport;
		} else {
			selected_sk = ERR_PTR(-ECONNREFUSED);
		}
	}
	rcu_read_unlock();
	*psk = selected_sk;
	return no_reuseport;
}

#if IS_ENABLED(CONFIG_IPV6)
static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
					const struct in6_addr *saddr,
					const __be16 sport,
					const struct in6_addr *daddr,
					const u16 dport,
					const int ifindex, struct sock **psk)
{
	struct bpf_prog_array *run_array;
	struct sock *selected_sk = NULL;
	bool no_reuseport = false;

	rcu_read_lock();
	run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
	if (run_array) {
		struct bpf_sk_lookup_kern ctx = {
			.family		= AF_INET6,
			.protocol	= protocol,
			.v6.saddr	= saddr,
			.v6.daddr	= daddr,
			.sport		= sport,
			.dport		= dport,
			.ingress_ifindex	= ifindex,
		};
		u32 act;

		act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
		if (act == SK_PASS) {
			selected_sk = ctx.selected_sk;
			no_reuseport = ctx.no_reuseport;
		} else {
			selected_sk = ERR_PTR(-ECONNREFUSED);
		}
	}
	rcu_read_unlock();
	*psk = selected_sk;
	return no_reuseport;
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
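
/* Illustrative only: the map-specific bpf_redirect_map() helpers are thin
 * wrappers around __bpf_xdp_redirect_map() below; e.g. the cpumap variant
 * essentially boils down to:
 *
 *	return __bpf_xdp_redirect_map(map, index, flags, 0,
 *				      __cpu_map_lookup_elem);
 */
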
static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
						   u64 flags, const u64 flag_mask,
						   void *lookup_elem(struct bpf_map *map, u32 key))
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;

	/* Lower bits of the flags are used as return code on lookup failure */
	if (unlikely(flags & ~(action_mask | flag_mask)))
		return XDP_ABORTED;

	ri->tgt_value = lookup_elem(map, index);
	if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
		/* If the lookup fails we want to clear out the state in the
		 * redirect_info struct completely, so that if an eBPF program
		 * performs multiple lookups, the last one always takes
		 * precedence.
		 */
		ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
		ri->map_type = BPF_MAP_TYPE_UNSPEC;
		return flags & action_mask;
	}

	ri->tgt_index = index;
	ri->map_id = map->id;
	ri->map_type = map->map_type;

	if (flags & BPF_F_BROADCAST) {
		WRITE_ONCE(ri->map, map);
		ri->flags = flags;
	} else {
		WRITE_ONCE(ri->map, NULL);
		ri->flags = 0;
	}

	return XDP_REDIRECT;
}

#ifdef CONFIG_NET
int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len);
int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
			  u32 len, u64 flags);
int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
		      void *buf, unsigned long len, bool flush);
#else /* CONFIG_NET */
static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
				       void *to, u32 len)
{
	return -EOPNOTSUPP;
}

static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset,
					const void *from, u32 len, u64 flags)
{
	return -EOPNOTSUPP;
}

static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset,
				       void *buf, u32 len)
{
	return -EOPNOTSUPP;
}

static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset,
					void *buf, u32 len)
{
	return -EOPNOTSUPP;
}

static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
{
	return NULL;
}

static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
				    unsigned long len, bool flush)
{
}
#endif /* CONFIG_NET */

#endif /* __LINUX_FILTER_H__ */