1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux Socket Filter - Kernel level socket filtering 4 * 5 * Based on the design of the Berkeley Packet Filter. The new 6 * internal format has been designed by PLUMgrid: 7 * 8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com 9 * 10 * Authors: 11 * 12 * Jay Schulist <jschlst@samba.org> 13 * Alexei Starovoitov <ast@plumgrid.com> 14 * Daniel Borkmann <dborkman@redhat.com> 15 * 16 * Andi Kleen - Fix a few bad bugs and races. 17 * Kris Katterjohn - Added many additional checks in bpf_check_classic() 18 */ 19 20 #include <linux/module.h> 21 #include <linux/types.h> 22 #include <linux/mm.h> 23 #include <linux/fcntl.h> 24 #include <linux/socket.h> 25 #include <linux/sock_diag.h> 26 #include <linux/in.h> 27 #include <linux/inet.h> 28 #include <linux/netdevice.h> 29 #include <linux/if_packet.h> 30 #include <linux/if_arp.h> 31 #include <linux/gfp.h> 32 #include <net/inet_common.h> 33 #include <net/ip.h> 34 #include <net/protocol.h> 35 #include <net/netlink.h> 36 #include <linux/skbuff.h> 37 #include <linux/skmsg.h> 38 #include <net/sock.h> 39 #include <net/flow_dissector.h> 40 #include <linux/errno.h> 41 #include <linux/timer.h> 42 #include <linux/uaccess.h> 43 #include <asm/unaligned.h> 44 #include <asm/cmpxchg.h> 45 #include <linux/filter.h> 46 #include <linux/ratelimit.h> 47 #include <linux/seccomp.h> 48 #include <linux/if_vlan.h> 49 #include <linux/bpf.h> 50 #include <linux/btf.h> 51 #include <net/sch_generic.h> 52 #include <net/cls_cgroup.h> 53 #include <net/dst_metadata.h> 54 #include <net/dst.h> 55 #include <net/sock_reuseport.h> 56 #include <net/busy_poll.h> 57 #include <net/tcp.h> 58 #include <net/xfrm.h> 59 #include <net/udp.h> 60 #include <linux/bpf_trace.h> 61 #include <net/xdp_sock.h> 62 #include <linux/inetdevice.h> 63 #include <net/inet_hashtables.h> 64 #include <net/inet6_hashtables.h> 65 #include <net/ip_fib.h> 66 #include <net/nexthop.h> 67 #include <net/flow.h> 68 #include <net/arp.h> 69 #include <net/ipv6.h> 70 #include <net/net_namespace.h> 71 #include <linux/seg6_local.h> 72 #include <net/seg6.h> 73 #include <net/seg6_local.h> 74 #include <net/lwtunnel.h> 75 #include <net/ipv6_stubs.h> 76 #include <net/bpf_sk_storage.h> 77 #include <net/transp_v6.h> 78 #include <linux/btf_ids.h> 79 #include <net/tls.h> 80 81 static const struct bpf_func_proto * 82 bpf_sk_base_func_proto(enum bpf_func_id func_id); 83 84 int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len) 85 { 86 if (in_compat_syscall()) { 87 struct compat_sock_fprog f32; 88 89 if (len != sizeof(f32)) 90 return -EINVAL; 91 if (copy_from_sockptr(&f32, src, sizeof(f32))) 92 return -EFAULT; 93 memset(dst, 0, sizeof(*dst)); 94 dst->len = f32.len; 95 dst->filter = compat_ptr(f32.filter); 96 } else { 97 if (len != sizeof(*dst)) 98 return -EINVAL; 99 if (copy_from_sockptr(dst, src, sizeof(*dst))) 100 return -EFAULT; 101 } 102 103 return 0; 104 } 105 EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user); 106 107 /** 108 * sk_filter_trim_cap - run a packet through a socket filter 109 * @sk: sock associated with &sk_buff 110 * @skb: buffer to filter 111 * @cap: limit on how short the eBPF program may trim the packet 112 * 113 * Run the eBPF program and then cut skb->data to correct size returned by 114 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller 115 * than pkt_len we keep whole skb->data. This is the socket level 116 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should 117 * be accepted or -EPERM if the packet should be tossed. 
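 * Note that @cap acts as a floor for the trim: the buffer is cut to
 * max(@cap, pkt_len), so an accepted packet is never trimmed below
 * @cap bytes.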
118 * 119 */ 120 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) 121 { 122 int err; 123 struct sk_filter *filter; 124 125 /* 126 * If the skb was allocated from pfmemalloc reserves, only 127 * allow SOCK_MEMALLOC sockets to use it as this socket is 128 * helping free memory 129 */ 130 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) { 131 NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP); 132 return -ENOMEM; 133 } 134 err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb); 135 if (err) 136 return err; 137 138 err = security_sock_rcv_skb(sk, skb); 139 if (err) 140 return err; 141 142 rcu_read_lock(); 143 filter = rcu_dereference(sk->sk_filter); 144 if (filter) { 145 struct sock *save_sk = skb->sk; 146 unsigned int pkt_len; 147 148 skb->sk = sk; 149 pkt_len = bpf_prog_run_save_cb(filter->prog, skb); 150 skb->sk = save_sk; 151 err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; 152 } 153 rcu_read_unlock(); 154 155 return err; 156 } 157 EXPORT_SYMBOL(sk_filter_trim_cap); 158 159 BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb) 160 { 161 return skb_get_poff(skb); 162 } 163 164 BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x) 165 { 166 struct nlattr *nla; 167 168 if (skb_is_nonlinear(skb)) 169 return 0; 170 171 if (skb->len < sizeof(struct nlattr)) 172 return 0; 173 174 if (a > skb->len - sizeof(struct nlattr)) 175 return 0; 176 177 nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x); 178 if (nla) 179 return (void *) nla - (void *) skb->data; 180 181 return 0; 182 } 183 184 BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x) 185 { 186 struct nlattr *nla; 187 188 if (skb_is_nonlinear(skb)) 189 return 0; 190 191 if (skb->len < sizeof(struct nlattr)) 192 return 0; 193 194 if (a > skb->len - sizeof(struct nlattr)) 195 return 0; 196 197 nla = (struct nlattr *) &skb->data[a]; 198 if (nla->nla_len > skb->len - a) 199 return 0; 200 201 nla = nla_find_nested(nla, x); 202 if (nla) 203 return (void *) nla - (void *) skb->data; 204 205 return 0; 206 } 207 208 BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *, 209 data, int, headlen, int, offset) 210 { 211 u8 tmp, *ptr; 212 const int len = sizeof(tmp); 213 214 if (offset >= 0) { 215 if (headlen - offset >= len) 216 return *(u8 *)(data + offset); 217 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) 218 return tmp; 219 } else { 220 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); 221 if (likely(ptr)) 222 return *(u8 *)ptr; 223 } 224 225 return -EFAULT; 226 } 227 228 BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, 229 int, offset) 230 { 231 return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len, 232 offset); 233 } 234 235 BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *, 236 data, int, headlen, int, offset) 237 { 238 u16 tmp, *ptr; 239 const int len = sizeof(tmp); 240 241 if (offset >= 0) { 242 if (headlen - offset >= len) 243 return get_unaligned_be16(data + offset); 244 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) 245 return be16_to_cpu(tmp); 246 } else { 247 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); 248 if (likely(ptr)) 249 return get_unaligned_be16(ptr); 250 } 251 252 return -EFAULT; 253 } 254 255 BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, 256 int, offset) 257 { 258 return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len, 259 offset); 260 } 261 262 
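/* As with the 8- and 16-bit helpers above, the 32-bit variant below takes
 * skb->data and the linear headlen as explicit arguments (the cBPF prologue
 * caches them in R8/R9), whereas the *_no_cache variants recompute
 * skb->len - skb->data_len on every call. Negative offsets are resolved
 * through bpf_internal_load_pointer_neg_helper(), i.e. the SKF_LL_OFF /
 * SKF_NET_OFF ancillary bases.
 */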
BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, 263 data, int, headlen, int, offset) 264 { 265 u32 tmp, *ptr; 266 const int len = sizeof(tmp); 267 268 if (likely(offset >= 0)) { 269 if (headlen - offset >= len) 270 return get_unaligned_be32(data + offset); 271 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) 272 return be32_to_cpu(tmp); 273 } else { 274 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); 275 if (likely(ptr)) 276 return get_unaligned_be32(ptr); 277 } 278 279 return -EFAULT; 280 } 281 282 BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, 283 int, offset) 284 { 285 return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len, 286 offset); 287 } 288 289 static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, 290 struct bpf_insn *insn_buf) 291 { 292 struct bpf_insn *insn = insn_buf; 293 294 switch (skb_field) { 295 case SKF_AD_MARK: 296 BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); 297 298 *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, 299 offsetof(struct sk_buff, mark)); 300 break; 301 302 case SKF_AD_PKTTYPE: 303 *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET()); 304 *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX); 305 #ifdef __BIG_ENDIAN_BITFIELD 306 *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5); 307 #endif 308 break; 309 310 case SKF_AD_QUEUE: 311 BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2); 312 313 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, 314 offsetof(struct sk_buff, queue_mapping)); 315 break; 316 317 case SKF_AD_VLAN_TAG: 318 BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2); 319 320 /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ 321 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, 322 offsetof(struct sk_buff, vlan_tci)); 323 break; 324 case SKF_AD_VLAN_TAG_PRESENT: 325 *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET()); 326 if (PKT_VLAN_PRESENT_BIT) 327 *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT); 328 if (PKT_VLAN_PRESENT_BIT < 7) 329 *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1); 330 break; 331 } 332 333 return insn - insn_buf; 334 } 335 336 static bool convert_bpf_extensions(struct sock_filter *fp, 337 struct bpf_insn **insnp) 338 { 339 struct bpf_insn *insn = *insnp; 340 u32 cnt; 341 342 switch (fp->k) { 343 case SKF_AD_OFF + SKF_AD_PROTOCOL: 344 BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2); 345 346 /* A = *(u16 *) (CTX + offsetof(protocol)) */ 347 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, 348 offsetof(struct sk_buff, protocol)); 349 /* A = ntohs(A) [emitting a nop or swap16] */ 350 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); 351 break; 352 353 case SKF_AD_OFF + SKF_AD_PKTTYPE: 354 cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn); 355 insn += cnt - 1; 356 break; 357 358 case SKF_AD_OFF + SKF_AD_IFINDEX: 359 case SKF_AD_OFF + SKF_AD_HATYPE: 360 BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4); 361 BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2); 362 363 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 364 BPF_REG_TMP, BPF_REG_CTX, 365 offsetof(struct sk_buff, dev)); 366 /* if (tmp != 0) goto pc + 1 */ 367 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); 368 *insn++ = BPF_EXIT_INSN(); 369 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) 370 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP, 371 offsetof(struct net_device, ifindex)); 372 else 373 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, 
BPF_REG_TMP, 374 offsetof(struct net_device, type)); 375 break; 376 377 case SKF_AD_OFF + SKF_AD_MARK: 378 cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn); 379 insn += cnt - 1; 380 break; 381 382 case SKF_AD_OFF + SKF_AD_RXHASH: 383 BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4); 384 385 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, 386 offsetof(struct sk_buff, hash)); 387 break; 388 389 case SKF_AD_OFF + SKF_AD_QUEUE: 390 cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn); 391 insn += cnt - 1; 392 break; 393 394 case SKF_AD_OFF + SKF_AD_VLAN_TAG: 395 cnt = convert_skb_access(SKF_AD_VLAN_TAG, 396 BPF_REG_A, BPF_REG_CTX, insn); 397 insn += cnt - 1; 398 break; 399 400 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: 401 cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, 402 BPF_REG_A, BPF_REG_CTX, insn); 403 insn += cnt - 1; 404 break; 405 406 case SKF_AD_OFF + SKF_AD_VLAN_TPID: 407 BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2); 408 409 /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */ 410 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, 411 offsetof(struct sk_buff, vlan_proto)); 412 /* A = ntohs(A) [emitting a nop or swap16] */ 413 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); 414 break; 415 416 case SKF_AD_OFF + SKF_AD_PAY_OFFSET: 417 case SKF_AD_OFF + SKF_AD_NLATTR: 418 case SKF_AD_OFF + SKF_AD_NLATTR_NEST: 419 case SKF_AD_OFF + SKF_AD_CPU: 420 case SKF_AD_OFF + SKF_AD_RANDOM: 421 /* arg1 = CTX */ 422 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); 423 /* arg2 = A */ 424 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); 425 /* arg3 = X */ 426 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); 427 /* Emit call(arg1=CTX, arg2=A, arg3=X) */ 428 switch (fp->k) { 429 case SKF_AD_OFF + SKF_AD_PAY_OFFSET: 430 *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset); 431 break; 432 case SKF_AD_OFF + SKF_AD_NLATTR: 433 *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr); 434 break; 435 case SKF_AD_OFF + SKF_AD_NLATTR_NEST: 436 *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest); 437 break; 438 case SKF_AD_OFF + SKF_AD_CPU: 439 *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id); 440 break; 441 case SKF_AD_OFF + SKF_AD_RANDOM: 442 *insn = BPF_EMIT_CALL(bpf_user_rnd_u32); 443 bpf_user_rnd_init_once(); 444 break; 445 } 446 break; 447 448 case SKF_AD_OFF + SKF_AD_ALU_XOR_X: 449 /* A ^= X */ 450 *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X); 451 break; 452 453 default: 454 /* This is just a dummy call to avoid letting the compiler 455 * evict __bpf_call_base() as an optimization. Placed here 456 * where no-one bothers. 
457 */ 458 BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0); 459 return false; 460 } 461 462 *insnp = insn; 463 return true; 464 } 465 466 static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp) 467 { 468 const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS); 469 int size = bpf_size_to_bytes(BPF_SIZE(fp->code)); 470 bool endian = BPF_SIZE(fp->code) == BPF_H || 471 BPF_SIZE(fp->code) == BPF_W; 472 bool indirect = BPF_MODE(fp->code) == BPF_IND; 473 const int ip_align = NET_IP_ALIGN; 474 struct bpf_insn *insn = *insnp; 475 int offset = fp->k; 476 477 if (!indirect && 478 ((unaligned_ok && offset >= 0) || 479 (!unaligned_ok && offset >= 0 && 480 offset + ip_align >= 0 && 481 offset + ip_align % size == 0))) { 482 bool ldx_off_ok = offset <= S16_MAX; 483 484 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); 485 if (offset) 486 *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); 487 *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, 488 size, 2 + endian + (!ldx_off_ok * 2)); 489 if (ldx_off_ok) { 490 *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, 491 BPF_REG_D, offset); 492 } else { 493 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); 494 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); 495 *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, 496 BPF_REG_TMP, 0); 497 } 498 if (endian) 499 *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); 500 *insn++ = BPF_JMP_A(8); 501 } 502 503 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); 504 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D); 505 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H); 506 if (!indirect) { 507 *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset); 508 } else { 509 *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X); 510 if (fp->k) 511 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset); 512 } 513 514 switch (BPF_SIZE(fp->code)) { 515 case BPF_B: 516 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8); 517 break; 518 case BPF_H: 519 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16); 520 break; 521 case BPF_W: 522 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32); 523 break; 524 default: 525 return false; 526 } 527 528 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2); 529 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); 530 *insn = BPF_EXIT_INSN(); 531 532 *insnp = insn; 533 return true; 534 } 535 536 /** 537 * bpf_convert_filter - convert filter program 538 * @prog: the user passed filter program 539 * @len: the length of the user passed filter program 540 * @new_prog: allocated 'struct bpf_prog' or NULL 541 * @new_len: pointer to store length of converted program 542 * @seen_ld_abs: bool whether we've seen ld_abs/ind 543 * 544 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn' 545 * style extended BPF (eBPF). 
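 * A single classic insn may expand into several eBPF insns (e.g. an LD_ABS
 * load, a conditional jump that takes both branches, or the LDX_MSH
 * sequence), which is why the resulting length is computed in a separate
 * first pass before the real remap.
 *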
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) Second pass to do the actual remapping; internally this may iterate
 *    (up to three passes) until the newly calculated jump offsets converge:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In the eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is.
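		 * The classic and eBPF encodings agree for these opcodes, so
		 * fp->code is re-emitted verbatim via BPF_RAW_INSN() below;
		 * only div/mod by register gets an extra zero-divisor check.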
*/ 624 case BPF_ALU | BPF_ADD | BPF_X: 625 case BPF_ALU | BPF_ADD | BPF_K: 626 case BPF_ALU | BPF_SUB | BPF_X: 627 case BPF_ALU | BPF_SUB | BPF_K: 628 case BPF_ALU | BPF_AND | BPF_X: 629 case BPF_ALU | BPF_AND | BPF_K: 630 case BPF_ALU | BPF_OR | BPF_X: 631 case BPF_ALU | BPF_OR | BPF_K: 632 case BPF_ALU | BPF_LSH | BPF_X: 633 case BPF_ALU | BPF_LSH | BPF_K: 634 case BPF_ALU | BPF_RSH | BPF_X: 635 case BPF_ALU | BPF_RSH | BPF_K: 636 case BPF_ALU | BPF_XOR | BPF_X: 637 case BPF_ALU | BPF_XOR | BPF_K: 638 case BPF_ALU | BPF_MUL | BPF_X: 639 case BPF_ALU | BPF_MUL | BPF_K: 640 case BPF_ALU | BPF_DIV | BPF_X: 641 case BPF_ALU | BPF_DIV | BPF_K: 642 case BPF_ALU | BPF_MOD | BPF_X: 643 case BPF_ALU | BPF_MOD | BPF_K: 644 case BPF_ALU | BPF_NEG: 645 case BPF_LD | BPF_ABS | BPF_W: 646 case BPF_LD | BPF_ABS | BPF_H: 647 case BPF_LD | BPF_ABS | BPF_B: 648 case BPF_LD | BPF_IND | BPF_W: 649 case BPF_LD | BPF_IND | BPF_H: 650 case BPF_LD | BPF_IND | BPF_B: 651 /* Check for overloaded BPF extension and 652 * directly convert it if found, otherwise 653 * just move on with mapping. 654 */ 655 if (BPF_CLASS(fp->code) == BPF_LD && 656 BPF_MODE(fp->code) == BPF_ABS && 657 convert_bpf_extensions(fp, &insn)) 658 break; 659 if (BPF_CLASS(fp->code) == BPF_LD && 660 convert_bpf_ld_abs(fp, &insn)) { 661 *seen_ld_abs = true; 662 break; 663 } 664 665 if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) || 666 fp->code == (BPF_ALU | BPF_MOD | BPF_X)) { 667 *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X); 668 /* Error with exception code on div/mod by 0. 669 * For cBPF programs, this was always return 0. 670 */ 671 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2); 672 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); 673 *insn++ = BPF_EXIT_INSN(); 674 } 675 676 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); 677 break; 678 679 /* Jump transformation cannot use BPF block macros 680 * everywhere as offset calculation and target updates 681 * require a bit more work than the rest, i.e. jump 682 * opcodes map as-is, but offsets need adjustment. 683 */ 684 685 #define BPF_EMIT_JMP \ 686 do { \ 687 const s32 off_min = S16_MIN, off_max = S16_MAX; \ 688 s32 off; \ 689 \ 690 if (target >= len || target < 0) \ 691 goto err; \ 692 off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ 693 /* Adjust pc relative offset for 2nd or 3rd insn. */ \ 694 off -= insn - tmp_insns; \ 695 /* Reject anything not fitting into insn->off. */ \ 696 if (off < off_min || off > off_max) \ 697 goto err; \ 698 insn->off = off; \ 699 } while (0) 700 701 case BPF_JMP | BPF_JA: 702 target = i + fp->k + 1; 703 insn->code = fp->code; 704 BPF_EMIT_JMP; 705 break; 706 707 case BPF_JMP | BPF_JEQ | BPF_K: 708 case BPF_JMP | BPF_JEQ | BPF_X: 709 case BPF_JMP | BPF_JSET | BPF_K: 710 case BPF_JMP | BPF_JSET | BPF_X: 711 case BPF_JMP | BPF_JGT | BPF_K: 712 case BPF_JMP | BPF_JGT | BPF_X: 713 case BPF_JMP | BPF_JGE | BPF_K: 714 case BPF_JMP | BPF_JGE | BPF_X: 715 if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) { 716 /* BPF immediates are signed, zero extend 717 * immediate into tmp register and use it 718 * in compare insn. 719 */ 720 *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k); 721 722 insn->dst_reg = BPF_REG_A; 723 insn->src_reg = BPF_REG_TMP; 724 bpf_src = BPF_X; 725 } else { 726 insn->dst_reg = BPF_REG_A; 727 insn->imm = fp->k; 728 bpf_src = BPF_SRC(fp->code); 729 insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0; 730 } 731 732 /* Common case where 'jump_false' is next insn. 
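			 * e.g. the classic IP-fragment test "jset #0x1fff jt 3 jf 0"
			 * falls through on false, so a single eBPF JSET jumping to
			 * pc + jt + 1 on a match is sufficient.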
*/ 733 if (fp->jf == 0) { 734 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; 735 target = i + fp->jt + 1; 736 BPF_EMIT_JMP; 737 break; 738 } 739 740 /* Convert some jumps when 'jump_true' is next insn. */ 741 if (fp->jt == 0) { 742 switch (BPF_OP(fp->code)) { 743 case BPF_JEQ: 744 insn->code = BPF_JMP | BPF_JNE | bpf_src; 745 break; 746 case BPF_JGT: 747 insn->code = BPF_JMP | BPF_JLE | bpf_src; 748 break; 749 case BPF_JGE: 750 insn->code = BPF_JMP | BPF_JLT | bpf_src; 751 break; 752 default: 753 goto jmp_rest; 754 } 755 756 target = i + fp->jf + 1; 757 BPF_EMIT_JMP; 758 break; 759 } 760 jmp_rest: 761 /* Other jumps are mapped into two insns: Jxx and JA. */ 762 target = i + fp->jt + 1; 763 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; 764 BPF_EMIT_JMP; 765 insn++; 766 767 insn->code = BPF_JMP | BPF_JA; 768 target = i + fp->jf + 1; 769 BPF_EMIT_JMP; 770 break; 771 772 /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */ 773 case BPF_LDX | BPF_MSH | BPF_B: { 774 struct sock_filter tmp = { 775 .code = BPF_LD | BPF_ABS | BPF_B, 776 .k = fp->k, 777 }; 778 779 *seen_ld_abs = true; 780 781 /* X = A */ 782 *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); 783 /* A = BPF_R0 = *(u8 *) (skb->data + K) */ 784 convert_bpf_ld_abs(&tmp, &insn); 785 insn++; 786 /* A &= 0xf */ 787 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); 788 /* A <<= 2 */ 789 *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); 790 /* tmp = X */ 791 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X); 792 /* X = A */ 793 *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); 794 /* A = tmp */ 795 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP); 796 break; 797 } 798 /* RET_K is remaped into 2 insns. RET_A case doesn't need an 799 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A. 800 */ 801 case BPF_RET | BPF_A: 802 case BPF_RET | BPF_K: 803 if (BPF_RVAL(fp->code) == BPF_K) 804 *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0, 805 0, fp->k); 806 *insn = BPF_EXIT_INSN(); 807 break; 808 809 /* Store to stack. */ 810 case BPF_ST: 811 case BPF_STX: 812 stack_off = fp->k * 4 + 4; 813 *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) == 814 BPF_ST ? BPF_REG_A : BPF_REG_X, 815 -stack_off); 816 /* check_load_and_stores() verifies that classic BPF can 817 * load from stack only after write, so tracking 818 * stack_depth for ST|STX insns is enough 819 */ 820 if (new_prog && new_prog->aux->stack_depth < stack_off) 821 new_prog->aux->stack_depth = stack_off; 822 break; 823 824 /* Load from stack. */ 825 case BPF_LD | BPF_MEM: 826 case BPF_LDX | BPF_MEM: 827 stack_off = fp->k * 4 + 4; 828 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? 829 BPF_REG_A : BPF_REG_X, BPF_REG_FP, 830 -stack_off); 831 break; 832 833 /* A = K or X = K */ 834 case BPF_LD | BPF_IMM: 835 case BPF_LDX | BPF_IMM: 836 *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ? 837 BPF_REG_A : BPF_REG_X, fp->k); 838 break; 839 840 /* X = A */ 841 case BPF_MISC | BPF_TAX: 842 *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); 843 break; 844 845 /* A = X */ 846 case BPF_MISC | BPF_TXA: 847 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X); 848 break; 849 850 /* A = skb->len or X = skb->len */ 851 case BPF_LD | BPF_W | BPF_LEN: 852 case BPF_LDX | BPF_W | BPF_LEN: 853 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? 854 BPF_REG_A : BPF_REG_X, BPF_REG_CTX, 855 offsetof(struct sk_buff, len)); 856 break; 857 858 /* Access seccomp_data fields. 
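		 * seccomp_check_filter() rewrites LD|ABS|W loads of
		 * struct seccomp_data into this LDX|ABS|W form, which socket
		 * filters never generate, so K here is an offset into
		 * seccomp_data rather than into packet data.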
*/ 859 case BPF_LDX | BPF_ABS | BPF_W: 860 /* A = *(u32 *) (ctx + K) */ 861 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); 862 break; 863 864 /* Unknown instruction. */ 865 default: 866 goto err; 867 } 868 869 insn++; 870 if (new_prog) 871 memcpy(new_insn, tmp_insns, 872 sizeof(*insn) * (insn - tmp_insns)); 873 new_insn += insn - tmp_insns; 874 } 875 876 if (!new_prog) { 877 /* Only calculating new length. */ 878 *new_len = new_insn - first_insn; 879 if (*seen_ld_abs) 880 *new_len += 4; /* Prologue bits. */ 881 return 0; 882 } 883 884 pass++; 885 if (new_flen != new_insn - first_insn) { 886 new_flen = new_insn - first_insn; 887 if (pass > 2) 888 goto err; 889 goto do_pass; 890 } 891 892 kfree(addrs); 893 BUG_ON(*new_len != new_flen); 894 return 0; 895 err: 896 kfree(addrs); 897 return -EINVAL; 898 } 899 900 /* Security: 901 * 902 * As we dont want to clear mem[] array for each packet going through 903 * __bpf_prog_run(), we check that filter loaded by user never try to read 904 * a cell if not previously written, and we check all branches to be sure 905 * a malicious user doesn't try to abuse us. 906 */ 907 static int check_load_and_stores(const struct sock_filter *filter, int flen) 908 { 909 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */ 910 int pc, ret = 0; 911 912 BUILD_BUG_ON(BPF_MEMWORDS > 16); 913 914 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL); 915 if (!masks) 916 return -ENOMEM; 917 918 memset(masks, 0xff, flen * sizeof(*masks)); 919 920 for (pc = 0; pc < flen; pc++) { 921 memvalid &= masks[pc]; 922 923 switch (filter[pc].code) { 924 case BPF_ST: 925 case BPF_STX: 926 memvalid |= (1 << filter[pc].k); 927 break; 928 case BPF_LD | BPF_MEM: 929 case BPF_LDX | BPF_MEM: 930 if (!(memvalid & (1 << filter[pc].k))) { 931 ret = -EINVAL; 932 goto error; 933 } 934 break; 935 case BPF_JMP | BPF_JA: 936 /* A jump must set masks on target */ 937 masks[pc + 1 + filter[pc].k] &= memvalid; 938 memvalid = ~0; 939 break; 940 case BPF_JMP | BPF_JEQ | BPF_K: 941 case BPF_JMP | BPF_JEQ | BPF_X: 942 case BPF_JMP | BPF_JGE | BPF_K: 943 case BPF_JMP | BPF_JGE | BPF_X: 944 case BPF_JMP | BPF_JGT | BPF_K: 945 case BPF_JMP | BPF_JGT | BPF_X: 946 case BPF_JMP | BPF_JSET | BPF_K: 947 case BPF_JMP | BPF_JSET | BPF_X: 948 /* A jump must set masks on targets */ 949 masks[pc + 1 + filter[pc].jt] &= memvalid; 950 masks[pc + 1 + filter[pc].jf] &= memvalid; 951 memvalid = ~0; 952 break; 953 } 954 } 955 error: 956 kfree(masks); 957 return ret; 958 } 959 960 static bool chk_code_allowed(u16 code_to_probe) 961 { 962 static const bool codes[] = { 963 /* 32 bit ALU operations */ 964 [BPF_ALU | BPF_ADD | BPF_K] = true, 965 [BPF_ALU | BPF_ADD | BPF_X] = true, 966 [BPF_ALU | BPF_SUB | BPF_K] = true, 967 [BPF_ALU | BPF_SUB | BPF_X] = true, 968 [BPF_ALU | BPF_MUL | BPF_K] = true, 969 [BPF_ALU | BPF_MUL | BPF_X] = true, 970 [BPF_ALU | BPF_DIV | BPF_K] = true, 971 [BPF_ALU | BPF_DIV | BPF_X] = true, 972 [BPF_ALU | BPF_MOD | BPF_K] = true, 973 [BPF_ALU | BPF_MOD | BPF_X] = true, 974 [BPF_ALU | BPF_AND | BPF_K] = true, 975 [BPF_ALU | BPF_AND | BPF_X] = true, 976 [BPF_ALU | BPF_OR | BPF_K] = true, 977 [BPF_ALU | BPF_OR | BPF_X] = true, 978 [BPF_ALU | BPF_XOR | BPF_K] = true, 979 [BPF_ALU | BPF_XOR | BPF_X] = true, 980 [BPF_ALU | BPF_LSH | BPF_K] = true, 981 [BPF_ALU | BPF_LSH | BPF_X] = true, 982 [BPF_ALU | BPF_RSH | BPF_K] = true, 983 [BPF_ALU | BPF_RSH | BPF_X] = true, 984 [BPF_ALU | BPF_NEG] = true, 985 /* Load instructions */ 986 [BPF_LD | BPF_W | BPF_ABS] = true, 987 [BPF_LD | BPF_H | 
BPF_ABS] = true, 988 [BPF_LD | BPF_B | BPF_ABS] = true, 989 [BPF_LD | BPF_W | BPF_LEN] = true, 990 [BPF_LD | BPF_W | BPF_IND] = true, 991 [BPF_LD | BPF_H | BPF_IND] = true, 992 [BPF_LD | BPF_B | BPF_IND] = true, 993 [BPF_LD | BPF_IMM] = true, 994 [BPF_LD | BPF_MEM] = true, 995 [BPF_LDX | BPF_W | BPF_LEN] = true, 996 [BPF_LDX | BPF_B | BPF_MSH] = true, 997 [BPF_LDX | BPF_IMM] = true, 998 [BPF_LDX | BPF_MEM] = true, 999 /* Store instructions */ 1000 [BPF_ST] = true, 1001 [BPF_STX] = true, 1002 /* Misc instructions */ 1003 [BPF_MISC | BPF_TAX] = true, 1004 [BPF_MISC | BPF_TXA] = true, 1005 /* Return instructions */ 1006 [BPF_RET | BPF_K] = true, 1007 [BPF_RET | BPF_A] = true, 1008 /* Jump instructions */ 1009 [BPF_JMP | BPF_JA] = true, 1010 [BPF_JMP | BPF_JEQ | BPF_K] = true, 1011 [BPF_JMP | BPF_JEQ | BPF_X] = true, 1012 [BPF_JMP | BPF_JGE | BPF_K] = true, 1013 [BPF_JMP | BPF_JGE | BPF_X] = true, 1014 [BPF_JMP | BPF_JGT | BPF_K] = true, 1015 [BPF_JMP | BPF_JGT | BPF_X] = true, 1016 [BPF_JMP | BPF_JSET | BPF_K] = true, 1017 [BPF_JMP | BPF_JSET | BPF_X] = true, 1018 }; 1019 1020 if (code_to_probe >= ARRAY_SIZE(codes)) 1021 return false; 1022 1023 return codes[code_to_probe]; 1024 } 1025 1026 static bool bpf_check_basics_ok(const struct sock_filter *filter, 1027 unsigned int flen) 1028 { 1029 if (filter == NULL) 1030 return false; 1031 if (flen == 0 || flen > BPF_MAXINSNS) 1032 return false; 1033 1034 return true; 1035 } 1036 1037 /** 1038 * bpf_check_classic - verify socket filter code 1039 * @filter: filter to verify 1040 * @flen: length of filter 1041 * 1042 * Check the user's filter code. If we let some ugly 1043 * filter code slip through kaboom! The filter must contain 1044 * no references or jumps that are out of range, no illegal 1045 * instructions, and must end with a RET instruction. 1046 * 1047 * All jumps are forward as they are not signed. 1048 * 1049 * Returns 0 if the rule set is legal or -EINVAL if not. 1050 */ 1051 static int bpf_check_classic(const struct sock_filter *filter, 1052 unsigned int flen) 1053 { 1054 bool anc_found; 1055 int pc; 1056 1057 /* Check the filter code now */ 1058 for (pc = 0; pc < flen; pc++) { 1059 const struct sock_filter *ftest = &filter[pc]; 1060 1061 /* May we actually operate on this code? */ 1062 if (!chk_code_allowed(ftest->code)) 1063 return -EINVAL; 1064 1065 /* Some instructions need special checks */ 1066 switch (ftest->code) { 1067 case BPF_ALU | BPF_DIV | BPF_K: 1068 case BPF_ALU | BPF_MOD | BPF_K: 1069 /* Check for division by zero */ 1070 if (ftest->k == 0) 1071 return -EINVAL; 1072 break; 1073 case BPF_ALU | BPF_LSH | BPF_K: 1074 case BPF_ALU | BPF_RSH | BPF_K: 1075 if (ftest->k >= 32) 1076 return -EINVAL; 1077 break; 1078 case BPF_LD | BPF_MEM: 1079 case BPF_LDX | BPF_MEM: 1080 case BPF_ST: 1081 case BPF_STX: 1082 /* Check for invalid memory addresses */ 1083 if (ftest->k >= BPF_MEMWORDS) 1084 return -EINVAL; 1085 break; 1086 case BPF_JMP | BPF_JA: 1087 /* Note, the large ftest->k might cause loops. 1088 * Compare this with conditional jumps below, 1089 * where offsets are limited. 
--ANK (981016) 1090 */ 1091 if (ftest->k >= (unsigned int)(flen - pc - 1)) 1092 return -EINVAL; 1093 break; 1094 case BPF_JMP | BPF_JEQ | BPF_K: 1095 case BPF_JMP | BPF_JEQ | BPF_X: 1096 case BPF_JMP | BPF_JGE | BPF_K: 1097 case BPF_JMP | BPF_JGE | BPF_X: 1098 case BPF_JMP | BPF_JGT | BPF_K: 1099 case BPF_JMP | BPF_JGT | BPF_X: 1100 case BPF_JMP | BPF_JSET | BPF_K: 1101 case BPF_JMP | BPF_JSET | BPF_X: 1102 /* Both conditionals must be safe */ 1103 if (pc + ftest->jt + 1 >= flen || 1104 pc + ftest->jf + 1 >= flen) 1105 return -EINVAL; 1106 break; 1107 case BPF_LD | BPF_W | BPF_ABS: 1108 case BPF_LD | BPF_H | BPF_ABS: 1109 case BPF_LD | BPF_B | BPF_ABS: 1110 anc_found = false; 1111 if (bpf_anc_helper(ftest) & BPF_ANC) 1112 anc_found = true; 1113 /* Ancillary operation unknown or unsupported */ 1114 if (anc_found == false && ftest->k >= SKF_AD_OFF) 1115 return -EINVAL; 1116 } 1117 } 1118 1119 /* Last instruction must be a RET code */ 1120 switch (filter[flen - 1].code) { 1121 case BPF_RET | BPF_K: 1122 case BPF_RET | BPF_A: 1123 return check_load_and_stores(filter, flen); 1124 } 1125 1126 return -EINVAL; 1127 } 1128 1129 static int bpf_prog_store_orig_filter(struct bpf_prog *fp, 1130 const struct sock_fprog *fprog) 1131 { 1132 unsigned int fsize = bpf_classic_proglen(fprog); 1133 struct sock_fprog_kern *fkprog; 1134 1135 fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL); 1136 if (!fp->orig_prog) 1137 return -ENOMEM; 1138 1139 fkprog = fp->orig_prog; 1140 fkprog->len = fprog->len; 1141 1142 fkprog->filter = kmemdup(fp->insns, fsize, 1143 GFP_KERNEL | __GFP_NOWARN); 1144 if (!fkprog->filter) { 1145 kfree(fp->orig_prog); 1146 return -ENOMEM; 1147 } 1148 1149 return 0; 1150 } 1151 1152 static void bpf_release_orig_filter(struct bpf_prog *fp) 1153 { 1154 struct sock_fprog_kern *fprog = fp->orig_prog; 1155 1156 if (fprog) { 1157 kfree(fprog->filter); 1158 kfree(fprog); 1159 } 1160 } 1161 1162 static void __bpf_prog_release(struct bpf_prog *prog) 1163 { 1164 if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) { 1165 bpf_prog_put(prog); 1166 } else { 1167 bpf_release_orig_filter(prog); 1168 bpf_prog_free(prog); 1169 } 1170 } 1171 1172 static void __sk_filter_release(struct sk_filter *fp) 1173 { 1174 __bpf_prog_release(fp->prog); 1175 kfree(fp); 1176 } 1177 1178 /** 1179 * sk_filter_release_rcu - Release a socket filter by rcu_head 1180 * @rcu: rcu_head that contains the sk_filter to free 1181 */ 1182 static void sk_filter_release_rcu(struct rcu_head *rcu) 1183 { 1184 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 1185 1186 __sk_filter_release(fp); 1187 } 1188 1189 /** 1190 * sk_filter_release - release a socket filter 1191 * @fp: filter to remove 1192 * 1193 * Remove a filter from a socket and release its resources. 
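 * The filter may still be in use by RCU readers at this point, so the
 * actual teardown is deferred to sk_filter_release_rcu() via call_rcu().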
1194 */ 1195 static void sk_filter_release(struct sk_filter *fp) 1196 { 1197 if (refcount_dec_and_test(&fp->refcnt)) 1198 call_rcu(&fp->rcu, sk_filter_release_rcu); 1199 } 1200 1201 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) 1202 { 1203 u32 filter_size = bpf_prog_size(fp->prog->len); 1204 1205 atomic_sub(filter_size, &sk->sk_omem_alloc); 1206 sk_filter_release(fp); 1207 } 1208 1209 /* try to charge the socket memory if there is space available 1210 * return true on success 1211 */ 1212 static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) 1213 { 1214 u32 filter_size = bpf_prog_size(fp->prog->len); 1215 1216 /* same check as in sock_kmalloc() */ 1217 if (filter_size <= sysctl_optmem_max && 1218 atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) { 1219 atomic_add(filter_size, &sk->sk_omem_alloc); 1220 return true; 1221 } 1222 return false; 1223 } 1224 1225 bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) 1226 { 1227 if (!refcount_inc_not_zero(&fp->refcnt)) 1228 return false; 1229 1230 if (!__sk_filter_charge(sk, fp)) { 1231 sk_filter_release(fp); 1232 return false; 1233 } 1234 return true; 1235 } 1236 1237 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) 1238 { 1239 struct sock_filter *old_prog; 1240 struct bpf_prog *old_fp; 1241 int err, new_len, old_len = fp->len; 1242 bool seen_ld_abs = false; 1243 1244 /* We are free to overwrite insns et al right here as it 1245 * won't be used at this point in time anymore internally 1246 * after the migration to the internal BPF instruction 1247 * representation. 1248 */ 1249 BUILD_BUG_ON(sizeof(struct sock_filter) != 1250 sizeof(struct bpf_insn)); 1251 1252 /* Conversion cannot happen on overlapping memory areas, 1253 * so we need to keep the user BPF around until the 2nd 1254 * pass. At this time, the user BPF is stored in fp->insns. 1255 */ 1256 old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter), 1257 GFP_KERNEL | __GFP_NOWARN); 1258 if (!old_prog) { 1259 err = -ENOMEM; 1260 goto out_err; 1261 } 1262 1263 /* 1st pass: calculate the new program length. */ 1264 err = bpf_convert_filter(old_prog, old_len, NULL, &new_len, 1265 &seen_ld_abs); 1266 if (err) 1267 goto out_err_free; 1268 1269 /* Expand fp for appending the new filter representation. */ 1270 old_fp = fp; 1271 fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0); 1272 if (!fp) { 1273 /* The old_fp is still around in case we couldn't 1274 * allocate new memory, so uncharge on that one. 1275 */ 1276 fp = old_fp; 1277 err = -ENOMEM; 1278 goto out_err_free; 1279 } 1280 1281 fp->len = new_len; 1282 1283 /* 2nd pass: remap sock_filter insns into bpf_insn insns. */ 1284 err = bpf_convert_filter(old_prog, old_len, fp, &new_len, 1285 &seen_ld_abs); 1286 if (err) 1287 /* 2nd bpf_convert_filter() can fail only if it fails 1288 * to allocate memory, remapping must succeed. Note, 1289 * that at this time old_fp has already been released 1290 * by krealloc(). 
1291 */ 1292 goto out_err_free; 1293 1294 fp = bpf_prog_select_runtime(fp, &err); 1295 if (err) 1296 goto out_err_free; 1297 1298 kfree(old_prog); 1299 return fp; 1300 1301 out_err_free: 1302 kfree(old_prog); 1303 out_err: 1304 __bpf_prog_release(fp); 1305 return ERR_PTR(err); 1306 } 1307 1308 static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp, 1309 bpf_aux_classic_check_t trans) 1310 { 1311 int err; 1312 1313 fp->bpf_func = NULL; 1314 fp->jited = 0; 1315 1316 err = bpf_check_classic(fp->insns, fp->len); 1317 if (err) { 1318 __bpf_prog_release(fp); 1319 return ERR_PTR(err); 1320 } 1321 1322 /* There might be additional checks and transformations 1323 * needed on classic filters, f.e. in case of seccomp. 1324 */ 1325 if (trans) { 1326 err = trans(fp->insns, fp->len); 1327 if (err) { 1328 __bpf_prog_release(fp); 1329 return ERR_PTR(err); 1330 } 1331 } 1332 1333 /* Probe if we can JIT compile the filter and if so, do 1334 * the compilation of the filter. 1335 */ 1336 bpf_jit_compile(fp); 1337 1338 /* JIT compiler couldn't process this filter, so do the 1339 * internal BPF translation for the optimized interpreter. 1340 */ 1341 if (!fp->jited) 1342 fp = bpf_migrate_filter(fp); 1343 1344 return fp; 1345 } 1346 1347 /** 1348 * bpf_prog_create - create an unattached filter 1349 * @pfp: the unattached filter that is created 1350 * @fprog: the filter program 1351 * 1352 * Create a filter independent of any socket. We first run some 1353 * sanity checks on it to make sure it does not explode on us later. 1354 * If an error occurs or there is insufficient memory for the filter 1355 * a negative errno code is returned. On success the return is zero. 1356 */ 1357 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) 1358 { 1359 unsigned int fsize = bpf_classic_proglen(fprog); 1360 struct bpf_prog *fp; 1361 1362 /* Make sure new filter is there and in the right amounts. */ 1363 if (!bpf_check_basics_ok(fprog->filter, fprog->len)) 1364 return -EINVAL; 1365 1366 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); 1367 if (!fp) 1368 return -ENOMEM; 1369 1370 memcpy(fp->insns, fprog->filter, fsize); 1371 1372 fp->len = fprog->len; 1373 /* Since unattached filters are not copied back to user 1374 * space through sk_get_filter(), we do not need to hold 1375 * a copy here, and can spare us the work. 1376 */ 1377 fp->orig_prog = NULL; 1378 1379 /* bpf_prepare_filter() already takes care of freeing 1380 * memory in case something goes wrong. 1381 */ 1382 fp = bpf_prepare_filter(fp, NULL); 1383 if (IS_ERR(fp)) 1384 return PTR_ERR(fp); 1385 1386 *pfp = fp; 1387 return 0; 1388 } 1389 EXPORT_SYMBOL_GPL(bpf_prog_create); 1390 1391 /** 1392 * bpf_prog_create_from_user - create an unattached filter from user buffer 1393 * @pfp: the unattached filter that is created 1394 * @fprog: the filter program 1395 * @trans: post-classic verifier transformation handler 1396 * @save_orig: save classic BPF program 1397 * 1398 * This function effectively does the same as bpf_prog_create(), only 1399 * that it builds up its insns buffer from user space provided buffer. 1400 * It also allows for passing a bpf_aux_classic_check_t handler. 1401 */ 1402 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, 1403 bpf_aux_classic_check_t trans, bool save_orig) 1404 { 1405 unsigned int fsize = bpf_classic_proglen(fprog); 1406 struct bpf_prog *fp; 1407 int err; 1408 1409 /* Make sure new filter is there and in the right amounts. 
*/ 1410 if (!bpf_check_basics_ok(fprog->filter, fprog->len)) 1411 return -EINVAL; 1412 1413 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); 1414 if (!fp) 1415 return -ENOMEM; 1416 1417 if (copy_from_user(fp->insns, fprog->filter, fsize)) { 1418 __bpf_prog_free(fp); 1419 return -EFAULT; 1420 } 1421 1422 fp->len = fprog->len; 1423 fp->orig_prog = NULL; 1424 1425 if (save_orig) { 1426 err = bpf_prog_store_orig_filter(fp, fprog); 1427 if (err) { 1428 __bpf_prog_free(fp); 1429 return -ENOMEM; 1430 } 1431 } 1432 1433 /* bpf_prepare_filter() already takes care of freeing 1434 * memory in case something goes wrong. 1435 */ 1436 fp = bpf_prepare_filter(fp, trans); 1437 if (IS_ERR(fp)) 1438 return PTR_ERR(fp); 1439 1440 *pfp = fp; 1441 return 0; 1442 } 1443 EXPORT_SYMBOL_GPL(bpf_prog_create_from_user); 1444 1445 void bpf_prog_destroy(struct bpf_prog *fp) 1446 { 1447 __bpf_prog_release(fp); 1448 } 1449 EXPORT_SYMBOL_GPL(bpf_prog_destroy); 1450 1451 static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) 1452 { 1453 struct sk_filter *fp, *old_fp; 1454 1455 fp = kmalloc(sizeof(*fp), GFP_KERNEL); 1456 if (!fp) 1457 return -ENOMEM; 1458 1459 fp->prog = prog; 1460 1461 if (!__sk_filter_charge(sk, fp)) { 1462 kfree(fp); 1463 return -ENOMEM; 1464 } 1465 refcount_set(&fp->refcnt, 1); 1466 1467 old_fp = rcu_dereference_protected(sk->sk_filter, 1468 lockdep_sock_is_held(sk)); 1469 rcu_assign_pointer(sk->sk_filter, fp); 1470 1471 if (old_fp) 1472 sk_filter_uncharge(sk, old_fp); 1473 1474 return 0; 1475 } 1476 1477 static 1478 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) 1479 { 1480 unsigned int fsize = bpf_classic_proglen(fprog); 1481 struct bpf_prog *prog; 1482 int err; 1483 1484 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 1485 return ERR_PTR(-EPERM); 1486 1487 /* Make sure new filter is there and in the right amounts. */ 1488 if (!bpf_check_basics_ok(fprog->filter, fprog->len)) 1489 return ERR_PTR(-EINVAL); 1490 1491 prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); 1492 if (!prog) 1493 return ERR_PTR(-ENOMEM); 1494 1495 if (copy_from_user(prog->insns, fprog->filter, fsize)) { 1496 __bpf_prog_free(prog); 1497 return ERR_PTR(-EFAULT); 1498 } 1499 1500 prog->len = fprog->len; 1501 1502 err = bpf_prog_store_orig_filter(prog, fprog); 1503 if (err) { 1504 __bpf_prog_free(prog); 1505 return ERR_PTR(-ENOMEM); 1506 } 1507 1508 /* bpf_prepare_filter() already takes care of freeing 1509 * memory in case something goes wrong. 1510 */ 1511 return bpf_prepare_filter(prog, NULL); 1512 } 1513 1514 /** 1515 * sk_attach_filter - attach a socket filter 1516 * @fprog: the filter program 1517 * @sk: the socket to use 1518 * 1519 * Attach the user's filter code. We first run some sanity checks on 1520 * it to make sure it does not explode on us later. If an error 1521 * occurs or there is insufficient memory for the filter a negative 1522 * errno code is returned. On success the return is zero. 
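 * The caller is expected to hold the socket lock; the previous filter is
 * swapped out under rcu_dereference_protected() with
 * lockdep_sock_is_held(sk).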
1523 */ 1524 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) 1525 { 1526 struct bpf_prog *prog = __get_filter(fprog, sk); 1527 int err; 1528 1529 if (IS_ERR(prog)) 1530 return PTR_ERR(prog); 1531 1532 err = __sk_attach_prog(prog, sk); 1533 if (err < 0) { 1534 __bpf_prog_release(prog); 1535 return err; 1536 } 1537 1538 return 0; 1539 } 1540 EXPORT_SYMBOL_GPL(sk_attach_filter); 1541 1542 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) 1543 { 1544 struct bpf_prog *prog = __get_filter(fprog, sk); 1545 int err; 1546 1547 if (IS_ERR(prog)) 1548 return PTR_ERR(prog); 1549 1550 if (bpf_prog_size(prog->len) > sysctl_optmem_max) 1551 err = -ENOMEM; 1552 else 1553 err = reuseport_attach_prog(sk, prog); 1554 1555 if (err) 1556 __bpf_prog_release(prog); 1557 1558 return err; 1559 } 1560 1561 static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) 1562 { 1563 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 1564 return ERR_PTR(-EPERM); 1565 1566 return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); 1567 } 1568 1569 int sk_attach_bpf(u32 ufd, struct sock *sk) 1570 { 1571 struct bpf_prog *prog = __get_bpf(ufd, sk); 1572 int err; 1573 1574 if (IS_ERR(prog)) 1575 return PTR_ERR(prog); 1576 1577 err = __sk_attach_prog(prog, sk); 1578 if (err < 0) { 1579 bpf_prog_put(prog); 1580 return err; 1581 } 1582 1583 return 0; 1584 } 1585 1586 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) 1587 { 1588 struct bpf_prog *prog; 1589 int err; 1590 1591 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 1592 return -EPERM; 1593 1594 prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); 1595 if (PTR_ERR(prog) == -EINVAL) 1596 prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT); 1597 if (IS_ERR(prog)) 1598 return PTR_ERR(prog); 1599 1600 if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) { 1601 /* Like other non BPF_PROG_TYPE_SOCKET_FILTER 1602 * bpf prog (e.g. sockmap). It depends on the 1603 * limitation imposed by bpf_prog_load(). 1604 * Hence, sysctl_optmem_max is not checked. 
1605 */ 1606 if ((sk->sk_type != SOCK_STREAM && 1607 sk->sk_type != SOCK_DGRAM) || 1608 (sk->sk_protocol != IPPROTO_UDP && 1609 sk->sk_protocol != IPPROTO_TCP) || 1610 (sk->sk_family != AF_INET && 1611 sk->sk_family != AF_INET6)) { 1612 err = -ENOTSUPP; 1613 goto err_prog_put; 1614 } 1615 } else { 1616 /* BPF_PROG_TYPE_SOCKET_FILTER */ 1617 if (bpf_prog_size(prog->len) > sysctl_optmem_max) { 1618 err = -ENOMEM; 1619 goto err_prog_put; 1620 } 1621 } 1622 1623 err = reuseport_attach_prog(sk, prog); 1624 err_prog_put: 1625 if (err) 1626 bpf_prog_put(prog); 1627 1628 return err; 1629 } 1630 1631 void sk_reuseport_prog_free(struct bpf_prog *prog) 1632 { 1633 if (!prog) 1634 return; 1635 1636 if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) 1637 bpf_prog_put(prog); 1638 else 1639 bpf_prog_destroy(prog); 1640 } 1641 1642 struct bpf_scratchpad { 1643 union { 1644 __be32 diff[MAX_BPF_STACK / sizeof(__be32)]; 1645 u8 buff[MAX_BPF_STACK]; 1646 }; 1647 }; 1648 1649 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); 1650 1651 static inline int __bpf_try_make_writable(struct sk_buff *skb, 1652 unsigned int write_len) 1653 { 1654 return skb_ensure_writable(skb, write_len); 1655 } 1656 1657 static inline int bpf_try_make_writable(struct sk_buff *skb, 1658 unsigned int write_len) 1659 { 1660 int err = __bpf_try_make_writable(skb, write_len); 1661 1662 bpf_compute_data_pointers(skb); 1663 return err; 1664 } 1665 1666 static int bpf_try_make_head_writable(struct sk_buff *skb) 1667 { 1668 return bpf_try_make_writable(skb, skb_headlen(skb)); 1669 } 1670 1671 static inline void bpf_push_mac_rcsum(struct sk_buff *skb) 1672 { 1673 if (skb_at_tc_ingress(skb)) 1674 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); 1675 } 1676 1677 static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) 1678 { 1679 if (skb_at_tc_ingress(skb)) 1680 skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); 1681 } 1682 1683 BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, 1684 const void *, from, u32, len, u64, flags) 1685 { 1686 void *ptr; 1687 1688 if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) 1689 return -EINVAL; 1690 if (unlikely(offset > 0xffff)) 1691 return -EFAULT; 1692 if (unlikely(bpf_try_make_writable(skb, offset + len))) 1693 return -EFAULT; 1694 1695 ptr = skb->data + offset; 1696 if (flags & BPF_F_RECOMPUTE_CSUM) 1697 __skb_postpull_rcsum(skb, ptr, len, offset); 1698 1699 memcpy(ptr, from, len); 1700 1701 if (flags & BPF_F_RECOMPUTE_CSUM) 1702 __skb_postpush_rcsum(skb, ptr, len, offset); 1703 if (flags & BPF_F_INVALIDATE_HASH) 1704 skb_clear_hash(skb); 1705 1706 return 0; 1707 } 1708 1709 static const struct bpf_func_proto bpf_skb_store_bytes_proto = { 1710 .func = bpf_skb_store_bytes, 1711 .gpl_only = false, 1712 .ret_type = RET_INTEGER, 1713 .arg1_type = ARG_PTR_TO_CTX, 1714 .arg2_type = ARG_ANYTHING, 1715 .arg3_type = ARG_PTR_TO_MEM, 1716 .arg4_type = ARG_CONST_SIZE, 1717 .arg5_type = ARG_ANYTHING, 1718 }; 1719 1720 BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, 1721 void *, to, u32, len) 1722 { 1723 void *ptr; 1724 1725 if (unlikely(offset > 0xffff)) 1726 goto err_clear; 1727 1728 ptr = skb_header_pointer(skb, offset, len, to); 1729 if (unlikely(!ptr)) 1730 goto err_clear; 1731 if (ptr != to) 1732 memcpy(to, ptr, len); 1733 1734 return 0; 1735 err_clear: 1736 memset(to, 0, len); 1737 return -EFAULT; 1738 } 1739 1740 static const struct bpf_func_proto bpf_skb_load_bytes_proto = { 1741 .func = bpf_skb_load_bytes, 1742 .gpl_only = false, 
1743 .ret_type = RET_INTEGER, 1744 .arg1_type = ARG_PTR_TO_CTX, 1745 .arg2_type = ARG_ANYTHING, 1746 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 1747 .arg4_type = ARG_CONST_SIZE, 1748 }; 1749 1750 BPF_CALL_4(bpf_flow_dissector_load_bytes, 1751 const struct bpf_flow_dissector *, ctx, u32, offset, 1752 void *, to, u32, len) 1753 { 1754 void *ptr; 1755 1756 if (unlikely(offset > 0xffff)) 1757 goto err_clear; 1758 1759 if (unlikely(!ctx->skb)) 1760 goto err_clear; 1761 1762 ptr = skb_header_pointer(ctx->skb, offset, len, to); 1763 if (unlikely(!ptr)) 1764 goto err_clear; 1765 if (ptr != to) 1766 memcpy(to, ptr, len); 1767 1768 return 0; 1769 err_clear: 1770 memset(to, 0, len); 1771 return -EFAULT; 1772 } 1773 1774 static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = { 1775 .func = bpf_flow_dissector_load_bytes, 1776 .gpl_only = false, 1777 .ret_type = RET_INTEGER, 1778 .arg1_type = ARG_PTR_TO_CTX, 1779 .arg2_type = ARG_ANYTHING, 1780 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 1781 .arg4_type = ARG_CONST_SIZE, 1782 }; 1783 1784 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, 1785 u32, offset, void *, to, u32, len, u32, start_header) 1786 { 1787 u8 *end = skb_tail_pointer(skb); 1788 u8 *start, *ptr; 1789 1790 if (unlikely(offset > 0xffff)) 1791 goto err_clear; 1792 1793 switch (start_header) { 1794 case BPF_HDR_START_MAC: 1795 if (unlikely(!skb_mac_header_was_set(skb))) 1796 goto err_clear; 1797 start = skb_mac_header(skb); 1798 break; 1799 case BPF_HDR_START_NET: 1800 start = skb_network_header(skb); 1801 break; 1802 default: 1803 goto err_clear; 1804 } 1805 1806 ptr = start + offset; 1807 1808 if (likely(ptr + len <= end)) { 1809 memcpy(to, ptr, len); 1810 return 0; 1811 } 1812 1813 err_clear: 1814 memset(to, 0, len); 1815 return -EFAULT; 1816 } 1817 1818 static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = { 1819 .func = bpf_skb_load_bytes_relative, 1820 .gpl_only = false, 1821 .ret_type = RET_INTEGER, 1822 .arg1_type = ARG_PTR_TO_CTX, 1823 .arg2_type = ARG_ANYTHING, 1824 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 1825 .arg4_type = ARG_CONST_SIZE, 1826 .arg5_type = ARG_ANYTHING, 1827 }; 1828 1829 BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) 1830 { 1831 /* Idea is the following: should the needed direct read/write 1832 * test fail during runtime, we can pull in more data and redo 1833 * again, since implicitly, we invalidate previous checks here. 1834 * 1835 * Or, since we know how much we need to make read/writeable, 1836 * this can be done once at the program beginning for direct 1837 * access case. By this we overcome limitations of only current 1838 * headroom being accessible. 1839 */ 1840 return bpf_try_make_writable(skb, len ? : skb_headlen(skb)); 1841 } 1842 1843 static const struct bpf_func_proto bpf_skb_pull_data_proto = { 1844 .func = bpf_skb_pull_data, 1845 .gpl_only = false, 1846 .ret_type = RET_INTEGER, 1847 .arg1_type = ARG_PTR_TO_CTX, 1848 .arg2_type = ARG_ANYTHING, 1849 }; 1850 1851 BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk) 1852 { 1853 return sk_fullsock(sk) ? 
(unsigned long)sk : (unsigned long)NULL; 1854 } 1855 1856 static const struct bpf_func_proto bpf_sk_fullsock_proto = { 1857 .func = bpf_sk_fullsock, 1858 .gpl_only = false, 1859 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 1860 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 1861 }; 1862 1863 static inline int sk_skb_try_make_writable(struct sk_buff *skb, 1864 unsigned int write_len) 1865 { 1866 int err = __bpf_try_make_writable(skb, write_len); 1867 1868 bpf_compute_data_end_sk_skb(skb); 1869 return err; 1870 } 1871 1872 BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) 1873 { 1874 /* Idea is the following: should the needed direct read/write 1875 * test fail during runtime, we can pull in more data and redo 1876 * again, since implicitly, we invalidate previous checks here. 1877 * 1878 * Or, since we know how much we need to make read/writeable, 1879 * this can be done once at the program beginning for direct 1880 * access case. By this we overcome limitations of only current 1881 * headroom being accessible. 1882 */ 1883 return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb)); 1884 } 1885 1886 static const struct bpf_func_proto sk_skb_pull_data_proto = { 1887 .func = sk_skb_pull_data, 1888 .gpl_only = false, 1889 .ret_type = RET_INTEGER, 1890 .arg1_type = ARG_PTR_TO_CTX, 1891 .arg2_type = ARG_ANYTHING, 1892 }; 1893 1894 BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, 1895 u64, from, u64, to, u64, flags) 1896 { 1897 __sum16 *ptr; 1898 1899 if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) 1900 return -EINVAL; 1901 if (unlikely(offset > 0xffff || offset & 1)) 1902 return -EFAULT; 1903 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) 1904 return -EFAULT; 1905 1906 ptr = (__sum16 *)(skb->data + offset); 1907 switch (flags & BPF_F_HDR_FIELD_MASK) { 1908 case 0: 1909 if (unlikely(from != 0)) 1910 return -EINVAL; 1911 1912 csum_replace_by_diff(ptr, to); 1913 break; 1914 case 2: 1915 csum_replace2(ptr, from, to); 1916 break; 1917 case 4: 1918 csum_replace4(ptr, from, to); 1919 break; 1920 default: 1921 return -EINVAL; 1922 } 1923 1924 return 0; 1925 } 1926 1927 static const struct bpf_func_proto bpf_l3_csum_replace_proto = { 1928 .func = bpf_l3_csum_replace, 1929 .gpl_only = false, 1930 .ret_type = RET_INTEGER, 1931 .arg1_type = ARG_PTR_TO_CTX, 1932 .arg2_type = ARG_ANYTHING, 1933 .arg3_type = ARG_ANYTHING, 1934 .arg4_type = ARG_ANYTHING, 1935 .arg5_type = ARG_ANYTHING, 1936 }; 1937 1938 BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, 1939 u64, from, u64, to, u64, flags) 1940 { 1941 bool is_pseudo = flags & BPF_F_PSEUDO_HDR; 1942 bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; 1943 bool do_mforce = flags & BPF_F_MARK_ENFORCE; 1944 __sum16 *ptr; 1945 1946 if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | 1947 BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) 1948 return -EINVAL; 1949 if (unlikely(offset > 0xffff || offset & 1)) 1950 return -EFAULT; 1951 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) 1952 return -EFAULT; 1953 1954 ptr = (__sum16 *)(skb->data + offset); 1955 if (is_mmzero && !do_mforce && !*ptr) 1956 return 0; 1957 1958 switch (flags & BPF_F_HDR_FIELD_MASK) { 1959 case 0: 1960 if (unlikely(from != 0)) 1961 return -EINVAL; 1962 1963 inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); 1964 break; 1965 case 2: 1966 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); 1967 break; 1968 case 4: 1969 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); 1970 break; 1971 default: 1972 return 
-EINVAL; 1973 } 1974 1975 if (is_mmzero && !*ptr) 1976 *ptr = CSUM_MANGLED_0; 1977 return 0; 1978 } 1979 1980 static const struct bpf_func_proto bpf_l4_csum_replace_proto = { 1981 .func = bpf_l4_csum_replace, 1982 .gpl_only = false, 1983 .ret_type = RET_INTEGER, 1984 .arg1_type = ARG_PTR_TO_CTX, 1985 .arg2_type = ARG_ANYTHING, 1986 .arg3_type = ARG_ANYTHING, 1987 .arg4_type = ARG_ANYTHING, 1988 .arg5_type = ARG_ANYTHING, 1989 }; 1990 1991 BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, 1992 __be32 *, to, u32, to_size, __wsum, seed) 1993 { 1994 struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); 1995 u32 diff_size = from_size + to_size; 1996 int i, j = 0; 1997 1998 /* This is quite flexible, some examples: 1999 * 2000 * from_size == 0, to_size > 0, seed := csum --> pushing data 2001 * from_size > 0, to_size == 0, seed := csum --> pulling data 2002 * from_size > 0, to_size > 0, seed := 0 --> diffing data 2003 * 2004 * Even for diffing, from_size and to_size don't need to be equal. 2005 */ 2006 if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) || 2007 diff_size > sizeof(sp->diff))) 2008 return -EINVAL; 2009 2010 for (i = 0; i < from_size / sizeof(__be32); i++, j++) 2011 sp->diff[j] = ~from[i]; 2012 for (i = 0; i < to_size / sizeof(__be32); i++, j++) 2013 sp->diff[j] = to[i]; 2014 2015 return csum_partial(sp->diff, diff_size, seed); 2016 } 2017 2018 static const struct bpf_func_proto bpf_csum_diff_proto = { 2019 .func = bpf_csum_diff, 2020 .gpl_only = false, 2021 .pkt_access = true, 2022 .ret_type = RET_INTEGER, 2023 .arg1_type = ARG_PTR_TO_MEM_OR_NULL, 2024 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 2025 .arg3_type = ARG_PTR_TO_MEM_OR_NULL, 2026 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 2027 .arg5_type = ARG_ANYTHING, 2028 }; 2029 2030 BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum) 2031 { 2032 /* The interface is to be used in combination with bpf_csum_diff() 2033 * for direct packet writes. csum rotation for alignment as well 2034 * as emulating csum_sub() can be done from the eBPF program. 2035 */ 2036 if (skb->ip_summed == CHECKSUM_COMPLETE) 2037 return (skb->csum = csum_add(skb->csum, csum)); 2038 2039 return -ENOTSUPP; 2040 } 2041 2042 static const struct bpf_func_proto bpf_csum_update_proto = { 2043 .func = bpf_csum_update, 2044 .gpl_only = false, 2045 .ret_type = RET_INTEGER, 2046 .arg1_type = ARG_PTR_TO_CTX, 2047 .arg2_type = ARG_ANYTHING, 2048 }; 2049 2050 BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level) 2051 { 2052 /* The interface is to be used in combination with bpf_skb_adjust_room() 2053 * for encap/decap of packet headers when BPF_F_ADJ_ROOM_NO_CSUM_RESET 2054 * is passed as flags, for example. 2055 */ 2056 switch (level) { 2057 case BPF_CSUM_LEVEL_INC: 2058 __skb_incr_checksum_unnecessary(skb); 2059 break; 2060 case BPF_CSUM_LEVEL_DEC: 2061 __skb_decr_checksum_unnecessary(skb); 2062 break; 2063 case BPF_CSUM_LEVEL_RESET: 2064 __skb_reset_checksum_unnecessary(skb); 2065 break; 2066 case BPF_CSUM_LEVEL_QUERY: 2067 return skb->ip_summed == CHECKSUM_UNNECESSARY ? 
2068 skb->csum_level : -EACCES; 2069 default: 2070 return -EINVAL; 2071 } 2072 2073 return 0; 2074 } 2075 2076 static const struct bpf_func_proto bpf_csum_level_proto = { 2077 .func = bpf_csum_level, 2078 .gpl_only = false, 2079 .ret_type = RET_INTEGER, 2080 .arg1_type = ARG_PTR_TO_CTX, 2081 .arg2_type = ARG_ANYTHING, 2082 }; 2083 2084 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) 2085 { 2086 return dev_forward_skb_nomtu(dev, skb); 2087 } 2088 2089 static inline int __bpf_rx_skb_no_mac(struct net_device *dev, 2090 struct sk_buff *skb) 2091 { 2092 int ret = ____dev_forward_skb(dev, skb, false); 2093 2094 if (likely(!ret)) { 2095 skb->dev = dev; 2096 ret = netif_rx(skb); 2097 } 2098 2099 return ret; 2100 } 2101 2102 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) 2103 { 2104 int ret; 2105 2106 if (dev_xmit_recursion()) { 2107 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2108 kfree_skb(skb); 2109 return -ENETDOWN; 2110 } 2111 2112 skb->dev = dev; 2113 skb->tstamp = 0; 2114 2115 dev_xmit_recursion_inc(); 2116 ret = dev_queue_xmit(skb); 2117 dev_xmit_recursion_dec(); 2118 2119 return ret; 2120 } 2121 2122 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, 2123 u32 flags) 2124 { 2125 unsigned int mlen = skb_network_offset(skb); 2126 2127 if (mlen) { 2128 __skb_pull(skb, mlen); 2129 2130 /* At ingress, the mac header has already been pulled once. 2131 * At egress, skb_postpull_rcsum() has to be done in case the 2132 * skb originated from ingress (i.e. a forwarded skb) 2133 * to ensure that rcsum starts at the network header. 2134 */ 2135 if (!skb_at_tc_ingress(skb)) 2136 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 2137 } 2138 skb_pop_mac_header(skb); 2139 skb_reset_mac_len(skb); 2140 return flags & BPF_F_INGRESS ? 2141 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); 2142 } 2143 2144 static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, 2145 u32 flags) 2146 { 2147 /* Verify that a link layer header is carried */ 2148 if (unlikely(skb->mac_header >= skb->network_header)) { 2149 kfree_skb(skb); 2150 return -ERANGE; 2151 } 2152 2153 bpf_push_mac_rcsum(skb); 2154 return flags & BPF_F_INGRESS ?
2155 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); 2156 } 2157 2158 static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, 2159 u32 flags) 2160 { 2161 if (dev_is_mac_header_xmit(dev)) 2162 return __bpf_redirect_common(skb, dev, flags); 2163 else 2164 return __bpf_redirect_no_mac(skb, dev, flags); 2165 } 2166 2167 #if IS_ENABLED(CONFIG_IPV6) 2168 static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, 2169 struct net_device *dev, struct bpf_nh_params *nh) 2170 { 2171 u32 hh_len = LL_RESERVED_SPACE(dev); 2172 const struct in6_addr *nexthop; 2173 struct dst_entry *dst = NULL; 2174 struct neighbour *neigh; 2175 2176 if (dev_xmit_recursion()) { 2177 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2178 goto out_drop; 2179 } 2180 2181 skb->dev = dev; 2182 skb->tstamp = 0; 2183 2184 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 2185 struct sk_buff *skb2; 2186 2187 skb2 = skb_realloc_headroom(skb, hh_len); 2188 if (unlikely(!skb2)) { 2189 kfree_skb(skb); 2190 return -ENOMEM; 2191 } 2192 if (skb->sk) 2193 skb_set_owner_w(skb2, skb->sk); 2194 consume_skb(skb); 2195 skb = skb2; 2196 } 2197 2198 rcu_read_lock_bh(); 2199 if (!nh) { 2200 dst = skb_dst(skb); 2201 nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst), 2202 &ipv6_hdr(skb)->daddr); 2203 } else { 2204 nexthop = &nh->ipv6_nh; 2205 } 2206 neigh = ip_neigh_gw6(dev, nexthop); 2207 if (likely(!IS_ERR(neigh))) { 2208 int ret; 2209 2210 sock_confirm_neigh(skb, neigh); 2211 dev_xmit_recursion_inc(); 2212 ret = neigh_output(neigh, skb, false); 2213 dev_xmit_recursion_dec(); 2214 rcu_read_unlock_bh(); 2215 return ret; 2216 } 2217 rcu_read_unlock_bh(); 2218 if (dst) 2219 IP6_INC_STATS(dev_net(dst->dev), 2220 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 2221 out_drop: 2222 kfree_skb(skb); 2223 return -ENETDOWN; 2224 } 2225 2226 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, 2227 struct bpf_nh_params *nh) 2228 { 2229 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 2230 struct net *net = dev_net(dev); 2231 int err, ret = NET_XMIT_DROP; 2232 2233 if (!nh) { 2234 struct dst_entry *dst; 2235 struct flowi6 fl6 = { 2236 .flowi6_flags = FLOWI_FLAG_ANYSRC, 2237 .flowi6_mark = skb->mark, 2238 .flowlabel = ip6_flowinfo(ip6h), 2239 .flowi6_oif = dev->ifindex, 2240 .flowi6_proto = ip6h->nexthdr, 2241 .daddr = ip6h->daddr, 2242 .saddr = ip6h->saddr, 2243 }; 2244 2245 dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); 2246 if (IS_ERR(dst)) 2247 goto out_drop; 2248 2249 skb_dst_set(skb, dst); 2250 } else if (nh->nh_family != AF_INET6) { 2251 goto out_drop; 2252 } 2253 2254 err = bpf_out_neigh_v6(net, skb, dev, nh); 2255 if (unlikely(net_xmit_eval(err))) 2256 dev->stats.tx_errors++; 2257 else 2258 ret = NET_XMIT_SUCCESS; 2259 goto out_xmit; 2260 out_drop: 2261 dev->stats.tx_errors++; 2262 kfree_skb(skb); 2263 out_xmit: 2264 return ret; 2265 } 2266 #else 2267 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, 2268 struct bpf_nh_params *nh) 2269 { 2270 kfree_skb(skb); 2271 return NET_XMIT_DROP; 2272 } 2273 #endif /* CONFIG_IPV6 */ 2274 2275 #if IS_ENABLED(CONFIG_INET) 2276 static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, 2277 struct net_device *dev, struct bpf_nh_params *nh) 2278 { 2279 u32 hh_len = LL_RESERVED_SPACE(dev); 2280 struct neighbour *neigh; 2281 bool is_v6gw = false; 2282 2283 if (dev_xmit_recursion()) { 2284 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf 
program?\n"); 2285 goto out_drop; 2286 } 2287 2288 skb->dev = dev; 2289 skb->tstamp = 0; 2290 2291 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 2292 struct sk_buff *skb2; 2293 2294 skb2 = skb_realloc_headroom(skb, hh_len); 2295 if (unlikely(!skb2)) { 2296 kfree_skb(skb); 2297 return -ENOMEM; 2298 } 2299 if (skb->sk) 2300 skb_set_owner_w(skb2, skb->sk); 2301 consume_skb(skb); 2302 skb = skb2; 2303 } 2304 2305 rcu_read_lock_bh(); 2306 if (!nh) { 2307 struct dst_entry *dst = skb_dst(skb); 2308 struct rtable *rt = container_of(dst, struct rtable, dst); 2309 2310 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); 2311 } else if (nh->nh_family == AF_INET6) { 2312 neigh = ip_neigh_gw6(dev, &nh->ipv6_nh); 2313 is_v6gw = true; 2314 } else if (nh->nh_family == AF_INET) { 2315 neigh = ip_neigh_gw4(dev, nh->ipv4_nh); 2316 } else { 2317 rcu_read_unlock_bh(); 2318 goto out_drop; 2319 } 2320 2321 if (likely(!IS_ERR(neigh))) { 2322 int ret; 2323 2324 sock_confirm_neigh(skb, neigh); 2325 dev_xmit_recursion_inc(); 2326 ret = neigh_output(neigh, skb, is_v6gw); 2327 dev_xmit_recursion_dec(); 2328 rcu_read_unlock_bh(); 2329 return ret; 2330 } 2331 rcu_read_unlock_bh(); 2332 out_drop: 2333 kfree_skb(skb); 2334 return -ENETDOWN; 2335 } 2336 2337 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, 2338 struct bpf_nh_params *nh) 2339 { 2340 const struct iphdr *ip4h = ip_hdr(skb); 2341 struct net *net = dev_net(dev); 2342 int err, ret = NET_XMIT_DROP; 2343 2344 if (!nh) { 2345 struct flowi4 fl4 = { 2346 .flowi4_flags = FLOWI_FLAG_ANYSRC, 2347 .flowi4_mark = skb->mark, 2348 .flowi4_tos = RT_TOS(ip4h->tos), 2349 .flowi4_oif = dev->ifindex, 2350 .flowi4_proto = ip4h->protocol, 2351 .daddr = ip4h->daddr, 2352 .saddr = ip4h->saddr, 2353 }; 2354 struct rtable *rt; 2355 2356 rt = ip_route_output_flow(net, &fl4, NULL); 2357 if (IS_ERR(rt)) 2358 goto out_drop; 2359 if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { 2360 ip_rt_put(rt); 2361 goto out_drop; 2362 } 2363 2364 skb_dst_set(skb, &rt->dst); 2365 } 2366 2367 err = bpf_out_neigh_v4(net, skb, dev, nh); 2368 if (unlikely(net_xmit_eval(err))) 2369 dev->stats.tx_errors++; 2370 else 2371 ret = NET_XMIT_SUCCESS; 2372 goto out_xmit; 2373 out_drop: 2374 dev->stats.tx_errors++; 2375 kfree_skb(skb); 2376 out_xmit: 2377 return ret; 2378 } 2379 #else 2380 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, 2381 struct bpf_nh_params *nh) 2382 { 2383 kfree_skb(skb); 2384 return NET_XMIT_DROP; 2385 } 2386 #endif /* CONFIG_INET */ 2387 2388 static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev, 2389 struct bpf_nh_params *nh) 2390 { 2391 struct ethhdr *ethh = eth_hdr(skb); 2392 2393 if (unlikely(skb->mac_header >= skb->network_header)) 2394 goto out; 2395 bpf_push_mac_rcsum(skb); 2396 if (is_multicast_ether_addr(ethh->h_dest)) 2397 goto out; 2398 2399 skb_pull(skb, sizeof(*ethh)); 2400 skb_unset_mac_header(skb); 2401 skb_reset_network_header(skb); 2402 2403 if (skb->protocol == htons(ETH_P_IP)) 2404 return __bpf_redirect_neigh_v4(skb, dev, nh); 2405 else if (skb->protocol == htons(ETH_P_IPV6)) 2406 return __bpf_redirect_neigh_v6(skb, dev, nh); 2407 out: 2408 kfree_skb(skb); 2409 return -ENOTSUPP; 2410 } 2411 2412 /* Internal, non-exposed redirect flags. 
*/ 2413 enum { 2414 BPF_F_NEIGH = (1ULL << 1), 2415 BPF_F_PEER = (1ULL << 2), 2416 BPF_F_NEXTHOP = (1ULL << 3), 2417 #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP) 2418 }; 2419 2420 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) 2421 { 2422 struct net_device *dev; 2423 struct sk_buff *clone; 2424 int ret; 2425 2426 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2427 return -EINVAL; 2428 2429 dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); 2430 if (unlikely(!dev)) 2431 return -EINVAL; 2432 2433 clone = skb_clone(skb, GFP_ATOMIC); 2434 if (unlikely(!clone)) 2435 return -ENOMEM; 2436 2437 /* For direct write, we need to keep the invariant that the skbs 2438 * we're dealing with need to be uncloned. Should uncloning fail 2439 * here, we need to free the just generated clone to unclone once 2440 * again. 2441 */ 2442 ret = bpf_try_make_head_writable(skb); 2443 if (unlikely(ret)) { 2444 kfree_skb(clone); 2445 return -ENOMEM; 2446 } 2447 2448 return __bpf_redirect(clone, dev, flags); 2449 } 2450 2451 static const struct bpf_func_proto bpf_clone_redirect_proto = { 2452 .func = bpf_clone_redirect, 2453 .gpl_only = false, 2454 .ret_type = RET_INTEGER, 2455 .arg1_type = ARG_PTR_TO_CTX, 2456 .arg2_type = ARG_ANYTHING, 2457 .arg3_type = ARG_ANYTHING, 2458 }; 2459 2460 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); 2461 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); 2462 2463 int skb_do_redirect(struct sk_buff *skb) 2464 { 2465 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2466 struct net *net = dev_net(skb->dev); 2467 struct net_device *dev; 2468 u32 flags = ri->flags; 2469 2470 dev = dev_get_by_index_rcu(net, ri->tgt_index); 2471 ri->tgt_index = 0; 2472 ri->flags = 0; 2473 if (unlikely(!dev)) 2474 goto out_drop; 2475 if (flags & BPF_F_PEER) { 2476 const struct net_device_ops *ops = dev->netdev_ops; 2477 2478 if (unlikely(!ops->ndo_get_peer_dev || 2479 !skb_at_tc_ingress(skb))) 2480 goto out_drop; 2481 dev = ops->ndo_get_peer_dev(dev); 2482 if (unlikely(!dev || 2483 !(dev->flags & IFF_UP) || 2484 net_eq(net, dev_net(dev)))) 2485 goto out_drop; 2486 skb->dev = dev; 2487 return -EAGAIN; 2488 } 2489 return flags & BPF_F_NEIGH ? 2490 __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ? 
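/* Example (sketch): program-side use of the two redirect flavours wired up
 * above. bpf_redirect() only records the target in bpf_redirect_info and
 * relies on the program returning TC_ACT_REDIRECT so that skb_do_redirect()
 * later runs on the original skb; bpf_clone_redirect() forwards a clone
 * immediately and lets the original continue. A minimal tc fragment; the
 * ifindex constant and section name are illustrative.
 *
 *	SEC("tc")
 *	int redirect_egress(struct __sk_buff *skb)
 *	{
 *		const __u32 target_ifindex = 4;	// illustrative device
 *
 *		// Pass BPF_F_INGRESS instead of 0 to inject into the
 *		// target's ingress path rather than its egress queue.
 *		return bpf_redirect(target_ifindex, 0);
 *	}
 */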
2491 &ri->nh : NULL) : 2492 __bpf_redirect(skb, dev, flags); 2493 out_drop: 2494 kfree_skb(skb); 2495 return -EINVAL; 2496 } 2497 2498 BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) 2499 { 2500 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2501 2502 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2503 return TC_ACT_SHOT; 2504 2505 ri->flags = flags; 2506 ri->tgt_index = ifindex; 2507 2508 return TC_ACT_REDIRECT; 2509 } 2510 2511 static const struct bpf_func_proto bpf_redirect_proto = { 2512 .func = bpf_redirect, 2513 .gpl_only = false, 2514 .ret_type = RET_INTEGER, 2515 .arg1_type = ARG_ANYTHING, 2516 .arg2_type = ARG_ANYTHING, 2517 }; 2518 2519 BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags) 2520 { 2521 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2522 2523 if (unlikely(flags)) 2524 return TC_ACT_SHOT; 2525 2526 ri->flags = BPF_F_PEER; 2527 ri->tgt_index = ifindex; 2528 2529 return TC_ACT_REDIRECT; 2530 } 2531 2532 static const struct bpf_func_proto bpf_redirect_peer_proto = { 2533 .func = bpf_redirect_peer, 2534 .gpl_only = false, 2535 .ret_type = RET_INTEGER, 2536 .arg1_type = ARG_ANYTHING, 2537 .arg2_type = ARG_ANYTHING, 2538 }; 2539 2540 BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params, 2541 int, plen, u64, flags) 2542 { 2543 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2544 2545 if (unlikely((plen && plen < sizeof(*params)) || flags)) 2546 return TC_ACT_SHOT; 2547 2548 ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0); 2549 ri->tgt_index = ifindex; 2550 2551 BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params)); 2552 if (plen) 2553 memcpy(&ri->nh, params, sizeof(ri->nh)); 2554 2555 return TC_ACT_REDIRECT; 2556 } 2557 2558 static const struct bpf_func_proto bpf_redirect_neigh_proto = { 2559 .func = bpf_redirect_neigh, 2560 .gpl_only = false, 2561 .ret_type = RET_INTEGER, 2562 .arg1_type = ARG_ANYTHING, 2563 .arg2_type = ARG_PTR_TO_MEM_OR_NULL, 2564 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 2565 .arg4_type = ARG_ANYTHING, 2566 }; 2567 2568 BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) 2569 { 2570 msg->apply_bytes = bytes; 2571 return 0; 2572 } 2573 2574 static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { 2575 .func = bpf_msg_apply_bytes, 2576 .gpl_only = false, 2577 .ret_type = RET_INTEGER, 2578 .arg1_type = ARG_PTR_TO_CTX, 2579 .arg2_type = ARG_ANYTHING, 2580 }; 2581 2582 BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) 2583 { 2584 msg->cork_bytes = bytes; 2585 return 0; 2586 } 2587 2588 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { 2589 .func = bpf_msg_cork_bytes, 2590 .gpl_only = false, 2591 .ret_type = RET_INTEGER, 2592 .arg1_type = ARG_PTR_TO_CTX, 2593 .arg2_type = ARG_ANYTHING, 2594 }; 2595 2596 BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, 2597 u32, end, u64, flags) 2598 { 2599 u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; 2600 u32 first_sge, last_sge, i, shift, bytes_sg_total; 2601 struct scatterlist *sge; 2602 u8 *raw, *to, *from; 2603 struct page *page; 2604 2605 if (unlikely(flags || end <= start)) 2606 return -EINVAL; 2607 2608 /* First find the starting scatterlist element */ 2609 i = msg->sg.start; 2610 do { 2611 offset += len; 2612 len = sk_msg_elem(msg, i)->length; 2613 if (start < offset + len) 2614 break; 2615 sk_msg_iter_var_next(i); 2616 } while (i != msg->sg.end); 2617 2618 if (unlikely(start >= offset + len)) 2619 
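/* Example (sketch): filling struct bpf_redir_neigh for the nexthop-aware
 * redirect above. Illustrative only; in practice the gateway usually comes
 * from a bpf_fib_lookup() result rather than a constant, and AF_INET and
 * bpf_htonl() are assumed to come from <linux/socket.h> and
 * <bpf/bpf_endian.h>.
 *
 *	struct bpf_redir_neigh nh = {
 *		.nh_family = AF_INET,
 *		.ipv4_nh = bpf_htonl(0x0a000001),	// 10.0.0.1, illustrative
 *	};
 *
 *	// Resolve/confirm the neighbour on the target device and queue
 *	// the skb there; a non-zero plen selects the BPF_F_NEXTHOP variant.
 *	return bpf_redirect_neigh(target_ifindex, &nh, sizeof(nh), 0);
 *
 * For veth-style device pairs, bpf_redirect_peer(target_ifindex, 0) can
 * instead hop directly to the peer device in the other namespace from tc
 * ingress, which is what the BPF_F_PEER branch in skb_do_redirect() above
 * implements.
 */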
return -EINVAL; 2620 2621 first_sge = i; 2622 /* The start may point into the sg element so we need to also 2623 * account for the headroom. 2624 */ 2625 bytes_sg_total = start - offset + bytes; 2626 if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len) 2627 goto out; 2628 2629 /* At this point we need to linearize multiple scatterlist 2630 * elements or a single shared page. Either way we need to 2631 * copy into a linear buffer exclusively owned by BPF. Then 2632 * place the buffer in the scatterlist and fixup the original 2633 * entries by removing the entries now in the linear buffer 2634 * and shifting the remaining entries. For now we do not try 2635 * to copy partial entries to avoid complexity of running out 2636 * of sg_entry slots. The downside is reading a single byte 2637 * will copy the entire sg entry. 2638 */ 2639 do { 2640 copy += sk_msg_elem(msg, i)->length; 2641 sk_msg_iter_var_next(i); 2642 if (bytes_sg_total <= copy) 2643 break; 2644 } while (i != msg->sg.end); 2645 last_sge = i; 2646 2647 if (unlikely(bytes_sg_total > copy)) 2648 return -EINVAL; 2649 2650 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, 2651 get_order(copy)); 2652 if (unlikely(!page)) 2653 return -ENOMEM; 2654 2655 raw = page_address(page); 2656 i = first_sge; 2657 do { 2658 sge = sk_msg_elem(msg, i); 2659 from = sg_virt(sge); 2660 len = sge->length; 2661 to = raw + poffset; 2662 2663 memcpy(to, from, len); 2664 poffset += len; 2665 sge->length = 0; 2666 put_page(sg_page(sge)); 2667 2668 sk_msg_iter_var_next(i); 2669 } while (i != last_sge); 2670 2671 sg_set_page(&msg->sg.data[first_sge], page, copy, 0); 2672 2673 /* To repair sg ring we need to shift entries. If we only 2674 * had a single entry though we can just replace it and 2675 * be done. Otherwise walk the ring and shift the entries. 2676 */ 2677 WARN_ON_ONCE(last_sge == first_sge); 2678 shift = last_sge > first_sge ? 2679 last_sge - first_sge - 1 : 2680 NR_MSG_FRAG_IDS - first_sge + last_sge - 1; 2681 if (!shift) 2682 goto out; 2683 2684 i = first_sge; 2685 sk_msg_iter_var_next(i); 2686 do { 2687 u32 move_from; 2688 2689 if (i + shift >= NR_MSG_FRAG_IDS) 2690 move_from = i + shift - NR_MSG_FRAG_IDS; 2691 else 2692 move_from = i + shift; 2693 if (move_from == msg->sg.end) 2694 break; 2695 2696 msg->sg.data[i] = msg->sg.data[move_from]; 2697 msg->sg.data[move_from].length = 0; 2698 msg->sg.data[move_from].page_link = 0; 2699 msg->sg.data[move_from].offset = 0; 2700 sk_msg_iter_var_next(i); 2701 } while (1); 2702 2703 msg->sg.end = msg->sg.end - shift > msg->sg.end ? 
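/* Example (sketch): an SK_MSG program using bpf_msg_pull_data() above to make
 * a fixed-size application header linear before parsing it. The 16-byte
 * header length, section name and verdicts are illustrative; the
 * data/data_end bounds check is required by the verifier after the pull.
 *
 *	SEC("sk_msg")
 *	int msg_parse_hdr(struct sk_msg_md *msg)
 *	{
 *		void *data, *data_end;
 *
 *		if (bpf_msg_pull_data(msg, 0, 16, 0))
 *			return SK_DROP;
 *		data = (void *)(long)msg->data;
 *		data_end = (void *)(long)msg->data_end;
 *		if (data + 16 > data_end)
 *			return SK_DROP;
 *		// The first 16 bytes are now directly readable/writable.
 *		return SK_PASS;
 *	}
 */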
2704 msg->sg.end - shift + NR_MSG_FRAG_IDS : 2705 msg->sg.end - shift; 2706 out: 2707 msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; 2708 msg->data_end = msg->data + bytes; 2709 return 0; 2710 } 2711 2712 static const struct bpf_func_proto bpf_msg_pull_data_proto = { 2713 .func = bpf_msg_pull_data, 2714 .gpl_only = false, 2715 .ret_type = RET_INTEGER, 2716 .arg1_type = ARG_PTR_TO_CTX, 2717 .arg2_type = ARG_ANYTHING, 2718 .arg3_type = ARG_ANYTHING, 2719 .arg4_type = ARG_ANYTHING, 2720 }; 2721 2722 BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, 2723 u32, len, u64, flags) 2724 { 2725 struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge; 2726 u32 new, i = 0, l = 0, space, copy = 0, offset = 0; 2727 u8 *raw, *to, *from; 2728 struct page *page; 2729 2730 if (unlikely(flags)) 2731 return -EINVAL; 2732 2733 /* First find the starting scatterlist element */ 2734 i = msg->sg.start; 2735 do { 2736 offset += l; 2737 l = sk_msg_elem(msg, i)->length; 2738 2739 if (start < offset + l) 2740 break; 2741 sk_msg_iter_var_next(i); 2742 } while (i != msg->sg.end); 2743 2744 if (start >= offset + l) 2745 return -EINVAL; 2746 2747 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); 2748 2749 /* If no space available will fallback to copy, we need at 2750 * least one scatterlist elem available to push data into 2751 * when start aligns to the beginning of an element or two 2752 * when it falls inside an element. We handle the start equals 2753 * offset case because its the common case for inserting a 2754 * header. 2755 */ 2756 if (!space || (space == 1 && start != offset)) 2757 copy = msg->sg.data[i].length; 2758 2759 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, 2760 get_order(copy + len)); 2761 if (unlikely(!page)) 2762 return -ENOMEM; 2763 2764 if (copy) { 2765 int front, back; 2766 2767 raw = page_address(page); 2768 2769 psge = sk_msg_elem(msg, i); 2770 front = start - offset; 2771 back = psge->length - front; 2772 from = sg_virt(psge); 2773 2774 if (front) 2775 memcpy(raw, from, front); 2776 2777 if (back) { 2778 from += front; 2779 to = raw + front + len; 2780 2781 memcpy(to, from, back); 2782 } 2783 2784 put_page(sg_page(psge)); 2785 } else if (start - offset) { 2786 psge = sk_msg_elem(msg, i); 2787 rsge = sk_msg_elem_cpy(msg, i); 2788 2789 psge->length = start - offset; 2790 rsge.length -= psge->length; 2791 rsge.offset += start; 2792 2793 sk_msg_iter_var_next(i); 2794 sg_unmark_end(psge); 2795 sg_unmark_end(&rsge); 2796 sk_msg_iter_next(msg, end); 2797 } 2798 2799 /* Slot(s) to place newly allocated data */ 2800 new = i; 2801 2802 /* Shift one or two slots as needed */ 2803 if (!copy) { 2804 sge = sk_msg_elem_cpy(msg, i); 2805 2806 sk_msg_iter_var_next(i); 2807 sg_unmark_end(&sge); 2808 sk_msg_iter_next(msg, end); 2809 2810 nsge = sk_msg_elem_cpy(msg, i); 2811 if (rsge.length) { 2812 sk_msg_iter_var_next(i); 2813 nnsge = sk_msg_elem_cpy(msg, i); 2814 } 2815 2816 while (i != msg->sg.end) { 2817 msg->sg.data[i] = sge; 2818 sge = nsge; 2819 sk_msg_iter_var_next(i); 2820 if (rsge.length) { 2821 nsge = nnsge; 2822 nnsge = sk_msg_elem_cpy(msg, i); 2823 } else { 2824 nsge = sk_msg_elem_cpy(msg, i); 2825 } 2826 } 2827 } 2828 2829 /* Place newly allocated data buffer */ 2830 sk_mem_charge(msg->sk, len); 2831 msg->sg.size += len; 2832 __clear_bit(new, &msg->sg.copy); 2833 sg_set_page(&msg->sg.data[new], page, len + copy, 0); 2834 if (rsge.length) { 2835 get_page(sg_page(&rsge)); 2836 sk_msg_iter_var_next(new); 2837 msg->sg.data[new] = rsge; 2838 } 2839 2840 
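/* Example (sketch): reserving room for a small application header in front of
 * the payload with bpf_msg_push_data() above. Illustrative sizes; the
 * subsequent pull makes the freshly allocated bytes directly accessible in
 * the program before they are written.
 *
 *	if (bpf_msg_push_data(msg, 0, 8, 0))
 *		return SK_DROP;
 *	if (bpf_msg_pull_data(msg, 0, 8, 0))
 *		return SK_DROP;
 *	data = (void *)(long)msg->data;
 *	data_end = (void *)(long)msg->data_end;
 *	if (data + 8 > data_end)
 *		return SK_DROP;
 *	__builtin_memset(data, 0, 8);	// fill in the new header
 *	return SK_PASS;
 */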
sk_msg_compute_data_pointers(msg); 2841 return 0; 2842 } 2843 2844 static const struct bpf_func_proto bpf_msg_push_data_proto = { 2845 .func = bpf_msg_push_data, 2846 .gpl_only = false, 2847 .ret_type = RET_INTEGER, 2848 .arg1_type = ARG_PTR_TO_CTX, 2849 .arg2_type = ARG_ANYTHING, 2850 .arg3_type = ARG_ANYTHING, 2851 .arg4_type = ARG_ANYTHING, 2852 }; 2853 2854 static void sk_msg_shift_left(struct sk_msg *msg, int i) 2855 { 2856 int prev; 2857 2858 do { 2859 prev = i; 2860 sk_msg_iter_var_next(i); 2861 msg->sg.data[prev] = msg->sg.data[i]; 2862 } while (i != msg->sg.end); 2863 2864 sk_msg_iter_prev(msg, end); 2865 } 2866 2867 static void sk_msg_shift_right(struct sk_msg *msg, int i) 2868 { 2869 struct scatterlist tmp, sge; 2870 2871 sk_msg_iter_next(msg, end); 2872 sge = sk_msg_elem_cpy(msg, i); 2873 sk_msg_iter_var_next(i); 2874 tmp = sk_msg_elem_cpy(msg, i); 2875 2876 while (i != msg->sg.end) { 2877 msg->sg.data[i] = sge; 2878 sk_msg_iter_var_next(i); 2879 sge = tmp; 2880 tmp = sk_msg_elem_cpy(msg, i); 2881 } 2882 } 2883 2884 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, 2885 u32, len, u64, flags) 2886 { 2887 u32 i = 0, l = 0, space, offset = 0; 2888 u64 last = start + len; 2889 int pop; 2890 2891 if (unlikely(flags)) 2892 return -EINVAL; 2893 2894 /* First find the starting scatterlist element */ 2895 i = msg->sg.start; 2896 do { 2897 offset += l; 2898 l = sk_msg_elem(msg, i)->length; 2899 2900 if (start < offset + l) 2901 break; 2902 sk_msg_iter_var_next(i); 2903 } while (i != msg->sg.end); 2904 2905 /* Bounds checks: start and pop must be inside message */ 2906 if (start >= offset + l || last >= msg->sg.size) 2907 return -EINVAL; 2908 2909 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); 2910 2911 pop = len; 2912 /* --------------| offset 2913 * -| start |-------- len -------| 2914 * 2915 * |----- a ----|-------- pop -------|----- b ----| 2916 * |______________________________________________| length 2917 * 2918 * 2919 * a: region at front of scatter element to save 2920 * b: region at back of scatter element to save when length > a + pop 2921 * pop: region to pop from element; same as the input 'pop', it is 2922 * decremented below per iteration. 2923 * 2924 * Two top-level cases to handle when start != offset: first, b is 2925 * non-zero; second, b is zero, corresponding to a pop that spans 2926 * more than one element. 2927 * 2928 * Then, if b is non-zero and there is no free slot, allocate space and 2929 * compact the a and b regions into one page. If there is a free slot, shift the 2930 * ring to the right, freeing the next element in the ring to place b, leaving 2931 * a untouched except to reduce its length.
2932 */ 2933 if (start != offset) { 2934 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); 2935 int a = start; 2936 int b = sge->length - pop - a; 2937 2938 sk_msg_iter_var_next(i); 2939 2940 if (pop < sge->length - a) { 2941 if (space) { 2942 sge->length = a; 2943 sk_msg_shift_right(msg, i); 2944 nsge = sk_msg_elem(msg, i); 2945 get_page(sg_page(sge)); 2946 sg_set_page(nsge, 2947 sg_page(sge), 2948 b, sge->offset + pop + a); 2949 } else { 2950 struct page *page, *orig; 2951 u8 *to, *from; 2952 2953 page = alloc_pages(__GFP_NOWARN | 2954 __GFP_COMP | GFP_ATOMIC, 2955 get_order(a + b)); 2956 if (unlikely(!page)) 2957 return -ENOMEM; 2958 2959 sge->length = a; 2960 orig = sg_page(sge); 2961 from = sg_virt(sge); 2962 to = page_address(page); 2963 memcpy(to, from, a); 2964 memcpy(to + a, from + a + pop, b); 2965 sg_set_page(sge, page, a + b, 0); 2966 put_page(orig); 2967 } 2968 pop = 0; 2969 } else if (pop >= sge->length - a) { 2970 pop -= (sge->length - a); 2971 sge->length = a; 2972 } 2973 } 2974 2975 /* From above the current layout _must_ be as follows, 2976 * 2977 * -| offset 2978 * -| start 2979 * 2980 * |---- pop ---|---------------- b ------------| 2981 * |____________________________________________| length 2982 * 2983 * Offset and start of the current msg elem are equal because in the 2984 * previous case we handled offset != start and either consumed the 2985 * entire element and advanced to the next element OR pop == 0. 2986 * 2987 * Two cases to handle here are first pop is less than the length 2988 * leaving some remainder b above. Simply adjust the element's layout 2989 * in this case. Or pop >= length of the element so that b = 0. In this 2990 * case advance to next element decrementing pop. 2991 */ 2992 while (pop) { 2993 struct scatterlist *sge = sk_msg_elem(msg, i); 2994 2995 if (pop < sge->length) { 2996 sge->length -= pop; 2997 sge->offset += pop; 2998 pop = 0; 2999 } else { 3000 pop -= sge->length; 3001 sk_msg_shift_left(msg, i); 3002 } 3003 sk_msg_iter_var_next(i); 3004 } 3005 3006 sk_mem_uncharge(msg->sk, len - pop); 3007 msg->sg.size -= (len - pop); 3008 sk_msg_compute_data_pointers(msg); 3009 return 0; 3010 } 3011 3012 static const struct bpf_func_proto bpf_msg_pop_data_proto = { 3013 .func = bpf_msg_pop_data, 3014 .gpl_only = false, 3015 .ret_type = RET_INTEGER, 3016 .arg1_type = ARG_PTR_TO_CTX, 3017 .arg2_type = ARG_ANYTHING, 3018 .arg3_type = ARG_ANYTHING, 3019 .arg4_type = ARG_ANYTHING, 3020 }; 3021 3022 #ifdef CONFIG_CGROUP_NET_CLASSID 3023 BPF_CALL_0(bpf_get_cgroup_classid_curr) 3024 { 3025 return __task_get_classid(current); 3026 } 3027 3028 static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = { 3029 .func = bpf_get_cgroup_classid_curr, 3030 .gpl_only = false, 3031 .ret_type = RET_INTEGER, 3032 }; 3033 3034 BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb) 3035 { 3036 struct sock *sk = skb_to_full_sk(skb); 3037 3038 if (!sk || !sk_fullsock(sk)) 3039 return 0; 3040 3041 return sock_cgroup_classid(&sk->sk_cgrp_data); 3042 } 3043 3044 static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = { 3045 .func = bpf_skb_cgroup_classid, 3046 .gpl_only = false, 3047 .ret_type = RET_INTEGER, 3048 .arg1_type = ARG_PTR_TO_CTX, 3049 }; 3050 #endif 3051 3052 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) 3053 { 3054 return task_get_classid(skb); 3055 } 3056 3057 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { 3058 .func = bpf_get_cgroup_classid, 3059 .gpl_only = false, 3060 .ret_type = 
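/* Example (sketch): the counterpart of the push example further up;
 * bpf_msg_pop_data() strips an 8-byte header from the front of the message
 * before it is handed to the application. Sizes and section name are
 * illustrative.
 *
 *	SEC("sk_msg")
 *	int msg_strip_hdr(struct sk_msg_md *msg)
 *	{
 *		// Remove 8 bytes at offset 0; msg->size shrinks and the
 *		// accounted socket memory is uncharged by the helper.
 *		if (bpf_msg_pop_data(msg, 0, 8, 0))
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */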
RET_INTEGER, 3061 .arg1_type = ARG_PTR_TO_CTX, 3062 }; 3063 3064 BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) 3065 { 3066 return dst_tclassid(skb); 3067 } 3068 3069 static const struct bpf_func_proto bpf_get_route_realm_proto = { 3070 .func = bpf_get_route_realm, 3071 .gpl_only = false, 3072 .ret_type = RET_INTEGER, 3073 .arg1_type = ARG_PTR_TO_CTX, 3074 }; 3075 3076 BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) 3077 { 3078 /* If skb_clear_hash() was called due to mangling, we can 3079 * trigger SW recalculation here. Later access to hash 3080 * can then use the inline skb->hash via context directly 3081 * instead of calling this helper again. 3082 */ 3083 return skb_get_hash(skb); 3084 } 3085 3086 static const struct bpf_func_proto bpf_get_hash_recalc_proto = { 3087 .func = bpf_get_hash_recalc, 3088 .gpl_only = false, 3089 .ret_type = RET_INTEGER, 3090 .arg1_type = ARG_PTR_TO_CTX, 3091 }; 3092 3093 BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) 3094 { 3095 /* After all direct packet write, this can be used once for 3096 * triggering a lazy recalc on next skb_get_hash() invocation. 3097 */ 3098 skb_clear_hash(skb); 3099 return 0; 3100 } 3101 3102 static const struct bpf_func_proto bpf_set_hash_invalid_proto = { 3103 .func = bpf_set_hash_invalid, 3104 .gpl_only = false, 3105 .ret_type = RET_INTEGER, 3106 .arg1_type = ARG_PTR_TO_CTX, 3107 }; 3108 3109 BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) 3110 { 3111 /* Set user specified hash as L4(+), so that it gets returned 3112 * on skb_get_hash() call unless BPF prog later on triggers a 3113 * skb_clear_hash(). 3114 */ 3115 __skb_set_sw_hash(skb, hash, true); 3116 return 0; 3117 } 3118 3119 static const struct bpf_func_proto bpf_set_hash_proto = { 3120 .func = bpf_set_hash, 3121 .gpl_only = false, 3122 .ret_type = RET_INTEGER, 3123 .arg1_type = ARG_PTR_TO_CTX, 3124 .arg2_type = ARG_ANYTHING, 3125 }; 3126 3127 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, 3128 u16, vlan_tci) 3129 { 3130 int ret; 3131 3132 if (unlikely(vlan_proto != htons(ETH_P_8021Q) && 3133 vlan_proto != htons(ETH_P_8021AD))) 3134 vlan_proto = htons(ETH_P_8021Q); 3135 3136 bpf_push_mac_rcsum(skb); 3137 ret = skb_vlan_push(skb, vlan_proto, vlan_tci); 3138 bpf_pull_mac_rcsum(skb); 3139 3140 bpf_compute_data_pointers(skb); 3141 return ret; 3142 } 3143 3144 static const struct bpf_func_proto bpf_skb_vlan_push_proto = { 3145 .func = bpf_skb_vlan_push, 3146 .gpl_only = false, 3147 .ret_type = RET_INTEGER, 3148 .arg1_type = ARG_PTR_TO_CTX, 3149 .arg2_type = ARG_ANYTHING, 3150 .arg3_type = ARG_ANYTHING, 3151 }; 3152 3153 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) 3154 { 3155 int ret; 3156 3157 bpf_push_mac_rcsum(skb); 3158 ret = skb_vlan_pop(skb); 3159 bpf_pull_mac_rcsum(skb); 3160 3161 bpf_compute_data_pointers(skb); 3162 return ret; 3163 } 3164 3165 static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { 3166 .func = bpf_skb_vlan_pop, 3167 .gpl_only = false, 3168 .ret_type = RET_INTEGER, 3169 .arg1_type = ARG_PTR_TO_CTX, 3170 }; 3171 3172 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) 3173 { 3174 /* Caller already did skb_cow() with len as headroom, 3175 * so no need to do it here. 
3176 */ 3177 skb_push(skb, len); 3178 memmove(skb->data, skb->data + len, off); 3179 memset(skb->data + off, 0, len); 3180 3181 /* No skb_postpush_rcsum(skb, skb->data + off, len) 3182 * needed here as it does not change the skb->csum 3183 * result for checksum complete when summing over 3184 * zeroed blocks. 3185 */ 3186 return 0; 3187 } 3188 3189 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) 3190 { 3191 /* skb_ensure_writable() is not needed here, as we're 3192 * already working on an uncloned skb. 3193 */ 3194 if (unlikely(!pskb_may_pull(skb, off + len))) 3195 return -ENOMEM; 3196 3197 skb_postpull_rcsum(skb, skb->data + off, len); 3198 memmove(skb->data + len, skb->data, off); 3199 __skb_pull(skb, len); 3200 3201 return 0; 3202 } 3203 3204 static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) 3205 { 3206 bool trans_same = skb->transport_header == skb->network_header; 3207 int ret; 3208 3209 /* There's no need for __skb_push()/__skb_pull() pair to 3210 * get to the start of the mac header as we're guaranteed 3211 * to always start from here under eBPF. 3212 */ 3213 ret = bpf_skb_generic_push(skb, off, len); 3214 if (likely(!ret)) { 3215 skb->mac_header -= len; 3216 skb->network_header -= len; 3217 if (trans_same) 3218 skb->transport_header = skb->network_header; 3219 } 3220 3221 return ret; 3222 } 3223 3224 static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) 3225 { 3226 bool trans_same = skb->transport_header == skb->network_header; 3227 int ret; 3228 3229 /* Same here, __skb_push()/__skb_pull() pair not needed. */ 3230 ret = bpf_skb_generic_pop(skb, off, len); 3231 if (likely(!ret)) { 3232 skb->mac_header += len; 3233 skb->network_header += len; 3234 if (trans_same) 3235 skb->transport_header = skb->network_header; 3236 } 3237 3238 return ret; 3239 } 3240 3241 static int bpf_skb_proto_4_to_6(struct sk_buff *skb) 3242 { 3243 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); 3244 u32 off = skb_mac_header_len(skb); 3245 int ret; 3246 3247 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) 3248 return -ENOTSUPP; 3249 3250 ret = skb_cow(skb, len_diff); 3251 if (unlikely(ret < 0)) 3252 return ret; 3253 3254 ret = bpf_skb_net_hdr_push(skb, off, len_diff); 3255 if (unlikely(ret < 0)) 3256 return ret; 3257 3258 if (skb_is_gso(skb)) { 3259 struct skb_shared_info *shinfo = skb_shinfo(skb); 3260 3261 /* SKB_GSO_TCPV4 needs to be changed into 3262 * SKB_GSO_TCPV6. 3263 */ 3264 if (shinfo->gso_type & SKB_GSO_TCPV4) { 3265 shinfo->gso_type &= ~SKB_GSO_TCPV4; 3266 shinfo->gso_type |= SKB_GSO_TCPV6; 3267 } 3268 3269 /* Due to IPv6 header, MSS needs to be downgraded. */ 3270 skb_decrease_gso_size(shinfo, len_diff); 3271 /* Header must be checked, and gso_segs recomputed. 
*/ 3272 shinfo->gso_type |= SKB_GSO_DODGY; 3273 shinfo->gso_segs = 0; 3274 } 3275 3276 skb->protocol = htons(ETH_P_IPV6); 3277 skb_clear_hash(skb); 3278 3279 return 0; 3280 } 3281 3282 static int bpf_skb_proto_6_to_4(struct sk_buff *skb) 3283 { 3284 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); 3285 u32 off = skb_mac_header_len(skb); 3286 int ret; 3287 3288 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) 3289 return -ENOTSUPP; 3290 3291 ret = skb_unclone(skb, GFP_ATOMIC); 3292 if (unlikely(ret < 0)) 3293 return ret; 3294 3295 ret = bpf_skb_net_hdr_pop(skb, off, len_diff); 3296 if (unlikely(ret < 0)) 3297 return ret; 3298 3299 if (skb_is_gso(skb)) { 3300 struct skb_shared_info *shinfo = skb_shinfo(skb); 3301 3302 /* SKB_GSO_TCPV6 needs to be changed into 3303 * SKB_GSO_TCPV4. 3304 */ 3305 if (shinfo->gso_type & SKB_GSO_TCPV6) { 3306 shinfo->gso_type &= ~SKB_GSO_TCPV6; 3307 shinfo->gso_type |= SKB_GSO_TCPV4; 3308 } 3309 3310 /* Due to IPv4 header, MSS can be upgraded. */ 3311 skb_increase_gso_size(shinfo, len_diff); 3312 /* Header must be checked, and gso_segs recomputed. */ 3313 shinfo->gso_type |= SKB_GSO_DODGY; 3314 shinfo->gso_segs = 0; 3315 } 3316 3317 skb->protocol = htons(ETH_P_IP); 3318 skb_clear_hash(skb); 3319 3320 return 0; 3321 } 3322 3323 static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) 3324 { 3325 __be16 from_proto = skb->protocol; 3326 3327 if (from_proto == htons(ETH_P_IP) && 3328 to_proto == htons(ETH_P_IPV6)) 3329 return bpf_skb_proto_4_to_6(skb); 3330 3331 if (from_proto == htons(ETH_P_IPV6) && 3332 to_proto == htons(ETH_P_IP)) 3333 return bpf_skb_proto_6_to_4(skb); 3334 3335 return -ENOTSUPP; 3336 } 3337 3338 BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, 3339 u64, flags) 3340 { 3341 int ret; 3342 3343 if (unlikely(flags)) 3344 return -EINVAL; 3345 3346 /* General idea is that this helper does the basic groundwork 3347 * needed for changing the protocol, and eBPF program fills the 3348 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() 3349 * and other helpers, rather than passing a raw buffer here. 3350 * 3351 * The rationale is to keep this minimal and without a need to 3352 * deal with raw packet data. F.e. even if we would pass buffers 3353 * here, the program still needs to call the bpf_lX_csum_replace() 3354 * helpers anyway. Plus, this way we keep also separation of 3355 * concerns, since f.e. bpf_skb_store_bytes() should only take 3356 * care of stores. 3357 * 3358 * Currently, additional options and extension header space are 3359 * not supported, but flags register is reserved so we can adapt 3360 * that. For offloads, we mark packet as dodgy, so that headers 3361 * need to be verified first. 3362 */ 3363 ret = bpf_skb_proto_xlat(skb, proto); 3364 bpf_compute_data_pointers(skb); 3365 return ret; 3366 } 3367 3368 static const struct bpf_func_proto bpf_skb_change_proto_proto = { 3369 .func = bpf_skb_change_proto, 3370 .gpl_only = false, 3371 .ret_type = RET_INTEGER, 3372 .arg1_type = ARG_PTR_TO_CTX, 3373 .arg2_type = ARG_ANYTHING, 3374 .arg3_type = ARG_ANYTHING, 3375 }; 3376 3377 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) 3378 { 3379 /* We only allow a restricted subset to be changed for now. 
*/ 3380 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || 3381 !skb_pkt_type_ok(pkt_type))) 3382 return -EINVAL; 3383 3384 skb->pkt_type = pkt_type; 3385 return 0; 3386 } 3387 3388 static const struct bpf_func_proto bpf_skb_change_type_proto = { 3389 .func = bpf_skb_change_type, 3390 .gpl_only = false, 3391 .ret_type = RET_INTEGER, 3392 .arg1_type = ARG_PTR_TO_CTX, 3393 .arg2_type = ARG_ANYTHING, 3394 }; 3395 3396 static u32 bpf_skb_net_base_len(const struct sk_buff *skb) 3397 { 3398 switch (skb->protocol) { 3399 case htons(ETH_P_IP): 3400 return sizeof(struct iphdr); 3401 case htons(ETH_P_IPV6): 3402 return sizeof(struct ipv6hdr); 3403 default: 3404 return ~0U; 3405 } 3406 } 3407 3408 #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \ 3409 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3410 3411 #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ 3412 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ 3413 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ 3414 BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ 3415 BPF_F_ADJ_ROOM_ENCAP_L2( \ 3416 BPF_ADJ_ROOM_ENCAP_L2_MASK)) 3417 3418 static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, 3419 u64 flags) 3420 { 3421 u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT; 3422 bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK; 3423 u16 mac_len = 0, inner_net = 0, inner_trans = 0; 3424 unsigned int gso_type = SKB_GSO_DODGY; 3425 int ret; 3426 3427 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { 3428 /* udp gso_size delineates datagrams, only allow if fixed */ 3429 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || 3430 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3431 return -ENOTSUPP; 3432 } 3433 3434 ret = skb_cow_head(skb, len_diff); 3435 if (unlikely(ret < 0)) 3436 return ret; 3437 3438 if (encap) { 3439 if (skb->protocol != htons(ETH_P_IP) && 3440 skb->protocol != htons(ETH_P_IPV6)) 3441 return -ENOTSUPP; 3442 3443 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 && 3444 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3445 return -EINVAL; 3446 3447 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE && 3448 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) 3449 return -EINVAL; 3450 3451 if (skb->encapsulation) 3452 return -EALREADY; 3453 3454 mac_len = skb->network_header - skb->mac_header; 3455 inner_net = skb->network_header; 3456 if (inner_mac_len > len_diff) 3457 return -EINVAL; 3458 inner_trans = skb->transport_header; 3459 } 3460 3461 ret = bpf_skb_net_hdr_push(skb, off, len_diff); 3462 if (unlikely(ret < 0)) 3463 return ret; 3464 3465 if (encap) { 3466 skb->inner_mac_header = inner_net - inner_mac_len; 3467 skb->inner_network_header = inner_net; 3468 skb->inner_transport_header = inner_trans; 3469 skb_set_inner_protocol(skb, skb->protocol); 3470 3471 skb->encapsulation = 1; 3472 skb_set_network_header(skb, mac_len); 3473 3474 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) 3475 gso_type |= SKB_GSO_UDP_TUNNEL; 3476 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE) 3477 gso_type |= SKB_GSO_GRE; 3478 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3479 gso_type |= SKB_GSO_IPXIP6; 3480 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) 3481 gso_type |= SKB_GSO_IPXIP4; 3482 3483 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE || 3484 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) { 3485 int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ? 
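/* Example (sketch): the division of labour described in
 * bpf_skb_change_proto() above, here for a 4-to-6 translation. The helper
 * only resizes the network header and flips skb->protocol; the program then
 * writes the new IPv6 header and fixes the L4 checksum itself. The ip6h
 * variable and header contents are omitted and purely illustrative.
 *
 *	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *		return TC_ACT_SHOT;
 *	// Write the new IPv6 header...
 *	if (bpf_skb_store_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h), 0))
 *		return TC_ACT_SHOT;
 *	// ...and patch the TCP/UDP checksum for the new pseudo header,
 *	// e.g. with bpf_l4_csum_replace(..., BPF_F_PSEUDO_HDR) fed by a
 *	// bpf_csum_diff() over the old and new addresses.
 *	return TC_ACT_OK;
 */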
3486 sizeof(struct ipv6hdr) : 3487 sizeof(struct iphdr); 3488 3489 skb_set_transport_header(skb, mac_len + nh_len); 3490 } 3491 3492 /* Match skb->protocol to new outer l3 protocol */ 3493 if (skb->protocol == htons(ETH_P_IP) && 3494 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3495 skb->protocol = htons(ETH_P_IPV6); 3496 else if (skb->protocol == htons(ETH_P_IPV6) && 3497 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) 3498 skb->protocol = htons(ETH_P_IP); 3499 } 3500 3501 if (skb_is_gso(skb)) { 3502 struct skb_shared_info *shinfo = skb_shinfo(skb); 3503 3504 /* Due to header grow, MSS needs to be downgraded. */ 3505 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3506 skb_decrease_gso_size(shinfo, len_diff); 3507 3508 /* Header must be checked, and gso_segs recomputed. */ 3509 shinfo->gso_type |= gso_type; 3510 shinfo->gso_segs = 0; 3511 } 3512 3513 return 0; 3514 } 3515 3516 static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, 3517 u64 flags) 3518 { 3519 int ret; 3520 3521 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO | 3522 BPF_F_ADJ_ROOM_NO_CSUM_RESET))) 3523 return -EINVAL; 3524 3525 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { 3526 /* udp gso_size delineates datagrams, only allow if fixed */ 3527 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || 3528 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3529 return -ENOTSUPP; 3530 } 3531 3532 ret = skb_unclone(skb, GFP_ATOMIC); 3533 if (unlikely(ret < 0)) 3534 return ret; 3535 3536 ret = bpf_skb_net_hdr_pop(skb, off, len_diff); 3537 if (unlikely(ret < 0)) 3538 return ret; 3539 3540 if (skb_is_gso(skb)) { 3541 struct skb_shared_info *shinfo = skb_shinfo(skb); 3542 3543 /* Due to header shrink, MSS can be upgraded. */ 3544 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3545 skb_increase_gso_size(shinfo, len_diff); 3546 3547 /* Header must be checked, and gso_segs recomputed. 
*/ 3548 shinfo->gso_type |= SKB_GSO_DODGY; 3549 shinfo->gso_segs = 0; 3550 } 3551 3552 return 0; 3553 } 3554 3555 #define BPF_SKB_MAX_LEN SKB_MAX_ALLOC 3556 3557 BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, 3558 u32, mode, u64, flags) 3559 { 3560 u32 len_diff_abs = abs(len_diff); 3561 bool shrink = len_diff < 0; 3562 int ret = 0; 3563 3564 if (unlikely(flags || mode)) 3565 return -EINVAL; 3566 if (unlikely(len_diff_abs > 0xfffU)) 3567 return -EFAULT; 3568 3569 if (!shrink) { 3570 ret = skb_cow(skb, len_diff); 3571 if (unlikely(ret < 0)) 3572 return ret; 3573 __skb_push(skb, len_diff_abs); 3574 memset(skb->data, 0, len_diff_abs); 3575 } else { 3576 if (unlikely(!pskb_may_pull(skb, len_diff_abs))) 3577 return -ENOMEM; 3578 __skb_pull(skb, len_diff_abs); 3579 } 3580 bpf_compute_data_end_sk_skb(skb); 3581 if (tls_sw_has_ctx_rx(skb->sk)) { 3582 struct strp_msg *rxm = strp_msg(skb); 3583 3584 rxm->full_len += len_diff; 3585 } 3586 return ret; 3587 } 3588 3589 static const struct bpf_func_proto sk_skb_adjust_room_proto = { 3590 .func = sk_skb_adjust_room, 3591 .gpl_only = false, 3592 .ret_type = RET_INTEGER, 3593 .arg1_type = ARG_PTR_TO_CTX, 3594 .arg2_type = ARG_ANYTHING, 3595 .arg3_type = ARG_ANYTHING, 3596 .arg4_type = ARG_ANYTHING, 3597 }; 3598 3599 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, 3600 u32, mode, u64, flags) 3601 { 3602 u32 len_cur, len_diff_abs = abs(len_diff); 3603 u32 len_min = bpf_skb_net_base_len(skb); 3604 u32 len_max = BPF_SKB_MAX_LEN; 3605 __be16 proto = skb->protocol; 3606 bool shrink = len_diff < 0; 3607 u32 off; 3608 int ret; 3609 3610 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK | 3611 BPF_F_ADJ_ROOM_NO_CSUM_RESET))) 3612 return -EINVAL; 3613 if (unlikely(len_diff_abs > 0xfffU)) 3614 return -EFAULT; 3615 if (unlikely(proto != htons(ETH_P_IP) && 3616 proto != htons(ETH_P_IPV6))) 3617 return -ENOTSUPP; 3618 3619 off = skb_mac_header_len(skb); 3620 switch (mode) { 3621 case BPF_ADJ_ROOM_NET: 3622 off += bpf_skb_net_base_len(skb); 3623 break; 3624 case BPF_ADJ_ROOM_MAC: 3625 break; 3626 default: 3627 return -ENOTSUPP; 3628 } 3629 3630 len_cur = skb->len - skb_network_offset(skb); 3631 if ((shrink && (len_diff_abs >= len_cur || 3632 len_cur - len_diff_abs < len_min)) || 3633 (!shrink && (skb->len + len_diff_abs > len_max && 3634 !skb_is_gso(skb)))) 3635 return -ENOTSUPP; 3636 3637 ret = shrink ? 
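/* Example (sketch): a plain (non-encap) use of bpf_skb_adjust_room() from tc.
 * Illustrative delta and flags; the BPF_F_ADJ_ROOM_ENCAP_* flags from the
 * mask above would be used instead when an encapsulation header is being
 * built in place, so that GSO segmentation keeps working.
 *
 *	// Open up 8 zeroed bytes right behind the network header, keep
 *	// the GSO size fixed and leave CHECKSUM_UNNECESSARY state alone.
 *	if (bpf_skb_adjust_room(skb, 8, BPF_ADJ_ROOM_NET,
 *				BPF_F_ADJ_ROOM_FIXED_GSO |
 *				BPF_F_ADJ_ROOM_NO_CSUM_RESET))
 *		return TC_ACT_SHOT;
 *	// Fill the new bytes with bpf_skb_store_bytes() and fix the
 *	// checksums with the bpf_lX_csum_replace() helpers.
 */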
bpf_skb_net_shrink(skb, off, len_diff_abs, flags) : 3638 bpf_skb_net_grow(skb, off, len_diff_abs, flags); 3639 if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET)) 3640 __skb_reset_checksum_unnecessary(skb); 3641 3642 bpf_compute_data_pointers(skb); 3643 return ret; 3644 } 3645 3646 static const struct bpf_func_proto bpf_skb_adjust_room_proto = { 3647 .func = bpf_skb_adjust_room, 3648 .gpl_only = false, 3649 .ret_type = RET_INTEGER, 3650 .arg1_type = ARG_PTR_TO_CTX, 3651 .arg2_type = ARG_ANYTHING, 3652 .arg3_type = ARG_ANYTHING, 3653 .arg4_type = ARG_ANYTHING, 3654 }; 3655 3656 static u32 __bpf_skb_min_len(const struct sk_buff *skb) 3657 { 3658 u32 min_len = skb_network_offset(skb); 3659 3660 if (skb_transport_header_was_set(skb)) 3661 min_len = skb_transport_offset(skb); 3662 if (skb->ip_summed == CHECKSUM_PARTIAL) 3663 min_len = skb_checksum_start_offset(skb) + 3664 skb->csum_offset + sizeof(__sum16); 3665 return min_len; 3666 } 3667 3668 static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) 3669 { 3670 unsigned int old_len = skb->len; 3671 int ret; 3672 3673 ret = __skb_grow_rcsum(skb, new_len); 3674 if (!ret) 3675 memset(skb->data + old_len, 0, new_len - old_len); 3676 return ret; 3677 } 3678 3679 static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) 3680 { 3681 return __skb_trim_rcsum(skb, new_len); 3682 } 3683 3684 static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, 3685 u64 flags) 3686 { 3687 u32 max_len = BPF_SKB_MAX_LEN; 3688 u32 min_len = __bpf_skb_min_len(skb); 3689 int ret; 3690 3691 if (unlikely(flags || new_len > max_len || new_len < min_len)) 3692 return -EINVAL; 3693 if (skb->encapsulation) 3694 return -ENOTSUPP; 3695 3696 /* The basic idea of this helper is that it's performing the 3697 * needed work to either grow or trim an skb, and the eBPF program 3698 * rewrites the rest via helpers like bpf_skb_store_bytes(), 3699 * bpf_lX_csum_replace() and others rather than passing a raw 3700 * buffer here. This one is a slow path helper and intended 3701 * for replies with control messages. 3702 * 3703 * Like in bpf_skb_change_proto(), we want to keep this rather 3704 * minimal and without protocol specifics so that we are able 3705 * to separate concerns: bpf_skb_store_bytes() should be the 3706 * only helper responsible for writing buffers. 3707 * 3708 * It's really expected to be a slow path operation here for 3709 * control message replies, so we're implicitly linearizing, 3710 * uncloning and dropping offloads from the skb by this.
3711 */ 3712 ret = __bpf_try_make_writable(skb, skb->len); 3713 if (!ret) { 3714 if (new_len > skb->len) 3715 ret = bpf_skb_grow_rcsum(skb, new_len); 3716 else if (new_len < skb->len) 3717 ret = bpf_skb_trim_rcsum(skb, new_len); 3718 if (!ret && skb_is_gso(skb)) 3719 skb_gso_reset(skb); 3720 } 3721 return ret; 3722 } 3723 3724 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, 3725 u64, flags) 3726 { 3727 int ret = __bpf_skb_change_tail(skb, new_len, flags); 3728 3729 bpf_compute_data_pointers(skb); 3730 return ret; 3731 } 3732 3733 static const struct bpf_func_proto bpf_skb_change_tail_proto = { 3734 .func = bpf_skb_change_tail, 3735 .gpl_only = false, 3736 .ret_type = RET_INTEGER, 3737 .arg1_type = ARG_PTR_TO_CTX, 3738 .arg2_type = ARG_ANYTHING, 3739 .arg3_type = ARG_ANYTHING, 3740 }; 3741 3742 BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, 3743 u64, flags) 3744 { 3745 int ret = __bpf_skb_change_tail(skb, new_len, flags); 3746 3747 bpf_compute_data_end_sk_skb(skb); 3748 return ret; 3749 } 3750 3751 static const struct bpf_func_proto sk_skb_change_tail_proto = { 3752 .func = sk_skb_change_tail, 3753 .gpl_only = false, 3754 .ret_type = RET_INTEGER, 3755 .arg1_type = ARG_PTR_TO_CTX, 3756 .arg2_type = ARG_ANYTHING, 3757 .arg3_type = ARG_ANYTHING, 3758 }; 3759 3760 static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, 3761 u64 flags) 3762 { 3763 u32 max_len = BPF_SKB_MAX_LEN; 3764 u32 new_len = skb->len + head_room; 3765 int ret; 3766 3767 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || 3768 new_len < skb->len)) 3769 return -EINVAL; 3770 3771 ret = skb_cow(skb, head_room); 3772 if (likely(!ret)) { 3773 /* Idea for this helper is that we currently only 3774 * allow to expand on mac header. This means that 3775 * skb->protocol network header, etc, stay as is. 3776 * Compared to bpf_skb_change_tail(), we're more 3777 * flexible due to not needing to linearize or 3778 * reset GSO. Intention for this helper is to be 3779 * used by an L3 skb that needs to push mac header 3780 * for redirection into L2 device. 3781 */ 3782 __skb_push(skb, head_room); 3783 memset(skb->data, 0, head_room); 3784 skb_reset_mac_header(skb); 3785 } 3786 3787 return ret; 3788 } 3789 3790 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, 3791 u64, flags) 3792 { 3793 int ret = __bpf_skb_change_head(skb, head_room, flags); 3794 3795 bpf_compute_data_pointers(skb); 3796 return ret; 3797 } 3798 3799 static const struct bpf_func_proto bpf_skb_change_head_proto = { 3800 .func = bpf_skb_change_head, 3801 .gpl_only = false, 3802 .ret_type = RET_INTEGER, 3803 .arg1_type = ARG_PTR_TO_CTX, 3804 .arg2_type = ARG_ANYTHING, 3805 .arg3_type = ARG_ANYTHING, 3806 }; 3807 3808 BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, 3809 u64, flags) 3810 { 3811 int ret = __bpf_skb_change_head(skb, head_room, flags); 3812 3813 bpf_compute_data_end_sk_skb(skb); 3814 return ret; 3815 } 3816 3817 static const struct bpf_func_proto sk_skb_change_head_proto = { 3818 .func = sk_skb_change_head, 3819 .gpl_only = false, 3820 .ret_type = RET_INTEGER, 3821 .arg1_type = ARG_PTR_TO_CTX, 3822 .arg2_type = ARG_ANYTHING, 3823 .arg3_type = ARG_ANYTHING, 3824 }; 3825 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) 3826 { 3827 return xdp_data_meta_unsupported(xdp) ? 
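/* Example (sketch): typical program-side pairing of the two helpers above.
 * Sizes are illustrative and error handling is shortened.
 *
 *	// Trim a reply down to a 64-byte control message; as noted above,
 *	// this is a slow path that linearizes and drops GSO state.
 *	if (bpf_skb_change_tail(skb, 64, 0))
 *		return TC_ACT_SHOT;
 *
 *	// Conversely, before redirecting an L3-only skb into an L2
 *	// device, push zeroed headroom for the MAC header that the
 *	// program will fill in afterwards:
 *	if (bpf_skb_change_head(skb, ETH_HLEN, 0))
 *		return TC_ACT_SHOT;
 */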
0 : 3828 xdp->data - xdp->data_meta; 3829 } 3830 3831 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset) 3832 { 3833 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); 3834 unsigned long metalen = xdp_get_metalen(xdp); 3835 void *data_start = xdp_frame_end + metalen; 3836 void *data = xdp->data + offset; 3837 3838 if (unlikely(data < data_start || 3839 data > xdp->data_end - ETH_HLEN)) 3840 return -EINVAL; 3841 3842 if (metalen) 3843 memmove(xdp->data_meta + offset, 3844 xdp->data_meta, metalen); 3845 xdp->data_meta += offset; 3846 xdp->data = data; 3847 3848 return 0; 3849 } 3850 3851 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { 3852 .func = bpf_xdp_adjust_head, 3853 .gpl_only = false, 3854 .ret_type = RET_INTEGER, 3855 .arg1_type = ARG_PTR_TO_CTX, 3856 .arg2_type = ARG_ANYTHING, 3857 }; 3858 3859 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) 3860 { 3861 void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */ 3862 void *data_end = xdp->data_end + offset; 3863 3864 /* Notice that xdp_data_hard_end have reserved some tailroom */ 3865 if (unlikely(data_end > data_hard_end)) 3866 return -EINVAL; 3867 3868 /* ALL drivers MUST init xdp->frame_sz, chicken check below */ 3869 if (unlikely(xdp->frame_sz > PAGE_SIZE)) { 3870 WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz); 3871 return -EINVAL; 3872 } 3873 3874 if (unlikely(data_end < xdp->data + ETH_HLEN)) 3875 return -EINVAL; 3876 3877 /* Clear memory area on grow, can contain uninit kernel memory */ 3878 if (offset > 0) 3879 memset(xdp->data_end, 0, offset); 3880 3881 xdp->data_end = data_end; 3882 3883 return 0; 3884 } 3885 3886 static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = { 3887 .func = bpf_xdp_adjust_tail, 3888 .gpl_only = false, 3889 .ret_type = RET_INTEGER, 3890 .arg1_type = ARG_PTR_TO_CTX, 3891 .arg2_type = ARG_ANYTHING, 3892 }; 3893 3894 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) 3895 { 3896 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); 3897 void *meta = xdp->data_meta + offset; 3898 unsigned long metalen = xdp->data - meta; 3899 3900 if (xdp_data_meta_unsupported(xdp)) 3901 return -ENOTSUPP; 3902 if (unlikely(meta < xdp_frame_end || 3903 meta > xdp->data)) 3904 return -EINVAL; 3905 if (unlikely((metalen & (sizeof(__u32) - 1)) || 3906 (metalen > 32))) 3907 return -EACCES; 3908 3909 xdp->data_meta = meta; 3910 3911 return 0; 3912 } 3913 3914 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { 3915 .func = bpf_xdp_adjust_meta, 3916 .gpl_only = false, 3917 .ret_type = RET_INTEGER, 3918 .arg1_type = ARG_PTR_TO_CTX, 3919 .arg2_type = ARG_ANYTHING, 3920 }; 3921 3922 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, 3923 struct bpf_map *map, struct xdp_buff *xdp) 3924 { 3925 switch (map->map_type) { 3926 case BPF_MAP_TYPE_DEVMAP: 3927 case BPF_MAP_TYPE_DEVMAP_HASH: 3928 return dev_map_enqueue(fwd, xdp, dev_rx); 3929 case BPF_MAP_TYPE_CPUMAP: 3930 return cpu_map_enqueue(fwd, xdp, dev_rx); 3931 case BPF_MAP_TYPE_XSKMAP: 3932 return __xsk_map_redirect(fwd, xdp); 3933 default: 3934 return -EBADRQC; 3935 } 3936 return 0; 3937 } 3938 3939 void xdp_do_flush(void) 3940 { 3941 __dev_flush(); 3942 __cpu_map_flush(); 3943 __xsk_map_flush(); 3944 } 3945 EXPORT_SYMBOL_GPL(xdp_do_flush); 3946 3947 static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index) 3948 { 3949 switch (map->map_type) { 3950 case BPF_MAP_TYPE_DEVMAP: 3951 return 
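/* Example (sketch): an XDP program reserving 4 bytes of metadata in front of
 * the packet with bpf_xdp_adjust_meta() above, to hand a hint to a later
 * stage (e.g. a tc program reading skb metadata). Assumes a driver that
 * supports data_meta; the helper returns -ENOTSUPP otherwise. Value and
 * section name are illustrative.
 *
 *	SEC("xdp")
 *	int xdp_mark(struct xdp_md *ctx)
 *	{
 *		void *data;
 *		__u32 *val;
 *
 *		// A negative delta grows the metadata area; it must stay
 *		// 4-byte aligned and at most 32 bytes, as checked above.
 *		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*val)))
 *			return XDP_PASS;
 *		data = (void *)(long)ctx->data;
 *		val  = (void *)(long)ctx->data_meta;
 *		if ((void *)(val + 1) > data)
 *			return XDP_PASS;
 *		*val = 0x42;	// illustrative hint value
 *		return XDP_PASS;
 *	}
 */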
__dev_map_lookup_elem(map, index); 3952 case BPF_MAP_TYPE_DEVMAP_HASH: 3953 return __dev_map_hash_lookup_elem(map, index); 3954 case BPF_MAP_TYPE_CPUMAP: 3955 return __cpu_map_lookup_elem(map, index); 3956 case BPF_MAP_TYPE_XSKMAP: 3957 return __xsk_map_lookup_elem(map, index); 3958 default: 3959 return NULL; 3960 } 3961 } 3962 3963 void bpf_clear_redirect_map(struct bpf_map *map) 3964 { 3965 struct bpf_redirect_info *ri; 3966 int cpu; 3967 3968 for_each_possible_cpu(cpu) { 3969 ri = per_cpu_ptr(&bpf_redirect_info, cpu); 3970 /* Avoid polluting remote cacheline due to writes if 3971 * not needed. Once we pass this test, we need the 3972 * cmpxchg() to make sure it hasn't been changed in 3973 * the meantime by remote CPU. 3974 */ 3975 if (unlikely(READ_ONCE(ri->map) == map)) 3976 cmpxchg(&ri->map, map, NULL); 3977 } 3978 } 3979 3980 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, 3981 struct bpf_prog *xdp_prog) 3982 { 3983 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 3984 struct bpf_map *map = READ_ONCE(ri->map); 3985 u32 index = ri->tgt_index; 3986 void *fwd = ri->tgt_value; 3987 int err; 3988 3989 ri->tgt_index = 0; 3990 ri->tgt_value = NULL; 3991 WRITE_ONCE(ri->map, NULL); 3992 3993 if (unlikely(!map)) { 3994 fwd = dev_get_by_index_rcu(dev_net(dev), index); 3995 if (unlikely(!fwd)) { 3996 err = -EINVAL; 3997 goto err; 3998 } 3999 4000 err = dev_xdp_enqueue(fwd, xdp, dev); 4001 } else { 4002 err = __bpf_tx_xdp_map(dev, fwd, map, xdp); 4003 } 4004 4005 if (unlikely(err)) 4006 goto err; 4007 4008 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index); 4009 return 0; 4010 err: 4011 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err); 4012 return err; 4013 } 4014 EXPORT_SYMBOL_GPL(xdp_do_redirect); 4015 4016 static int xdp_do_generic_redirect_map(struct net_device *dev, 4017 struct sk_buff *skb, 4018 struct xdp_buff *xdp, 4019 struct bpf_prog *xdp_prog, 4020 struct bpf_map *map) 4021 { 4022 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4023 u32 index = ri->tgt_index; 4024 void *fwd = ri->tgt_value; 4025 int err = 0; 4026 4027 ri->tgt_index = 0; 4028 ri->tgt_value = NULL; 4029 WRITE_ONCE(ri->map, NULL); 4030 4031 if (map->map_type == BPF_MAP_TYPE_DEVMAP || 4032 map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { 4033 struct bpf_dtab_netdev *dst = fwd; 4034 4035 err = dev_map_generic_redirect(dst, skb, xdp_prog); 4036 if (unlikely(err)) 4037 goto err; 4038 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { 4039 struct xdp_sock *xs = fwd; 4040 4041 err = xsk_generic_rcv(xs, xdp); 4042 if (err) 4043 goto err; 4044 consume_skb(skb); 4045 } else { 4046 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */ 4047 err = -EBADRQC; 4048 goto err; 4049 } 4050 4051 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index); 4052 return 0; 4053 err: 4054 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err); 4055 return err; 4056 } 4057 4058 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, 4059 struct xdp_buff *xdp, struct bpf_prog *xdp_prog) 4060 { 4061 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4062 struct bpf_map *map = READ_ONCE(ri->map); 4063 u32 index = ri->tgt_index; 4064 struct net_device *fwd; 4065 int err = 0; 4066 4067 if (map) 4068 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, 4069 map); 4070 ri->tgt_index = 0; 4071 fwd = dev_get_by_index_rcu(dev_net(dev), index); 4072 if (unlikely(!fwd)) { 4073 err = -EINVAL; 4074 goto err; 4075 } 4076 4077 err = xdp_ok_fwd_dev(fwd, skb->len); 
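/* Example (sketch): the map-based redirect path served by xdp_do_redirect()
 * above, driven from an XDP program with a devmap. Map name, size and key
 * derivation are illustrative; the third argument to bpf_redirect_map()
 * doubles as the return code on a failed lookup.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 8);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_fwd(struct xdp_md *ctx)
 *	{
 *		__u32 key = ctx->rx_queue_index & 7;	// illustrative key
 *
 *		return bpf_redirect_map(&tx_ports, key, XDP_PASS);
 *	}
 */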
4078 if (unlikely(err)) 4079 goto err; 4080 4081 skb->dev = fwd; 4082 _trace_xdp_redirect(dev, xdp_prog, index); 4083 generic_xdp_tx(skb, xdp_prog); 4084 return 0; 4085 err: 4086 _trace_xdp_redirect_err(dev, xdp_prog, index, err); 4087 return err; 4088 } 4089 4090 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) 4091 { 4092 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4093 4094 if (unlikely(flags)) 4095 return XDP_ABORTED; 4096 4097 ri->flags = flags; 4098 ri->tgt_index = ifindex; 4099 ri->tgt_value = NULL; 4100 WRITE_ONCE(ri->map, NULL); 4101 4102 return XDP_REDIRECT; 4103 } 4104 4105 static const struct bpf_func_proto bpf_xdp_redirect_proto = { 4106 .func = bpf_xdp_redirect, 4107 .gpl_only = false, 4108 .ret_type = RET_INTEGER, 4109 .arg1_type = ARG_ANYTHING, 4110 .arg2_type = ARG_ANYTHING, 4111 }; 4112 4113 BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, 4114 u64, flags) 4115 { 4116 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4117 4118 /* Lower bits of the flags are used as return code on lookup failure */ 4119 if (unlikely(flags > XDP_TX)) 4120 return XDP_ABORTED; 4121 4122 ri->tgt_value = __xdp_map_lookup_elem(map, ifindex); 4123 if (unlikely(!ri->tgt_value)) { 4124 /* If the lookup fails we want to clear out the state in the 4125 * redirect_info struct completely, so that if an eBPF program 4126 * performs multiple lookups, the last one always takes 4127 * precedence. 4128 */ 4129 WRITE_ONCE(ri->map, NULL); 4130 return flags; 4131 } 4132 4133 ri->flags = flags; 4134 ri->tgt_index = ifindex; 4135 WRITE_ONCE(ri->map, map); 4136 4137 return XDP_REDIRECT; 4138 } 4139 4140 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = { 4141 .func = bpf_xdp_redirect_map, 4142 .gpl_only = false, 4143 .ret_type = RET_INTEGER, 4144 .arg1_type = ARG_CONST_MAP_PTR, 4145 .arg2_type = ARG_ANYTHING, 4146 .arg3_type = ARG_ANYTHING, 4147 }; 4148 4149 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, 4150 unsigned long off, unsigned long len) 4151 { 4152 void *ptr = skb_header_pointer(skb, off, len, dst_buff); 4153 4154 if (unlikely(!ptr)) 4155 return len; 4156 if (ptr != dst_buff) 4157 memcpy(dst_buff, ptr, len); 4158 4159 return 0; 4160 } 4161 4162 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, 4163 u64, flags, void *, meta, u64, meta_size) 4164 { 4165 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; 4166 4167 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) 4168 return -EINVAL; 4169 if (unlikely(!skb || skb_size > skb->len)) 4170 return -EFAULT; 4171 4172 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, 4173 bpf_skb_copy); 4174 } 4175 4176 static const struct bpf_func_proto bpf_skb_event_output_proto = { 4177 .func = bpf_skb_event_output, 4178 .gpl_only = true, 4179 .ret_type = RET_INTEGER, 4180 .arg1_type = ARG_PTR_TO_CTX, 4181 .arg2_type = ARG_CONST_MAP_PTR, 4182 .arg3_type = ARG_ANYTHING, 4183 .arg4_type = ARG_PTR_TO_MEM, 4184 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4185 }; 4186 4187 BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff) 4188 4189 const struct bpf_func_proto bpf_skb_output_proto = { 4190 .func = bpf_skb_event_output, 4191 .gpl_only = true, 4192 .ret_type = RET_INTEGER, 4193 .arg1_type = ARG_PTR_TO_BTF_ID, 4194 .arg1_btf_id = &bpf_skb_output_btf_ids[0], 4195 .arg2_type = ARG_CONST_MAP_PTR, 4196 .arg3_type = ARG_ANYTHING, 4197 .arg4_type = ARG_PTR_TO_MEM, 4198 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4199 }; 4200 4201 static 
unsigned short bpf_tunnel_key_af(u64 flags) 4202 { 4203 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET; 4204 } 4205 4206 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to, 4207 u32, size, u64, flags) 4208 { 4209 const struct ip_tunnel_info *info = skb_tunnel_info(skb); 4210 u8 compat[sizeof(struct bpf_tunnel_key)]; 4211 void *to_orig = to; 4212 int err; 4213 4214 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { 4215 err = -EINVAL; 4216 goto err_clear; 4217 } 4218 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { 4219 err = -EPROTO; 4220 goto err_clear; 4221 } 4222 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 4223 err = -EINVAL; 4224 switch (size) { 4225 case offsetof(struct bpf_tunnel_key, tunnel_label): 4226 case offsetof(struct bpf_tunnel_key, tunnel_ext): 4227 goto set_compat; 4228 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 4229 /* Fixup deprecated structure layouts here, so we have 4230 * a common path later on. 4231 */ 4232 if (ip_tunnel_info_af(info) != AF_INET) 4233 goto err_clear; 4234 set_compat: 4235 to = (struct bpf_tunnel_key *)compat; 4236 break; 4237 default: 4238 goto err_clear; 4239 } 4240 } 4241 4242 to->tunnel_id = be64_to_cpu(info->key.tun_id); 4243 to->tunnel_tos = info->key.tos; 4244 to->tunnel_ttl = info->key.ttl; 4245 to->tunnel_ext = 0; 4246 4247 if (flags & BPF_F_TUNINFO_IPV6) { 4248 memcpy(to->remote_ipv6, &info->key.u.ipv6.src, 4249 sizeof(to->remote_ipv6)); 4250 to->tunnel_label = be32_to_cpu(info->key.label); 4251 } else { 4252 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); 4253 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); 4254 to->tunnel_label = 0; 4255 } 4256 4257 if (unlikely(size != sizeof(struct bpf_tunnel_key))) 4258 memcpy(to_orig, to, size); 4259 4260 return 0; 4261 err_clear: 4262 memset(to_orig, 0, size); 4263 return err; 4264 } 4265 4266 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { 4267 .func = bpf_skb_get_tunnel_key, 4268 .gpl_only = false, 4269 .ret_type = RET_INTEGER, 4270 .arg1_type = ARG_PTR_TO_CTX, 4271 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 4272 .arg3_type = ARG_CONST_SIZE, 4273 .arg4_type = ARG_ANYTHING, 4274 }; 4275 4276 BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size) 4277 { 4278 const struct ip_tunnel_info *info = skb_tunnel_info(skb); 4279 int err; 4280 4281 if (unlikely(!info || 4282 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { 4283 err = -ENOENT; 4284 goto err_clear; 4285 } 4286 if (unlikely(size < info->options_len)) { 4287 err = -ENOMEM; 4288 goto err_clear; 4289 } 4290 4291 ip_tunnel_info_opts_get(to, info); 4292 if (size > info->options_len) 4293 memset(to + info->options_len, 0, size - info->options_len); 4294 4295 return info->options_len; 4296 err_clear: 4297 memset(to, 0, size); 4298 return err; 4299 } 4300 4301 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { 4302 .func = bpf_skb_get_tunnel_opt, 4303 .gpl_only = false, 4304 .ret_type = RET_INTEGER, 4305 .arg1_type = ARG_PTR_TO_CTX, 4306 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 4307 .arg3_type = ARG_CONST_SIZE, 4308 }; 4309 4310 static struct metadata_dst __percpu *md_dst; 4311 4312 BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, 4313 const struct bpf_tunnel_key *, from, u32, size, u64, flags) 4314 { 4315 struct metadata_dst *md = this_cpu_ptr(md_dst); 4316 u8 compat[sizeof(struct bpf_tunnel_key)]; 4317 struct ip_tunnel_info *info; 4318 4319 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | 
4320 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER))) 4321 return -EINVAL; 4322 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 4323 switch (size) { 4324 case offsetof(struct bpf_tunnel_key, tunnel_label): 4325 case offsetof(struct bpf_tunnel_key, tunnel_ext): 4326 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 4327 /* Fixup deprecated structure layouts here, so we have 4328 * a common path later on. 4329 */ 4330 memcpy(compat, from, size); 4331 memset(compat + size, 0, sizeof(compat) - size); 4332 from = (const struct bpf_tunnel_key *) compat; 4333 break; 4334 default: 4335 return -EINVAL; 4336 } 4337 } 4338 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || 4339 from->tunnel_ext)) 4340 return -EINVAL; 4341 4342 skb_dst_drop(skb); 4343 dst_hold((struct dst_entry *) md); 4344 skb_dst_set(skb, (struct dst_entry *) md); 4345 4346 info = &md->u.tun_info; 4347 memset(info, 0, sizeof(*info)); 4348 info->mode = IP_TUNNEL_INFO_TX; 4349 4350 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; 4351 if (flags & BPF_F_DONT_FRAGMENT) 4352 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; 4353 if (flags & BPF_F_ZERO_CSUM_TX) 4354 info->key.tun_flags &= ~TUNNEL_CSUM; 4355 if (flags & BPF_F_SEQ_NUMBER) 4356 info->key.tun_flags |= TUNNEL_SEQ; 4357 4358 info->key.tun_id = cpu_to_be64(from->tunnel_id); 4359 info->key.tos = from->tunnel_tos; 4360 info->key.ttl = from->tunnel_ttl; 4361 4362 if (flags & BPF_F_TUNINFO_IPV6) { 4363 info->mode |= IP_TUNNEL_INFO_IPV6; 4364 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, 4365 sizeof(from->remote_ipv6)); 4366 info->key.label = cpu_to_be32(from->tunnel_label) & 4367 IPV6_FLOWLABEL_MASK; 4368 } else { 4369 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); 4370 } 4371 4372 return 0; 4373 } 4374 4375 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { 4376 .func = bpf_skb_set_tunnel_key, 4377 .gpl_only = false, 4378 .ret_type = RET_INTEGER, 4379 .arg1_type = ARG_PTR_TO_CTX, 4380 .arg2_type = ARG_PTR_TO_MEM, 4381 .arg3_type = ARG_CONST_SIZE, 4382 .arg4_type = ARG_ANYTHING, 4383 }; 4384 4385 BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb, 4386 const u8 *, from, u32, size) 4387 { 4388 struct ip_tunnel_info *info = skb_tunnel_info(skb); 4389 const struct metadata_dst *md = this_cpu_ptr(md_dst); 4390 4391 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) 4392 return -EINVAL; 4393 if (unlikely(size > IP_TUNNEL_OPTS_MAX)) 4394 return -ENOMEM; 4395 4396 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); 4397 4398 return 0; 4399 } 4400 4401 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { 4402 .func = bpf_skb_set_tunnel_opt, 4403 .gpl_only = false, 4404 .ret_type = RET_INTEGER, 4405 .arg1_type = ARG_PTR_TO_CTX, 4406 .arg2_type = ARG_PTR_TO_MEM, 4407 .arg3_type = ARG_CONST_SIZE, 4408 }; 4409 4410 static const struct bpf_func_proto * 4411 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) 4412 { 4413 if (!md_dst) { 4414 struct metadata_dst __percpu *tmp; 4415 4416 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX, 4417 METADATA_IP_TUNNEL, 4418 GFP_KERNEL); 4419 if (!tmp) 4420 return NULL; 4421 if (cmpxchg(&md_dst, NULL, tmp)) 4422 metadata_dst_free_percpu(tmp); 4423 } 4424 4425 switch (which) { 4426 case BPF_FUNC_skb_set_tunnel_key: 4427 return &bpf_skb_set_tunnel_key_proto; 4428 case BPF_FUNC_skb_set_tunnel_opt: 4429 return &bpf_skb_set_tunnel_opt_proto; 4430 default: 4431 return NULL; 4432 } 4433 } 4434 4435 BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, 
skb, struct bpf_map *, map, 4436 u32, idx) 4437 { 4438 struct bpf_array *array = container_of(map, struct bpf_array, map); 4439 struct cgroup *cgrp; 4440 struct sock *sk; 4441 4442 sk = skb_to_full_sk(skb); 4443 if (!sk || !sk_fullsock(sk)) 4444 return -ENOENT; 4445 if (unlikely(idx >= array->map.max_entries)) 4446 return -E2BIG; 4447 4448 cgrp = READ_ONCE(array->ptrs[idx]); 4449 if (unlikely(!cgrp)) 4450 return -EAGAIN; 4451 4452 return sk_under_cgroup_hierarchy(sk, cgrp); 4453 } 4454 4455 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { 4456 .func = bpf_skb_under_cgroup, 4457 .gpl_only = false, 4458 .ret_type = RET_INTEGER, 4459 .arg1_type = ARG_PTR_TO_CTX, 4460 .arg2_type = ARG_CONST_MAP_PTR, 4461 .arg3_type = ARG_ANYTHING, 4462 }; 4463 4464 #ifdef CONFIG_SOCK_CGROUP_DATA 4465 static inline u64 __bpf_sk_cgroup_id(struct sock *sk) 4466 { 4467 struct cgroup *cgrp; 4468 4469 sk = sk_to_full_sk(sk); 4470 if (!sk || !sk_fullsock(sk)) 4471 return 0; 4472 4473 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 4474 return cgroup_id(cgrp); 4475 } 4476 4477 BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb) 4478 { 4479 return __bpf_sk_cgroup_id(skb->sk); 4480 } 4481 4482 static const struct bpf_func_proto bpf_skb_cgroup_id_proto = { 4483 .func = bpf_skb_cgroup_id, 4484 .gpl_only = false, 4485 .ret_type = RET_INTEGER, 4486 .arg1_type = ARG_PTR_TO_CTX, 4487 }; 4488 4489 static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk, 4490 int ancestor_level) 4491 { 4492 struct cgroup *ancestor; 4493 struct cgroup *cgrp; 4494 4495 sk = sk_to_full_sk(sk); 4496 if (!sk || !sk_fullsock(sk)) 4497 return 0; 4498 4499 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 4500 ancestor = cgroup_ancestor(cgrp, ancestor_level); 4501 if (!ancestor) 4502 return 0; 4503 4504 return cgroup_id(ancestor); 4505 } 4506 4507 BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int, 4508 ancestor_level) 4509 { 4510 return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level); 4511 } 4512 4513 static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = { 4514 .func = bpf_skb_ancestor_cgroup_id, 4515 .gpl_only = false, 4516 .ret_type = RET_INTEGER, 4517 .arg1_type = ARG_PTR_TO_CTX, 4518 .arg2_type = ARG_ANYTHING, 4519 }; 4520 4521 BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk) 4522 { 4523 return __bpf_sk_cgroup_id(sk); 4524 } 4525 4526 static const struct bpf_func_proto bpf_sk_cgroup_id_proto = { 4527 .func = bpf_sk_cgroup_id, 4528 .gpl_only = false, 4529 .ret_type = RET_INTEGER, 4530 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4531 }; 4532 4533 BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level) 4534 { 4535 return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level); 4536 } 4537 4538 static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = { 4539 .func = bpf_sk_ancestor_cgroup_id, 4540 .gpl_only = false, 4541 .ret_type = RET_INTEGER, 4542 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4543 .arg2_type = ARG_ANYTHING, 4544 }; 4545 #endif 4546 4547 static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, 4548 unsigned long off, unsigned long len) 4549 { 4550 memcpy(dst_buff, src_buff + off, len); 4551 return 0; 4552 } 4553 4554 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, 4555 u64, flags, void *, meta, u64, meta_size) 4556 { 4557 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; 4558 4559 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) 4560 return -EINVAL; 4561 if (unlikely(!xdp || 4562 xdp_size > 
(unsigned long)(xdp->data_end - xdp->data))) 4563 return -EFAULT; 4564 4565 return bpf_event_output(map, flags, meta, meta_size, xdp->data, 4566 xdp_size, bpf_xdp_copy); 4567 } 4568 4569 static const struct bpf_func_proto bpf_xdp_event_output_proto = { 4570 .func = bpf_xdp_event_output, 4571 .gpl_only = true, 4572 .ret_type = RET_INTEGER, 4573 .arg1_type = ARG_PTR_TO_CTX, 4574 .arg2_type = ARG_CONST_MAP_PTR, 4575 .arg3_type = ARG_ANYTHING, 4576 .arg4_type = ARG_PTR_TO_MEM, 4577 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4578 }; 4579 4580 BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff) 4581 4582 const struct bpf_func_proto bpf_xdp_output_proto = { 4583 .func = bpf_xdp_event_output, 4584 .gpl_only = true, 4585 .ret_type = RET_INTEGER, 4586 .arg1_type = ARG_PTR_TO_BTF_ID, 4587 .arg1_btf_id = &bpf_xdp_output_btf_ids[0], 4588 .arg2_type = ARG_CONST_MAP_PTR, 4589 .arg3_type = ARG_ANYTHING, 4590 .arg4_type = ARG_PTR_TO_MEM, 4591 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4592 }; 4593 4594 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) 4595 { 4596 return skb->sk ? __sock_gen_cookie(skb->sk) : 0; 4597 } 4598 4599 static const struct bpf_func_proto bpf_get_socket_cookie_proto = { 4600 .func = bpf_get_socket_cookie, 4601 .gpl_only = false, 4602 .ret_type = RET_INTEGER, 4603 .arg1_type = ARG_PTR_TO_CTX, 4604 }; 4605 4606 BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) 4607 { 4608 return __sock_gen_cookie(ctx->sk); 4609 } 4610 4611 static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { 4612 .func = bpf_get_socket_cookie_sock_addr, 4613 .gpl_only = false, 4614 .ret_type = RET_INTEGER, 4615 .arg1_type = ARG_PTR_TO_CTX, 4616 }; 4617 4618 BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx) 4619 { 4620 return __sock_gen_cookie(ctx); 4621 } 4622 4623 static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = { 4624 .func = bpf_get_socket_cookie_sock, 4625 .gpl_only = false, 4626 .ret_type = RET_INTEGER, 4627 .arg1_type = ARG_PTR_TO_CTX, 4628 }; 4629 4630 BPF_CALL_1(bpf_get_socket_ptr_cookie, struct sock *, sk) 4631 { 4632 return sk ? sock_gen_cookie(sk) : 0; 4633 } 4634 4635 const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = { 4636 .func = bpf_get_socket_ptr_cookie, 4637 .gpl_only = false, 4638 .ret_type = RET_INTEGER, 4639 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4640 }; 4641 4642 BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) 4643 { 4644 return __sock_gen_cookie(ctx->sk); 4645 } 4646 4647 static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { 4648 .func = bpf_get_socket_cookie_sock_ops, 4649 .gpl_only = false, 4650 .ret_type = RET_INTEGER, 4651 .arg1_type = ARG_PTR_TO_CTX, 4652 }; 4653 4654 static u64 __bpf_get_netns_cookie(struct sock *sk) 4655 { 4656 const struct net *net = sk ? sock_net(sk) : &init_net; 4657 4658 return net->net_cookie; 4659 } 4660 4661 BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx) 4662 { 4663 return __bpf_get_netns_cookie(ctx); 4664 } 4665 4666 static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = { 4667 .func = bpf_get_netns_cookie_sock, 4668 .gpl_only = false, 4669 .ret_type = RET_INTEGER, 4670 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4671 }; 4672 4673 BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) 4674 { 4675 return __bpf_get_netns_cookie(ctx ? 
ctx->sk : NULL); 4676 } 4677 4678 static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = { 4679 .func = bpf_get_netns_cookie_sock_addr, 4680 .gpl_only = false, 4681 .ret_type = RET_INTEGER, 4682 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4683 }; 4684 4685 BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) 4686 { 4687 struct sock *sk = sk_to_full_sk(skb->sk); 4688 kuid_t kuid; 4689 4690 if (!sk || !sk_fullsock(sk)) 4691 return overflowuid; 4692 kuid = sock_net_uid(sock_net(sk), sk); 4693 return from_kuid_munged(sock_net(sk)->user_ns, kuid); 4694 } 4695 4696 static const struct bpf_func_proto bpf_get_socket_uid_proto = { 4697 .func = bpf_get_socket_uid, 4698 .gpl_only = false, 4699 .ret_type = RET_INTEGER, 4700 .arg1_type = ARG_PTR_TO_CTX, 4701 }; 4702 4703 static int _bpf_setsockopt(struct sock *sk, int level, int optname, 4704 char *optval, int optlen) 4705 { 4706 char devname[IFNAMSIZ]; 4707 int val, valbool; 4708 struct net *net; 4709 int ifindex; 4710 int ret = 0; 4711 4712 if (!sk_fullsock(sk)) 4713 return -EINVAL; 4714 4715 sock_owned_by_me(sk); 4716 4717 if (level == SOL_SOCKET) { 4718 if (optlen != sizeof(int) && optname != SO_BINDTODEVICE) 4719 return -EINVAL; 4720 val = *((int *)optval); 4721 valbool = val ? 1 : 0; 4722 4723 /* Only some socketops are supported */ 4724 switch (optname) { 4725 case SO_RCVBUF: 4726 val = min_t(u32, val, sysctl_rmem_max); 4727 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 4728 WRITE_ONCE(sk->sk_rcvbuf, 4729 max_t(int, val * 2, SOCK_MIN_RCVBUF)); 4730 break; 4731 case SO_SNDBUF: 4732 val = min_t(u32, val, sysctl_wmem_max); 4733 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 4734 WRITE_ONCE(sk->sk_sndbuf, 4735 max_t(int, val * 2, SOCK_MIN_SNDBUF)); 4736 break; 4737 case SO_MAX_PACING_RATE: /* 32bit version */ 4738 if (val != ~0U) 4739 cmpxchg(&sk->sk_pacing_status, 4740 SK_PACING_NONE, 4741 SK_PACING_NEEDED); 4742 sk->sk_max_pacing_rate = (val == ~0U) ? 4743 ~0UL : (unsigned int)val; 4744 sk->sk_pacing_rate = min(sk->sk_pacing_rate, 4745 sk->sk_max_pacing_rate); 4746 break; 4747 case SO_PRIORITY: 4748 sk->sk_priority = val; 4749 break; 4750 case SO_RCVLOWAT: 4751 if (val < 0) 4752 val = INT_MAX; 4753 WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 4754 break; 4755 case SO_MARK: 4756 if (sk->sk_mark != val) { 4757 sk->sk_mark = val; 4758 sk_dst_reset(sk); 4759 } 4760 break; 4761 case SO_BINDTODEVICE: 4762 optlen = min_t(long, optlen, IFNAMSIZ - 1); 4763 strncpy(devname, optval, optlen); 4764 devname[optlen] = 0; 4765 4766 ifindex = 0; 4767 if (devname[0] != '\0') { 4768 struct net_device *dev; 4769 4770 ret = -ENODEV; 4771 4772 net = sock_net(sk); 4773 dev = dev_get_by_name(net, devname); 4774 if (!dev) 4775 break; 4776 ifindex = dev->ifindex; 4777 dev_put(dev); 4778 } 4779 fallthrough; 4780 case SO_BINDTOIFINDEX: 4781 if (optname == SO_BINDTOIFINDEX) 4782 ifindex = val; 4783 ret = sock_bindtoindex(sk, ifindex, false); 4784 break; 4785 case SO_KEEPALIVE: 4786 if (sk->sk_prot->keepalive) 4787 sk->sk_prot->keepalive(sk, valbool); 4788 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 4789 break; 4790 default: 4791 ret = -EINVAL; 4792 } 4793 #ifdef CONFIG_INET 4794 } else if (level == SOL_IP) { 4795 if (optlen != sizeof(int) || sk->sk_family != AF_INET) 4796 return -EINVAL; 4797 4798 val = *((int *)optval); 4799 /* Only some options are supported */ 4800 switch (optname) { 4801 case IP_TOS: 4802 if (val < -1 || val > 0xff) { 4803 ret = -EINVAL; 4804 } else { 4805 struct inet_sock *inet = inet_sk(sk); 4806 4807 if (val == -1) 4808 val = 0; 4809 inet->tos = val; 4810 } 4811 break; 4812 default: 4813 ret = -EINVAL; 4814 } 4815 #if IS_ENABLED(CONFIG_IPV6) 4816 } else if (level == SOL_IPV6) { 4817 if (optlen != sizeof(int) || sk->sk_family != AF_INET6) 4818 return -EINVAL; 4819 4820 val = *((int *)optval); 4821 /* Only some options are supported */ 4822 switch (optname) { 4823 case IPV6_TCLASS: 4824 if (val < -1 || val > 0xff) { 4825 ret = -EINVAL; 4826 } else { 4827 struct ipv6_pinfo *np = inet6_sk(sk); 4828 4829 if (val == -1) 4830 val = 0; 4831 np->tclass = val; 4832 } 4833 break; 4834 default: 4835 ret = -EINVAL; 4836 } 4837 #endif 4838 } else if (level == SOL_TCP && 4839 sk->sk_prot->setsockopt == tcp_setsockopt) { 4840 if (optname == TCP_CONGESTION) { 4841 char name[TCP_CA_NAME_MAX]; 4842 4843 strncpy(name, optval, min_t(long, optlen, 4844 TCP_CA_NAME_MAX-1)); 4845 name[TCP_CA_NAME_MAX-1] = 0; 4846 ret = tcp_set_congestion_control(sk, name, false, true); 4847 } else { 4848 struct inet_connection_sock *icsk = inet_csk(sk); 4849 struct tcp_sock *tp = tcp_sk(sk); 4850 unsigned long timeout; 4851 4852 if (optlen != sizeof(int)) 4853 return -EINVAL; 4854 4855 val = *((int *)optval); 4856 /* Only some options are supported */ 4857 switch (optname) { 4858 case TCP_BPF_IW: 4859 if (val <= 0 || tp->data_segs_out > tp->syn_data) 4860 ret = -EINVAL; 4861 else 4862 tp->snd_cwnd = val; 4863 break; 4864 case TCP_BPF_SNDCWND_CLAMP: 4865 if (val <= 0) { 4866 ret = -EINVAL; 4867 } else { 4868 tp->snd_cwnd_clamp = val; 4869 tp->snd_ssthresh = val; 4870 } 4871 break; 4872 case TCP_BPF_DELACK_MAX: 4873 timeout = usecs_to_jiffies(val); 4874 if (timeout > TCP_DELACK_MAX || 4875 timeout < TCP_TIMEOUT_MIN) 4876 return -EINVAL; 4877 inet_csk(sk)->icsk_delack_max = timeout; 4878 break; 4879 case TCP_BPF_RTO_MIN: 4880 timeout = usecs_to_jiffies(val); 4881 if (timeout > TCP_RTO_MIN || 4882 timeout < TCP_TIMEOUT_MIN) 4883 return -EINVAL; 4884 inet_csk(sk)->icsk_rto_min = timeout; 4885 break; 4886 case TCP_SAVE_SYN: 4887 if (val < 0 || val > 1) 4888 ret = -EINVAL; 4889 else 4890 tp->save_syn = val; 4891 break; 4892 case TCP_KEEPIDLE: 4893 ret = tcp_sock_set_keepidle_locked(sk, val); 4894 break; 4895 case TCP_KEEPINTVL: 4896 if (val < 1 || val > 
MAX_TCP_KEEPINTVL) 4897 ret = -EINVAL; 4898 else 4899 tp->keepalive_intvl = val * HZ; 4900 break; 4901 case TCP_KEEPCNT: 4902 if (val < 1 || val > MAX_TCP_KEEPCNT) 4903 ret = -EINVAL; 4904 else 4905 tp->keepalive_probes = val; 4906 break; 4907 case TCP_SYNCNT: 4908 if (val < 1 || val > MAX_TCP_SYNCNT) 4909 ret = -EINVAL; 4910 else 4911 icsk->icsk_syn_retries = val; 4912 break; 4913 case TCP_USER_TIMEOUT: 4914 if (val < 0) 4915 ret = -EINVAL; 4916 else 4917 icsk->icsk_user_timeout = val; 4918 break; 4919 case TCP_NOTSENT_LOWAT: 4920 tp->notsent_lowat = val; 4921 sk->sk_write_space(sk); 4922 break; 4923 case TCP_WINDOW_CLAMP: 4924 ret = tcp_set_window_clamp(sk, val); 4925 break; 4926 default: 4927 ret = -EINVAL; 4928 } 4929 } 4930 #endif 4931 } else { 4932 ret = -EINVAL; 4933 } 4934 return ret; 4935 } 4936 4937 static int _bpf_getsockopt(struct sock *sk, int level, int optname, 4938 char *optval, int optlen) 4939 { 4940 if (!sk_fullsock(sk)) 4941 goto err_clear; 4942 4943 sock_owned_by_me(sk); 4944 4945 if (level == SOL_SOCKET) { 4946 if (optlen != sizeof(int)) 4947 goto err_clear; 4948 4949 switch (optname) { 4950 case SO_MARK: 4951 *((int *)optval) = sk->sk_mark; 4952 break; 4953 case SO_PRIORITY: 4954 *((int *)optval) = sk->sk_priority; 4955 break; 4956 case SO_BINDTOIFINDEX: 4957 *((int *)optval) = sk->sk_bound_dev_if; 4958 break; 4959 default: 4960 goto err_clear; 4961 } 4962 #ifdef CONFIG_INET 4963 } else if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) { 4964 struct inet_connection_sock *icsk; 4965 struct tcp_sock *tp; 4966 4967 switch (optname) { 4968 case TCP_CONGESTION: 4969 icsk = inet_csk(sk); 4970 4971 if (!icsk->icsk_ca_ops || optlen <= 1) 4972 goto err_clear; 4973 strncpy(optval, icsk->icsk_ca_ops->name, optlen); 4974 optval[optlen - 1] = 0; 4975 break; 4976 case TCP_SAVED_SYN: 4977 tp = tcp_sk(sk); 4978 4979 if (optlen <= 0 || !tp->saved_syn || 4980 optlen > tcp_saved_syn_len(tp->saved_syn)) 4981 goto err_clear; 4982 memcpy(optval, tp->saved_syn->data, optlen); 4983 break; 4984 default: 4985 goto err_clear; 4986 } 4987 } else if (level == SOL_IP) { 4988 struct inet_sock *inet = inet_sk(sk); 4989 4990 if (optlen != sizeof(int) || sk->sk_family != AF_INET) 4991 goto err_clear; 4992 4993 /* Only some options are supported */ 4994 switch (optname) { 4995 case IP_TOS: 4996 *((int *)optval) = (int)inet->tos; 4997 break; 4998 default: 4999 goto err_clear; 5000 } 5001 #if IS_ENABLED(CONFIG_IPV6) 5002 } else if (level == SOL_IPV6) { 5003 struct ipv6_pinfo *np = inet6_sk(sk); 5004 5005 if (optlen != sizeof(int) || sk->sk_family != AF_INET6) 5006 goto err_clear; 5007 5008 /* Only some options are supported */ 5009 switch (optname) { 5010 case IPV6_TCLASS: 5011 *((int *)optval) = (int)np->tclass; 5012 break; 5013 default: 5014 goto err_clear; 5015 } 5016 #endif 5017 #endif 5018 } else { 5019 goto err_clear; 5020 } 5021 return 0; 5022 err_clear: 5023 memset(optval, 0, optlen); 5024 return -EINVAL; 5025 } 5026 5027 BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx, 5028 int, level, int, optname, char *, optval, int, optlen) 5029 { 5030 return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen); 5031 } 5032 5033 static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = { 5034 .func = bpf_sock_addr_setsockopt, 5035 .gpl_only = false, 5036 .ret_type = RET_INTEGER, 5037 .arg1_type = ARG_PTR_TO_CTX, 5038 .arg2_type = ARG_ANYTHING, 5039 .arg3_type = ARG_ANYTHING, 5040 .arg4_type = ARG_PTR_TO_MEM, 5041 .arg5_type = ARG_CONST_SIZE, 5042 }; 5043 
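/* Illustration only (not part of this file): a minimal sketch of how a
 * sock_addr program might reach the _bpf_setsockopt() wrapper above through
 * the bpf_setsockopt() helper, assuming libbpf-style SEC() annotations:
 *
 *	SEC("cgroup/connect4")
 *	int set_sndbuf(struct bpf_sock_addr *ctx)
 *	{
 *		int bufsize = 1 << 20;
 *
 *		bpf_setsockopt(ctx, SOL_SOCKET, SO_SNDBUF,
 *			       &bufsize, sizeof(bufsize));
 *		return 1;
 *	}
 *
 * Only the (level, optname) pairs handled in _bpf_setsockopt() are accepted;
 * anything else fails with -EINVAL.
 */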
5044 BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx, 5045 int, level, int, optname, char *, optval, int, optlen) 5046 { 5047 return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen); 5048 } 5049 5050 static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = { 5051 .func = bpf_sock_addr_getsockopt, 5052 .gpl_only = false, 5053 .ret_type = RET_INTEGER, 5054 .arg1_type = ARG_PTR_TO_CTX, 5055 .arg2_type = ARG_ANYTHING, 5056 .arg3_type = ARG_ANYTHING, 5057 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5058 .arg5_type = ARG_CONST_SIZE, 5059 }; 5060 5061 BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, 5062 int, level, int, optname, char *, optval, int, optlen) 5063 { 5064 return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen); 5065 } 5066 5067 static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = { 5068 .func = bpf_sock_ops_setsockopt, 5069 .gpl_only = false, 5070 .ret_type = RET_INTEGER, 5071 .arg1_type = ARG_PTR_TO_CTX, 5072 .arg2_type = ARG_ANYTHING, 5073 .arg3_type = ARG_ANYTHING, 5074 .arg4_type = ARG_PTR_TO_MEM, 5075 .arg5_type = ARG_CONST_SIZE, 5076 }; 5077 5078 static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock, 5079 int optname, const u8 **start) 5080 { 5081 struct sk_buff *syn_skb = bpf_sock->syn_skb; 5082 const u8 *hdr_start; 5083 int ret; 5084 5085 if (syn_skb) { 5086 /* sk is a request_sock here */ 5087 5088 if (optname == TCP_BPF_SYN) { 5089 hdr_start = syn_skb->data; 5090 ret = tcp_hdrlen(syn_skb); 5091 } else if (optname == TCP_BPF_SYN_IP) { 5092 hdr_start = skb_network_header(syn_skb); 5093 ret = skb_network_header_len(syn_skb) + 5094 tcp_hdrlen(syn_skb); 5095 } else { 5096 /* optname == TCP_BPF_SYN_MAC */ 5097 hdr_start = skb_mac_header(syn_skb); 5098 ret = skb_mac_header_len(syn_skb) + 5099 skb_network_header_len(syn_skb) + 5100 tcp_hdrlen(syn_skb); 5101 } 5102 } else { 5103 struct sock *sk = bpf_sock->sk; 5104 struct saved_syn *saved_syn; 5105 5106 if (sk->sk_state == TCP_NEW_SYN_RECV) 5107 /* synack retransmit. bpf_sock->syn_skb will 5108 * not be available. It has to resort to 5109 * saved_syn (if it is saved). 
5110 */ 5111 saved_syn = inet_reqsk(sk)->saved_syn; 5112 else 5113 saved_syn = tcp_sk(sk)->saved_syn; 5114 5115 if (!saved_syn) 5116 return -ENOENT; 5117 5118 if (optname == TCP_BPF_SYN) { 5119 hdr_start = saved_syn->data + 5120 saved_syn->mac_hdrlen + 5121 saved_syn->network_hdrlen; 5122 ret = saved_syn->tcp_hdrlen; 5123 } else if (optname == TCP_BPF_SYN_IP) { 5124 hdr_start = saved_syn->data + 5125 saved_syn->mac_hdrlen; 5126 ret = saved_syn->network_hdrlen + 5127 saved_syn->tcp_hdrlen; 5128 } else { 5129 /* optname == TCP_BPF_SYN_MAC */ 5130 5131 /* TCP_SAVE_SYN may not have saved the mac hdr */ 5132 if (!saved_syn->mac_hdrlen) 5133 return -ENOENT; 5134 5135 hdr_start = saved_syn->data; 5136 ret = saved_syn->mac_hdrlen + 5137 saved_syn->network_hdrlen + 5138 saved_syn->tcp_hdrlen; 5139 } 5140 } 5141 5142 *start = hdr_start; 5143 return ret; 5144 } 5145 5146 BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, 5147 int, level, int, optname, char *, optval, int, optlen) 5148 { 5149 if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP && 5150 optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) { 5151 int ret, copy_len = 0; 5152 const u8 *start; 5153 5154 ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start); 5155 if (ret > 0) { 5156 copy_len = ret; 5157 if (optlen < copy_len) { 5158 copy_len = optlen; 5159 ret = -ENOSPC; 5160 } 5161 5162 memcpy(optval, start, copy_len); 5163 } 5164 5165 /* Zero out unused buffer at the end */ 5166 memset(optval + copy_len, 0, optlen - copy_len); 5167 5168 return ret; 5169 } 5170 5171 return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen); 5172 } 5173 5174 static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = { 5175 .func = bpf_sock_ops_getsockopt, 5176 .gpl_only = false, 5177 .ret_type = RET_INTEGER, 5178 .arg1_type = ARG_PTR_TO_CTX, 5179 .arg2_type = ARG_ANYTHING, 5180 .arg3_type = ARG_ANYTHING, 5181 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5182 .arg5_type = ARG_CONST_SIZE, 5183 }; 5184 5185 BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, 5186 int, argval) 5187 { 5188 struct sock *sk = bpf_sock->sk; 5189 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; 5190 5191 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) 5192 return -EINVAL; 5193 5194 tcp_sk(sk)->bpf_sock_ops_cb_flags = val; 5195 5196 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); 5197 } 5198 5199 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { 5200 .func = bpf_sock_ops_cb_flags_set, 5201 .gpl_only = false, 5202 .ret_type = RET_INTEGER, 5203 .arg1_type = ARG_PTR_TO_CTX, 5204 .arg2_type = ARG_ANYTHING, 5205 }; 5206 5207 const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; 5208 EXPORT_SYMBOL_GPL(ipv6_bpf_stub); 5209 5210 BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, 5211 int, addr_len) 5212 { 5213 #ifdef CONFIG_INET 5214 struct sock *sk = ctx->sk; 5215 u32 flags = BIND_FROM_BPF; 5216 int err; 5217 5218 err = -EINVAL; 5219 if (addr_len < offsetofend(struct sockaddr, sa_family)) 5220 return err; 5221 if (addr->sa_family == AF_INET) { 5222 if (addr_len < sizeof(struct sockaddr_in)) 5223 return err; 5224 if (((struct sockaddr_in *)addr)->sin_port == htons(0)) 5225 flags |= BIND_FORCE_ADDRESS_NO_PORT; 5226 return __inet_bind(sk, addr, addr_len, flags); 5227 #if IS_ENABLED(CONFIG_IPV6) 5228 } else if (addr->sa_family == AF_INET6) { 5229 if (addr_len < SIN6_LEN_RFC2133) 5230 return err; 5231 if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0)) 5232 flags |= 
BIND_FORCE_ADDRESS_NO_PORT; 5233 /* ipv6_bpf_stub cannot be NULL, since it's called from 5234 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded 5235 */ 5236 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags); 5237 #endif /* CONFIG_IPV6 */ 5238 } 5239 #endif /* CONFIG_INET */ 5240 5241 return -EAFNOSUPPORT; 5242 } 5243 5244 static const struct bpf_func_proto bpf_bind_proto = { 5245 .func = bpf_bind, 5246 .gpl_only = false, 5247 .ret_type = RET_INTEGER, 5248 .arg1_type = ARG_PTR_TO_CTX, 5249 .arg2_type = ARG_PTR_TO_MEM, 5250 .arg3_type = ARG_CONST_SIZE, 5251 }; 5252 5253 #ifdef CONFIG_XFRM 5254 BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index, 5255 struct bpf_xfrm_state *, to, u32, size, u64, flags) 5256 { 5257 const struct sec_path *sp = skb_sec_path(skb); 5258 const struct xfrm_state *x; 5259 5260 if (!sp || unlikely(index >= sp->len || flags)) 5261 goto err_clear; 5262 5263 x = sp->xvec[index]; 5264 5265 if (unlikely(size != sizeof(struct bpf_xfrm_state))) 5266 goto err_clear; 5267 5268 to->reqid = x->props.reqid; 5269 to->spi = x->id.spi; 5270 to->family = x->props.family; 5271 to->ext = 0; 5272 5273 if (to->family == AF_INET6) { 5274 memcpy(to->remote_ipv6, x->props.saddr.a6, 5275 sizeof(to->remote_ipv6)); 5276 } else { 5277 to->remote_ipv4 = x->props.saddr.a4; 5278 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); 5279 } 5280 5281 return 0; 5282 err_clear: 5283 memset(to, 0, size); 5284 return -EINVAL; 5285 } 5286 5287 static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { 5288 .func = bpf_skb_get_xfrm_state, 5289 .gpl_only = false, 5290 .ret_type = RET_INTEGER, 5291 .arg1_type = ARG_PTR_TO_CTX, 5292 .arg2_type = ARG_ANYTHING, 5293 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 5294 .arg4_type = ARG_CONST_SIZE, 5295 .arg5_type = ARG_ANYTHING, 5296 }; 5297 #endif 5298 5299 #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) 5300 static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, 5301 const struct neighbour *neigh, 5302 const struct net_device *dev, u32 mtu) 5303 { 5304 memcpy(params->dmac, neigh->ha, ETH_ALEN); 5305 memcpy(params->smac, dev->dev_addr, ETH_ALEN); 5306 params->h_vlan_TCI = 0; 5307 params->h_vlan_proto = 0; 5308 if (mtu) 5309 params->mtu_result = mtu; /* union with tot_len */ 5310 5311 return 0; 5312 } 5313 #endif 5314 5315 #if IS_ENABLED(CONFIG_INET) 5316 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, 5317 u32 flags, bool check_mtu) 5318 { 5319 struct fib_nh_common *nhc; 5320 struct in_device *in_dev; 5321 struct neighbour *neigh; 5322 struct net_device *dev; 5323 struct fib_result res; 5324 struct flowi4 fl4; 5325 u32 mtu = 0; 5326 int err; 5327 5328 dev = dev_get_by_index_rcu(net, params->ifindex); 5329 if (unlikely(!dev)) 5330 return -ENODEV; 5331 5332 /* verify forwarding is enabled on this interface */ 5333 in_dev = __in_dev_get_rcu(dev); 5334 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) 5335 return BPF_FIB_LKUP_RET_FWD_DISABLED; 5336 5337 if (flags & BPF_FIB_LOOKUP_OUTPUT) { 5338 fl4.flowi4_iif = 1; 5339 fl4.flowi4_oif = params->ifindex; 5340 } else { 5341 fl4.flowi4_iif = params->ifindex; 5342 fl4.flowi4_oif = 0; 5343 } 5344 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK; 5345 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 5346 fl4.flowi4_flags = 0; 5347 5348 fl4.flowi4_proto = params->l4_protocol; 5349 fl4.daddr = params->ipv4_dst; 5350 fl4.saddr = params->ipv4_src; 5351 fl4.fl4_sport = params->sport; 5352 fl4.fl4_dport = params->dport; 5353 fl4.flowi4_multipath_hash = 0; 5354 5355 
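	/* BPF_FIB_LOOKUP_DIRECT requests a lookup in the table attached to
	 * the device (or the main table) only, bypassing FIB rules; the
	 * default path goes through the full policy-routing lookup with
	 * neutral mark/uid values.
	 */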
if (flags & BPF_FIB_LOOKUP_DIRECT) { 5356 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 5357 struct fib_table *tb; 5358 5359 tb = fib_get_table(net, tbid); 5360 if (unlikely(!tb)) 5361 return BPF_FIB_LKUP_RET_NOT_FWDED; 5362 5363 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); 5364 } else { 5365 fl4.flowi4_mark = 0; 5366 fl4.flowi4_secid = 0; 5367 fl4.flowi4_tun_key.tun_id = 0; 5368 fl4.flowi4_uid = sock_net_uid(net, NULL); 5369 5370 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); 5371 } 5372 5373 if (err) { 5374 /* map fib lookup errors to RTN_ type */ 5375 if (err == -EINVAL) 5376 return BPF_FIB_LKUP_RET_BLACKHOLE; 5377 if (err == -EHOSTUNREACH) 5378 return BPF_FIB_LKUP_RET_UNREACHABLE; 5379 if (err == -EACCES) 5380 return BPF_FIB_LKUP_RET_PROHIBIT; 5381 5382 return BPF_FIB_LKUP_RET_NOT_FWDED; 5383 } 5384 5385 if (res.type != RTN_UNICAST) 5386 return BPF_FIB_LKUP_RET_NOT_FWDED; 5387 5388 if (fib_info_num_path(res.fi) > 1) 5389 fib_select_path(net, &res, &fl4, NULL); 5390 5391 if (check_mtu) { 5392 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); 5393 if (params->tot_len > mtu) { 5394 params->mtu_result = mtu; /* union with tot_len */ 5395 return BPF_FIB_LKUP_RET_FRAG_NEEDED; 5396 } 5397 } 5398 5399 nhc = res.nhc; 5400 5401 /* do not handle lwt encaps right now */ 5402 if (nhc->nhc_lwtstate) 5403 return BPF_FIB_LKUP_RET_UNSUPP_LWT; 5404 5405 dev = nhc->nhc_dev; 5406 5407 params->rt_metric = res.fi->fib_priority; 5408 params->ifindex = dev->ifindex; 5409 5410 /* xdp and cls_bpf programs are run in RCU-bh so 5411 * rcu_read_lock_bh is not needed here 5412 */ 5413 if (likely(nhc->nhc_gw_family != AF_INET6)) { 5414 if (nhc->nhc_gw_family) 5415 params->ipv4_dst = nhc->nhc_gw.ipv4; 5416 5417 neigh = __ipv4_neigh_lookup_noref(dev, 5418 (__force u32)params->ipv4_dst); 5419 } else { 5420 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; 5421 5422 params->family = AF_INET6; 5423 *dst = nhc->nhc_gw.ipv6; 5424 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); 5425 } 5426 5427 if (!neigh) 5428 return BPF_FIB_LKUP_RET_NO_NEIGH; 5429 5430 return bpf_fib_set_fwd_params(params, neigh, dev, mtu); 5431 } 5432 #endif 5433 5434 #if IS_ENABLED(CONFIG_IPV6) 5435 static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, 5436 u32 flags, bool check_mtu) 5437 { 5438 struct in6_addr *src = (struct in6_addr *) params->ipv6_src; 5439 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst; 5440 struct fib6_result res = {}; 5441 struct neighbour *neigh; 5442 struct net_device *dev; 5443 struct inet6_dev *idev; 5444 struct flowi6 fl6; 5445 int strict = 0; 5446 int oif, err; 5447 u32 mtu = 0; 5448 5449 /* link local addresses are never forwarded */ 5450 if (rt6_need_strict(dst) || rt6_need_strict(src)) 5451 return BPF_FIB_LKUP_RET_NOT_FWDED; 5452 5453 dev = dev_get_by_index_rcu(net, params->ifindex); 5454 if (unlikely(!dev)) 5455 return -ENODEV; 5456 5457 idev = __in6_dev_get_safely(dev); 5458 if (unlikely(!idev || !idev->cnf.forwarding)) 5459 return BPF_FIB_LKUP_RET_FWD_DISABLED; 5460 5461 if (flags & BPF_FIB_LOOKUP_OUTPUT) { 5462 fl6.flowi6_iif = 1; 5463 oif = fl6.flowi6_oif = params->ifindex; 5464 } else { 5465 oif = fl6.flowi6_iif = params->ifindex; 5466 fl6.flowi6_oif = 0; 5467 strict = RT6_LOOKUP_F_HAS_SADDR; 5468 } 5469 fl6.flowlabel = params->flowinfo; 5470 fl6.flowi6_scope = 0; 5471 fl6.flowi6_flags = 0; 5472 fl6.mp_hash = 0; 5473 5474 fl6.flowi6_proto = params->l4_protocol; 5475 fl6.daddr = *dst; 5476 fl6.saddr = *src; 5477 fl6.fl6_sport = 
params->sport; 5478 fl6.fl6_dport = params->dport; 5479 5480 if (flags & BPF_FIB_LOOKUP_DIRECT) { 5481 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 5482 struct fib6_table *tb; 5483 5484 tb = ipv6_stub->fib6_get_table(net, tbid); 5485 if (unlikely(!tb)) 5486 return BPF_FIB_LKUP_RET_NOT_FWDED; 5487 5488 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res, 5489 strict); 5490 } else { 5491 fl6.flowi6_mark = 0; 5492 fl6.flowi6_secid = 0; 5493 fl6.flowi6_tun_key.tun_id = 0; 5494 fl6.flowi6_uid = sock_net_uid(net, NULL); 5495 5496 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict); 5497 } 5498 5499 if (unlikely(err || IS_ERR_OR_NULL(res.f6i) || 5500 res.f6i == net->ipv6.fib6_null_entry)) 5501 return BPF_FIB_LKUP_RET_NOT_FWDED; 5502 5503 switch (res.fib6_type) { 5504 /* only unicast is forwarded */ 5505 case RTN_UNICAST: 5506 break; 5507 case RTN_BLACKHOLE: 5508 return BPF_FIB_LKUP_RET_BLACKHOLE; 5509 case RTN_UNREACHABLE: 5510 return BPF_FIB_LKUP_RET_UNREACHABLE; 5511 case RTN_PROHIBIT: 5512 return BPF_FIB_LKUP_RET_PROHIBIT; 5513 default: 5514 return BPF_FIB_LKUP_RET_NOT_FWDED; 5515 } 5516 5517 ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif, 5518 fl6.flowi6_oif != 0, NULL, strict); 5519 5520 if (check_mtu) { 5521 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src); 5522 if (params->tot_len > mtu) { 5523 params->mtu_result = mtu; /* union with tot_len */ 5524 return BPF_FIB_LKUP_RET_FRAG_NEEDED; 5525 } 5526 } 5527 5528 if (res.nh->fib_nh_lws) 5529 return BPF_FIB_LKUP_RET_UNSUPP_LWT; 5530 5531 if (res.nh->fib_nh_gw_family) 5532 *dst = res.nh->fib_nh_gw6; 5533 5534 dev = res.nh->fib_nh_dev; 5535 params->rt_metric = res.f6i->fib6_metric; 5536 params->ifindex = dev->ifindex; 5537 5538 /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is 5539 * not needed here. 
5540 */ 5541 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); 5542 if (!neigh) 5543 return BPF_FIB_LKUP_RET_NO_NEIGH; 5544 5545 return bpf_fib_set_fwd_params(params, neigh, dev, mtu); 5546 } 5547 #endif 5548 5549 BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, 5550 struct bpf_fib_lookup *, params, int, plen, u32, flags) 5551 { 5552 if (plen < sizeof(*params)) 5553 return -EINVAL; 5554 5555 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) 5556 return -EINVAL; 5557 5558 switch (params->family) { 5559 #if IS_ENABLED(CONFIG_INET) 5560 case AF_INET: 5561 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params, 5562 flags, true); 5563 #endif 5564 #if IS_ENABLED(CONFIG_IPV6) 5565 case AF_INET6: 5566 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params, 5567 flags, true); 5568 #endif 5569 } 5570 return -EAFNOSUPPORT; 5571 } 5572 5573 static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = { 5574 .func = bpf_xdp_fib_lookup, 5575 .gpl_only = true, 5576 .ret_type = RET_INTEGER, 5577 .arg1_type = ARG_PTR_TO_CTX, 5578 .arg2_type = ARG_PTR_TO_MEM, 5579 .arg3_type = ARG_CONST_SIZE, 5580 .arg4_type = ARG_ANYTHING, 5581 }; 5582 5583 BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, 5584 struct bpf_fib_lookup *, params, int, plen, u32, flags) 5585 { 5586 struct net *net = dev_net(skb->dev); 5587 int rc = -EAFNOSUPPORT; 5588 bool check_mtu = false; 5589 5590 if (plen < sizeof(*params)) 5591 return -EINVAL; 5592 5593 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) 5594 return -EINVAL; 5595 5596 if (params->tot_len) 5597 check_mtu = true; 5598 5599 switch (params->family) { 5600 #if IS_ENABLED(CONFIG_INET) 5601 case AF_INET: 5602 rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu); 5603 break; 5604 #endif 5605 #if IS_ENABLED(CONFIG_IPV6) 5606 case AF_INET6: 5607 rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu); 5608 break; 5609 #endif 5610 } 5611 5612 if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) { 5613 struct net_device *dev; 5614 5615 /* When tot_len isn't provided by user, check skb 5616 * against MTU of FIB lookup resulting net_device 5617 */ 5618 dev = dev_get_by_index_rcu(net, params->ifindex); 5619 if (!is_skb_forwardable(dev, skb)) 5620 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; 5621 5622 params->mtu_result = dev->mtu; /* union with tot_len */ 5623 } 5624 5625 return rc; 5626 } 5627 5628 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { 5629 .func = bpf_skb_fib_lookup, 5630 .gpl_only = true, 5631 .ret_type = RET_INTEGER, 5632 .arg1_type = ARG_PTR_TO_CTX, 5633 .arg2_type = ARG_PTR_TO_MEM, 5634 .arg3_type = ARG_CONST_SIZE, 5635 .arg4_type = ARG_ANYTHING, 5636 }; 5637 5638 static struct net_device *__dev_via_ifindex(struct net_device *dev_curr, 5639 u32 ifindex) 5640 { 5641 struct net *netns = dev_net(dev_curr); 5642 5643 /* Non-redirect use-cases can use ifindex=0 and save ifindex lookup */ 5644 if (ifindex == 0) 5645 return dev_curr; 5646 5647 return dev_get_by_index_rcu(netns, ifindex); 5648 } 5649 5650 BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, 5651 u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags) 5652 { 5653 int ret = BPF_MTU_CHK_RET_FRAG_NEEDED; 5654 struct net_device *dev = skb->dev; 5655 int skb_len, dev_len; 5656 int mtu; 5657 5658 if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) 5659 return -EINVAL; 5660 5661 if (unlikely(flags & BPF_MTU_CHK_SEGS && len_diff)) 5662 return -EINVAL; 5663 5664 dev = __dev_via_ifindex(dev, ifindex); 5665 if (unlikely(!dev)) 5666 return -ENODEV; 5667 5668 mtu = READ_ONCE(dev->mtu); 
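	/* dev->mtu is an L3 size, so account for the L2 header length
	 * before comparing against skb->len (same as the XDP variant below).
	 */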
5669 5670 dev_len = mtu + dev->hard_header_len; 5671 skb_len = skb->len + len_diff; /* minus result pass check */ 5672 if (skb_len <= dev_len) { 5673 ret = BPF_MTU_CHK_RET_SUCCESS; 5674 goto out; 5675 } 5676 /* At this point, skb->len exceed MTU, but as it include length of all 5677 * segments, it can still be below MTU. The SKB can possibly get 5678 * re-segmented in transmit path (see validate_xmit_skb). Thus, user 5679 * must choose if segs are to be MTU checked. 5680 */ 5681 if (skb_is_gso(skb)) { 5682 ret = BPF_MTU_CHK_RET_SUCCESS; 5683 5684 if (flags & BPF_MTU_CHK_SEGS && 5685 !skb_gso_validate_network_len(skb, mtu)) 5686 ret = BPF_MTU_CHK_RET_SEGS_TOOBIG; 5687 } 5688 out: 5689 /* BPF verifier guarantees valid pointer */ 5690 *mtu_len = mtu; 5691 5692 return ret; 5693 } 5694 5695 BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, 5696 u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags) 5697 { 5698 struct net_device *dev = xdp->rxq->dev; 5699 int xdp_len = xdp->data_end - xdp->data; 5700 int ret = BPF_MTU_CHK_RET_SUCCESS; 5701 int mtu, dev_len; 5702 5703 /* XDP variant doesn't support multi-buffer segment check (yet) */ 5704 if (unlikely(flags)) 5705 return -EINVAL; 5706 5707 dev = __dev_via_ifindex(dev, ifindex); 5708 if (unlikely(!dev)) 5709 return -ENODEV; 5710 5711 mtu = READ_ONCE(dev->mtu); 5712 5713 /* Add L2-header as dev MTU is L3 size */ 5714 dev_len = mtu + dev->hard_header_len; 5715 5716 xdp_len += len_diff; /* minus result pass check */ 5717 if (xdp_len > dev_len) 5718 ret = BPF_MTU_CHK_RET_FRAG_NEEDED; 5719 5720 /* BPF verifier guarantees valid pointer */ 5721 *mtu_len = mtu; 5722 5723 return ret; 5724 } 5725 5726 static const struct bpf_func_proto bpf_skb_check_mtu_proto = { 5727 .func = bpf_skb_check_mtu, 5728 .gpl_only = true, 5729 .ret_type = RET_INTEGER, 5730 .arg1_type = ARG_PTR_TO_CTX, 5731 .arg2_type = ARG_ANYTHING, 5732 .arg3_type = ARG_PTR_TO_INT, 5733 .arg4_type = ARG_ANYTHING, 5734 .arg5_type = ARG_ANYTHING, 5735 }; 5736 5737 static const struct bpf_func_proto bpf_xdp_check_mtu_proto = { 5738 .func = bpf_xdp_check_mtu, 5739 .gpl_only = true, 5740 .ret_type = RET_INTEGER, 5741 .arg1_type = ARG_PTR_TO_CTX, 5742 .arg2_type = ARG_ANYTHING, 5743 .arg3_type = ARG_PTR_TO_INT, 5744 .arg4_type = ARG_ANYTHING, 5745 .arg5_type = ARG_ANYTHING, 5746 }; 5747 5748 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 5749 static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) 5750 { 5751 int err; 5752 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr; 5753 5754 if (!seg6_validate_srh(srh, len, false)) 5755 return -EINVAL; 5756 5757 switch (type) { 5758 case BPF_LWT_ENCAP_SEG6_INLINE: 5759 if (skb->protocol != htons(ETH_P_IPV6)) 5760 return -EBADMSG; 5761 5762 err = seg6_do_srh_inline(skb, srh); 5763 break; 5764 case BPF_LWT_ENCAP_SEG6: 5765 skb_reset_inner_headers(skb); 5766 skb->encapsulation = 1; 5767 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6); 5768 break; 5769 default: 5770 return -EINVAL; 5771 } 5772 5773 bpf_compute_data_pointers(skb); 5774 if (err) 5775 return err; 5776 5777 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); 5778 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 5779 5780 return seg6_lookup_nexthop(skb, NULL, 0); 5781 } 5782 #endif /* CONFIG_IPV6_SEG6_BPF */ 5783 5784 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) 5785 static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, 5786 bool ingress) 5787 { 5788 return bpf_lwt_push_ip_encap(skb, hdr, len, ingress); 5789 } 5790 #endif 5791 5792 
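/* Illustration only (not part of this file): a BPF_PROG_TYPE_LWT_IN program
 * could use the bpf_lwt_push_encap() helper backed by the functions below
 * roughly as follows, assuming a pre-built SRH in srh[] of srh_len bytes:
 *
 *	SEC("lwt_in")
 *	int do_encap(struct __sk_buff *skb)
 *	{
 *		if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6,
 *				       srh, srh_len))
 *			return BPF_DROP;
 *		return BPF_OK;
 *	}
 */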
BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr, 5793 u32, len) 5794 { 5795 switch (type) { 5796 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 5797 case BPF_LWT_ENCAP_SEG6: 5798 case BPF_LWT_ENCAP_SEG6_INLINE: 5799 return bpf_push_seg6_encap(skb, type, hdr, len); 5800 #endif 5801 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) 5802 case BPF_LWT_ENCAP_IP: 5803 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */); 5804 #endif 5805 default: 5806 return -EINVAL; 5807 } 5808 } 5809 5810 BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type, 5811 void *, hdr, u32, len) 5812 { 5813 switch (type) { 5814 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) 5815 case BPF_LWT_ENCAP_IP: 5816 return bpf_push_ip_encap(skb, hdr, len, false /* egress */); 5817 #endif 5818 default: 5819 return -EINVAL; 5820 } 5821 } 5822 5823 static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = { 5824 .func = bpf_lwt_in_push_encap, 5825 .gpl_only = false, 5826 .ret_type = RET_INTEGER, 5827 .arg1_type = ARG_PTR_TO_CTX, 5828 .arg2_type = ARG_ANYTHING, 5829 .arg3_type = ARG_PTR_TO_MEM, 5830 .arg4_type = ARG_CONST_SIZE 5831 }; 5832 5833 static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = { 5834 .func = bpf_lwt_xmit_push_encap, 5835 .gpl_only = false, 5836 .ret_type = RET_INTEGER, 5837 .arg1_type = ARG_PTR_TO_CTX, 5838 .arg2_type = ARG_ANYTHING, 5839 .arg3_type = ARG_PTR_TO_MEM, 5840 .arg4_type = ARG_CONST_SIZE 5841 }; 5842 5843 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 5844 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, 5845 const void *, from, u32, len) 5846 { 5847 struct seg6_bpf_srh_state *srh_state = 5848 this_cpu_ptr(&seg6_bpf_srh_states); 5849 struct ipv6_sr_hdr *srh = srh_state->srh; 5850 void *srh_tlvs, *srh_end, *ptr; 5851 int srhoff = 0; 5852 5853 if (srh == NULL) 5854 return -EINVAL; 5855 5856 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4)); 5857 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen); 5858 5859 ptr = skb->data + offset; 5860 if (ptr >= srh_tlvs && ptr + len <= srh_end) 5861 srh_state->valid = false; 5862 else if (ptr < (void *)&srh->flags || 5863 ptr + len > (void *)&srh->segments) 5864 return -EFAULT; 5865 5866 if (unlikely(bpf_try_make_writable(skb, offset + len))) 5867 return -EFAULT; 5868 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) 5869 return -EINVAL; 5870 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); 5871 5872 memcpy(skb->data + offset, from, len); 5873 return 0; 5874 } 5875 5876 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { 5877 .func = bpf_lwt_seg6_store_bytes, 5878 .gpl_only = false, 5879 .ret_type = RET_INTEGER, 5880 .arg1_type = ARG_PTR_TO_CTX, 5881 .arg2_type = ARG_ANYTHING, 5882 .arg3_type = ARG_PTR_TO_MEM, 5883 .arg4_type = ARG_CONST_SIZE 5884 }; 5885 5886 static void bpf_update_srh_state(struct sk_buff *skb) 5887 { 5888 struct seg6_bpf_srh_state *srh_state = 5889 this_cpu_ptr(&seg6_bpf_srh_states); 5890 int srhoff = 0; 5891 5892 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) { 5893 srh_state->srh = NULL; 5894 } else { 5895 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); 5896 srh_state->hdrlen = srh_state->srh->hdrlen << 3; 5897 srh_state->valid = true; 5898 } 5899 } 5900 5901 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, 5902 u32, action, void *, param, u32, param_len) 5903 { 5904 struct seg6_bpf_srh_state *srh_state = 5905 this_cpu_ptr(&seg6_bpf_srh_states); 5906 int hdroff = 0; 5907 int 
err; 5908 5909 switch (action) { 5910 case SEG6_LOCAL_ACTION_END_X: 5911 if (!seg6_bpf_has_valid_srh(skb)) 5912 return -EBADMSG; 5913 if (param_len != sizeof(struct in6_addr)) 5914 return -EINVAL; 5915 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0); 5916 case SEG6_LOCAL_ACTION_END_T: 5917 if (!seg6_bpf_has_valid_srh(skb)) 5918 return -EBADMSG; 5919 if (param_len != sizeof(int)) 5920 return -EINVAL; 5921 return seg6_lookup_nexthop(skb, NULL, *(int *)param); 5922 case SEG6_LOCAL_ACTION_END_DT6: 5923 if (!seg6_bpf_has_valid_srh(skb)) 5924 return -EBADMSG; 5925 if (param_len != sizeof(int)) 5926 return -EINVAL; 5927 5928 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0) 5929 return -EBADMSG; 5930 if (!pskb_pull(skb, hdroff)) 5931 return -EBADMSG; 5932 5933 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff); 5934 skb_reset_network_header(skb); 5935 skb_reset_transport_header(skb); 5936 skb->encapsulation = 0; 5937 5938 bpf_compute_data_pointers(skb); 5939 bpf_update_srh_state(skb); 5940 return seg6_lookup_nexthop(skb, NULL, *(int *)param); 5941 case SEG6_LOCAL_ACTION_END_B6: 5942 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) 5943 return -EBADMSG; 5944 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE, 5945 param, param_len); 5946 if (!err) 5947 bpf_update_srh_state(skb); 5948 5949 return err; 5950 case SEG6_LOCAL_ACTION_END_B6_ENCAP: 5951 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) 5952 return -EBADMSG; 5953 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6, 5954 param, param_len); 5955 if (!err) 5956 bpf_update_srh_state(skb); 5957 5958 return err; 5959 default: 5960 return -EINVAL; 5961 } 5962 } 5963 5964 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { 5965 .func = bpf_lwt_seg6_action, 5966 .gpl_only = false, 5967 .ret_type = RET_INTEGER, 5968 .arg1_type = ARG_PTR_TO_CTX, 5969 .arg2_type = ARG_ANYTHING, 5970 .arg3_type = ARG_PTR_TO_MEM, 5971 .arg4_type = ARG_CONST_SIZE 5972 }; 5973 5974 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, 5975 s32, len) 5976 { 5977 struct seg6_bpf_srh_state *srh_state = 5978 this_cpu_ptr(&seg6_bpf_srh_states); 5979 struct ipv6_sr_hdr *srh = srh_state->srh; 5980 void *srh_end, *srh_tlvs, *ptr; 5981 struct ipv6hdr *hdr; 5982 int srhoff = 0; 5983 int ret; 5984 5985 if (unlikely(srh == NULL)) 5986 return -EINVAL; 5987 5988 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) + 5989 ((srh->first_segment + 1) << 4)); 5990 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) + 5991 srh_state->hdrlen); 5992 ptr = skb->data + offset; 5993 5994 if (unlikely(ptr < srh_tlvs || ptr > srh_end)) 5995 return -EFAULT; 5996 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end)) 5997 return -EFAULT; 5998 5999 if (len > 0) { 6000 ret = skb_cow_head(skb, len); 6001 if (unlikely(ret < 0)) 6002 return ret; 6003 6004 ret = bpf_skb_net_hdr_push(skb, offset, len); 6005 } else { 6006 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len); 6007 } 6008 6009 bpf_compute_data_pointers(skb); 6010 if (unlikely(ret < 0)) 6011 return ret; 6012 6013 hdr = (struct ipv6hdr *)skb->data; 6014 hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); 6015 6016 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) 6017 return -EINVAL; 6018 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); 6019 srh_state->hdrlen += len; 6020 srh_state->valid = false; 6021 return 0; 6022 } 6023 6024 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { 6025 .func = 
bpf_lwt_seg6_adjust_srh, 6026 .gpl_only = false, 6027 .ret_type = RET_INTEGER, 6028 .arg1_type = ARG_PTR_TO_CTX, 6029 .arg2_type = ARG_ANYTHING, 6030 .arg3_type = ARG_ANYTHING, 6031 }; 6032 #endif /* CONFIG_IPV6_SEG6_BPF */ 6033 6034 #ifdef CONFIG_INET 6035 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, 6036 int dif, int sdif, u8 family, u8 proto) 6037 { 6038 bool refcounted = false; 6039 struct sock *sk = NULL; 6040 6041 if (family == AF_INET) { 6042 __be32 src4 = tuple->ipv4.saddr; 6043 __be32 dst4 = tuple->ipv4.daddr; 6044 6045 if (proto == IPPROTO_TCP) 6046 sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0, 6047 src4, tuple->ipv4.sport, 6048 dst4, tuple->ipv4.dport, 6049 dif, sdif, &refcounted); 6050 else 6051 sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport, 6052 dst4, tuple->ipv4.dport, 6053 dif, sdif, &udp_table, NULL); 6054 #if IS_ENABLED(CONFIG_IPV6) 6055 } else { 6056 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; 6057 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; 6058 6059 if (proto == IPPROTO_TCP) 6060 sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0, 6061 src6, tuple->ipv6.sport, 6062 dst6, ntohs(tuple->ipv6.dport), 6063 dif, sdif, &refcounted); 6064 else if (likely(ipv6_bpf_stub)) 6065 sk = ipv6_bpf_stub->udp6_lib_lookup(net, 6066 src6, tuple->ipv6.sport, 6067 dst6, tuple->ipv6.dport, 6068 dif, sdif, 6069 &udp_table, NULL); 6070 #endif 6071 } 6072 6073 if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) { 6074 WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); 6075 sk = NULL; 6076 } 6077 return sk; 6078 } 6079 6080 /* bpf_skc_lookup performs the core lookup for different types of sockets, 6081 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE. 6082 * Returns the socket as an 'unsigned long' to simplify the casting in the 6083 * callers to satisfy BPF_CALL declarations. 
6084 */ 6085 static struct sock * 6086 __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6087 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, 6088 u64 flags) 6089 { 6090 struct sock *sk = NULL; 6091 u8 family = AF_UNSPEC; 6092 struct net *net; 6093 int sdif; 6094 6095 if (len == sizeof(tuple->ipv4)) 6096 family = AF_INET; 6097 else if (len == sizeof(tuple->ipv6)) 6098 family = AF_INET6; 6099 else 6100 return NULL; 6101 6102 if (unlikely(family == AF_UNSPEC || flags || 6103 !((s32)netns_id < 0 || netns_id <= S32_MAX))) 6104 goto out; 6105 6106 if (family == AF_INET) 6107 sdif = inet_sdif(skb); 6108 else 6109 sdif = inet6_sdif(skb); 6110 6111 if ((s32)netns_id < 0) { 6112 net = caller_net; 6113 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); 6114 } else { 6115 net = get_net_ns_by_id(caller_net, netns_id); 6116 if (unlikely(!net)) 6117 goto out; 6118 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); 6119 put_net(net); 6120 } 6121 6122 out: 6123 return sk; 6124 } 6125 6126 static struct sock * 6127 __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6128 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, 6129 u64 flags) 6130 { 6131 struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net, 6132 ifindex, proto, netns_id, flags); 6133 6134 if (sk) { 6135 sk = sk_to_full_sk(sk); 6136 if (!sk_fullsock(sk)) { 6137 sock_gen_put(sk); 6138 return NULL; 6139 } 6140 } 6141 6142 return sk; 6143 } 6144 6145 static struct sock * 6146 bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6147 u8 proto, u64 netns_id, u64 flags) 6148 { 6149 struct net *caller_net; 6150 int ifindex; 6151 6152 if (skb->dev) { 6153 caller_net = dev_net(skb->dev); 6154 ifindex = skb->dev->ifindex; 6155 } else { 6156 caller_net = sock_net(skb->sk); 6157 ifindex = 0; 6158 } 6159 6160 return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, 6161 netns_id, flags); 6162 } 6163 6164 static struct sock * 6165 bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6166 u8 proto, u64 netns_id, u64 flags) 6167 { 6168 struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id, 6169 flags); 6170 6171 if (sk) { 6172 sk = sk_to_full_sk(sk); 6173 if (!sk_fullsock(sk)) { 6174 sock_gen_put(sk); 6175 return NULL; 6176 } 6177 } 6178 6179 return sk; 6180 } 6181 6182 BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb, 6183 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6184 { 6185 return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP, 6186 netns_id, flags); 6187 } 6188 6189 static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = { 6190 .func = bpf_skc_lookup_tcp, 6191 .gpl_only = false, 6192 .pkt_access = true, 6193 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6194 .arg1_type = ARG_PTR_TO_CTX, 6195 .arg2_type = ARG_PTR_TO_MEM, 6196 .arg3_type = ARG_CONST_SIZE, 6197 .arg4_type = ARG_ANYTHING, 6198 .arg5_type = ARG_ANYTHING, 6199 }; 6200 6201 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb, 6202 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6203 { 6204 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, 6205 netns_id, flags); 6206 } 6207 6208 static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { 6209 .func = bpf_sk_lookup_tcp, 6210 .gpl_only = false, 6211 .pkt_access = true, 6212 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6213 .arg1_type = ARG_PTR_TO_CTX, 6214 .arg2_type = ARG_PTR_TO_MEM, 6215 .arg3_type = 
ARG_CONST_SIZE, 6216 .arg4_type = ARG_ANYTHING, 6217 .arg5_type = ARG_ANYTHING, 6218 }; 6219 6220 BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb, 6221 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6222 { 6223 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, 6224 netns_id, flags); 6225 } 6226 6227 static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { 6228 .func = bpf_sk_lookup_udp, 6229 .gpl_only = false, 6230 .pkt_access = true, 6231 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6232 .arg1_type = ARG_PTR_TO_CTX, 6233 .arg2_type = ARG_PTR_TO_MEM, 6234 .arg3_type = ARG_CONST_SIZE, 6235 .arg4_type = ARG_ANYTHING, 6236 .arg5_type = ARG_ANYTHING, 6237 }; 6238 6239 BPF_CALL_1(bpf_sk_release, struct sock *, sk) 6240 { 6241 if (sk && sk_is_refcounted(sk)) 6242 sock_gen_put(sk); 6243 return 0; 6244 } 6245 6246 static const struct bpf_func_proto bpf_sk_release_proto = { 6247 .func = bpf_sk_release, 6248 .gpl_only = false, 6249 .ret_type = RET_INTEGER, 6250 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 6251 }; 6252 6253 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, 6254 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6255 { 6256 struct net *caller_net = dev_net(ctx->rxq->dev); 6257 int ifindex = ctx->rxq->dev->ifindex; 6258 6259 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, 6260 ifindex, IPPROTO_UDP, netns_id, 6261 flags); 6262 } 6263 6264 static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { 6265 .func = bpf_xdp_sk_lookup_udp, 6266 .gpl_only = false, 6267 .pkt_access = true, 6268 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6269 .arg1_type = ARG_PTR_TO_CTX, 6270 .arg2_type = ARG_PTR_TO_MEM, 6271 .arg3_type = ARG_CONST_SIZE, 6272 .arg4_type = ARG_ANYTHING, 6273 .arg5_type = ARG_ANYTHING, 6274 }; 6275 6276 BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx, 6277 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6278 { 6279 struct net *caller_net = dev_net(ctx->rxq->dev); 6280 int ifindex = ctx->rxq->dev->ifindex; 6281 6282 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net, 6283 ifindex, IPPROTO_TCP, netns_id, 6284 flags); 6285 } 6286 6287 static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = { 6288 .func = bpf_xdp_skc_lookup_tcp, 6289 .gpl_only = false, 6290 .pkt_access = true, 6291 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6292 .arg1_type = ARG_PTR_TO_CTX, 6293 .arg2_type = ARG_PTR_TO_MEM, 6294 .arg3_type = ARG_CONST_SIZE, 6295 .arg4_type = ARG_ANYTHING, 6296 .arg5_type = ARG_ANYTHING, 6297 }; 6298 6299 BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx, 6300 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6301 { 6302 struct net *caller_net = dev_net(ctx->rxq->dev); 6303 int ifindex = ctx->rxq->dev->ifindex; 6304 6305 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, 6306 ifindex, IPPROTO_TCP, netns_id, 6307 flags); 6308 } 6309 6310 static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = { 6311 .func = bpf_xdp_sk_lookup_tcp, 6312 .gpl_only = false, 6313 .pkt_access = true, 6314 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6315 .arg1_type = ARG_PTR_TO_CTX, 6316 .arg2_type = ARG_PTR_TO_MEM, 6317 .arg3_type = ARG_CONST_SIZE, 6318 .arg4_type = ARG_ANYTHING, 6319 .arg5_type = ARG_ANYTHING, 6320 }; 6321 6322 BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx, 6323 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6324 { 6325 return (unsigned 
long)__bpf_skc_lookup(NULL, tuple, len, 6326 sock_net(ctx->sk), 0, 6327 IPPROTO_TCP, netns_id, flags); 6328 } 6329 6330 static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = { 6331 .func = bpf_sock_addr_skc_lookup_tcp, 6332 .gpl_only = false, 6333 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6334 .arg1_type = ARG_PTR_TO_CTX, 6335 .arg2_type = ARG_PTR_TO_MEM, 6336 .arg3_type = ARG_CONST_SIZE, 6337 .arg4_type = ARG_ANYTHING, 6338 .arg5_type = ARG_ANYTHING, 6339 }; 6340 6341 BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx, 6342 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6343 { 6344 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, 6345 sock_net(ctx->sk), 0, IPPROTO_TCP, 6346 netns_id, flags); 6347 } 6348 6349 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = { 6350 .func = bpf_sock_addr_sk_lookup_tcp, 6351 .gpl_only = false, 6352 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6353 .arg1_type = ARG_PTR_TO_CTX, 6354 .arg2_type = ARG_PTR_TO_MEM, 6355 .arg3_type = ARG_CONST_SIZE, 6356 .arg4_type = ARG_ANYTHING, 6357 .arg5_type = ARG_ANYTHING, 6358 }; 6359 6360 BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx, 6361 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6362 { 6363 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, 6364 sock_net(ctx->sk), 0, IPPROTO_UDP, 6365 netns_id, flags); 6366 } 6367 6368 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { 6369 .func = bpf_sock_addr_sk_lookup_udp, 6370 .gpl_only = false, 6371 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6372 .arg1_type = ARG_PTR_TO_CTX, 6373 .arg2_type = ARG_PTR_TO_MEM, 6374 .arg3_type = ARG_CONST_SIZE, 6375 .arg4_type = ARG_ANYTHING, 6376 .arg5_type = ARG_ANYTHING, 6377 }; 6378 6379 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 6380 struct bpf_insn_access_aux *info) 6381 { 6382 if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, 6383 icsk_retransmits)) 6384 return false; 6385 6386 if (off % size != 0) 6387 return false; 6388 6389 switch (off) { 6390 case offsetof(struct bpf_tcp_sock, bytes_received): 6391 case offsetof(struct bpf_tcp_sock, bytes_acked): 6392 return size == sizeof(__u64); 6393 default: 6394 return size == sizeof(__u32); 6395 } 6396 } 6397 6398 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 6399 const struct bpf_insn *si, 6400 struct bpf_insn *insn_buf, 6401 struct bpf_prog *prog, u32 *target_size) 6402 { 6403 struct bpf_insn *insn = insn_buf; 6404 6405 #define BPF_TCP_SOCK_GET_COMMON(FIELD) \ 6406 do { \ 6407 BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \ 6408 sizeof_field(struct bpf_tcp_sock, FIELD)); \ 6409 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\ 6410 si->dst_reg, si->src_reg, \ 6411 offsetof(struct tcp_sock, FIELD)); \ 6412 } while (0) 6413 6414 #define BPF_INET_SOCK_GET_COMMON(FIELD) \ 6415 do { \ 6416 BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \ 6417 FIELD) > \ 6418 sizeof_field(struct bpf_tcp_sock, FIELD)); \ 6419 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 6420 struct inet_connection_sock, \ 6421 FIELD), \ 6422 si->dst_reg, si->src_reg, \ 6423 offsetof( \ 6424 struct inet_connection_sock, \ 6425 FIELD)); \ 6426 } while (0) 6427 6428 if (insn > insn_buf) 6429 return insn - insn_buf; 6430 6431 switch (si->off) { 6432 case offsetof(struct bpf_tcp_sock, rtt_min): 6433 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != 6434 sizeof(struct minmax)); 6435 
BUILD_BUG_ON(sizeof(struct minmax) < 6436 sizeof(struct minmax_sample)); 6437 6438 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 6439 offsetof(struct tcp_sock, rtt_min) + 6440 offsetof(struct minmax_sample, v)); 6441 break; 6442 case offsetof(struct bpf_tcp_sock, snd_cwnd): 6443 BPF_TCP_SOCK_GET_COMMON(snd_cwnd); 6444 break; 6445 case offsetof(struct bpf_tcp_sock, srtt_us): 6446 BPF_TCP_SOCK_GET_COMMON(srtt_us); 6447 break; 6448 case offsetof(struct bpf_tcp_sock, snd_ssthresh): 6449 BPF_TCP_SOCK_GET_COMMON(snd_ssthresh); 6450 break; 6451 case offsetof(struct bpf_tcp_sock, rcv_nxt): 6452 BPF_TCP_SOCK_GET_COMMON(rcv_nxt); 6453 break; 6454 case offsetof(struct bpf_tcp_sock, snd_nxt): 6455 BPF_TCP_SOCK_GET_COMMON(snd_nxt); 6456 break; 6457 case offsetof(struct bpf_tcp_sock, snd_una): 6458 BPF_TCP_SOCK_GET_COMMON(snd_una); 6459 break; 6460 case offsetof(struct bpf_tcp_sock, mss_cache): 6461 BPF_TCP_SOCK_GET_COMMON(mss_cache); 6462 break; 6463 case offsetof(struct bpf_tcp_sock, ecn_flags): 6464 BPF_TCP_SOCK_GET_COMMON(ecn_flags); 6465 break; 6466 case offsetof(struct bpf_tcp_sock, rate_delivered): 6467 BPF_TCP_SOCK_GET_COMMON(rate_delivered); 6468 break; 6469 case offsetof(struct bpf_tcp_sock, rate_interval_us): 6470 BPF_TCP_SOCK_GET_COMMON(rate_interval_us); 6471 break; 6472 case offsetof(struct bpf_tcp_sock, packets_out): 6473 BPF_TCP_SOCK_GET_COMMON(packets_out); 6474 break; 6475 case offsetof(struct bpf_tcp_sock, retrans_out): 6476 BPF_TCP_SOCK_GET_COMMON(retrans_out); 6477 break; 6478 case offsetof(struct bpf_tcp_sock, total_retrans): 6479 BPF_TCP_SOCK_GET_COMMON(total_retrans); 6480 break; 6481 case offsetof(struct bpf_tcp_sock, segs_in): 6482 BPF_TCP_SOCK_GET_COMMON(segs_in); 6483 break; 6484 case offsetof(struct bpf_tcp_sock, data_segs_in): 6485 BPF_TCP_SOCK_GET_COMMON(data_segs_in); 6486 break; 6487 case offsetof(struct bpf_tcp_sock, segs_out): 6488 BPF_TCP_SOCK_GET_COMMON(segs_out); 6489 break; 6490 case offsetof(struct bpf_tcp_sock, data_segs_out): 6491 BPF_TCP_SOCK_GET_COMMON(data_segs_out); 6492 break; 6493 case offsetof(struct bpf_tcp_sock, lost_out): 6494 BPF_TCP_SOCK_GET_COMMON(lost_out); 6495 break; 6496 case offsetof(struct bpf_tcp_sock, sacked_out): 6497 BPF_TCP_SOCK_GET_COMMON(sacked_out); 6498 break; 6499 case offsetof(struct bpf_tcp_sock, bytes_received): 6500 BPF_TCP_SOCK_GET_COMMON(bytes_received); 6501 break; 6502 case offsetof(struct bpf_tcp_sock, bytes_acked): 6503 BPF_TCP_SOCK_GET_COMMON(bytes_acked); 6504 break; 6505 case offsetof(struct bpf_tcp_sock, dsack_dups): 6506 BPF_TCP_SOCK_GET_COMMON(dsack_dups); 6507 break; 6508 case offsetof(struct bpf_tcp_sock, delivered): 6509 BPF_TCP_SOCK_GET_COMMON(delivered); 6510 break; 6511 case offsetof(struct bpf_tcp_sock, delivered_ce): 6512 BPF_TCP_SOCK_GET_COMMON(delivered_ce); 6513 break; 6514 case offsetof(struct bpf_tcp_sock, icsk_retransmits): 6515 BPF_INET_SOCK_GET_COMMON(icsk_retransmits); 6516 break; 6517 } 6518 6519 return insn - insn_buf; 6520 } 6521 6522 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) 6523 { 6524 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) 6525 return (unsigned long)sk; 6526 6527 return (unsigned long)NULL; 6528 } 6529 6530 const struct bpf_func_proto bpf_tcp_sock_proto = { 6531 .func = bpf_tcp_sock, 6532 .gpl_only = false, 6533 .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL, 6534 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 6535 }; 6536 6537 BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk) 6538 { 6539 sk = sk_to_full_sk(sk); 6540 6541 if (sk->sk_state == TCP_LISTEN && sock_flag(sk, 
SOCK_RCU_FREE)) 6542 return (unsigned long)sk; 6543 6544 return (unsigned long)NULL; 6545 } 6546 6547 static const struct bpf_func_proto bpf_get_listener_sock_proto = { 6548 .func = bpf_get_listener_sock, 6549 .gpl_only = false, 6550 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6551 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 6552 }; 6553 6554 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) 6555 { 6556 unsigned int iphdr_len; 6557 6558 switch (skb_protocol(skb, true)) { 6559 case cpu_to_be16(ETH_P_IP): 6560 iphdr_len = sizeof(struct iphdr); 6561 break; 6562 case cpu_to_be16(ETH_P_IPV6): 6563 iphdr_len = sizeof(struct ipv6hdr); 6564 break; 6565 default: 6566 return 0; 6567 } 6568 6569 if (skb_headlen(skb) < iphdr_len) 6570 return 0; 6571 6572 if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len)) 6573 return 0; 6574 6575 return INET_ECN_set_ce(skb); 6576 } 6577 6578 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 6579 struct bpf_insn_access_aux *info) 6580 { 6581 if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id)) 6582 return false; 6583 6584 if (off % size != 0) 6585 return false; 6586 6587 switch (off) { 6588 default: 6589 return size == sizeof(__u32); 6590 } 6591 } 6592 6593 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 6594 const struct bpf_insn *si, 6595 struct bpf_insn *insn_buf, 6596 struct bpf_prog *prog, u32 *target_size) 6597 { 6598 struct bpf_insn *insn = insn_buf; 6599 6600 #define BPF_XDP_SOCK_GET(FIELD) \ 6601 do { \ 6602 BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \ 6603 sizeof_field(struct bpf_xdp_sock, FIELD)); \ 6604 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\ 6605 si->dst_reg, si->src_reg, \ 6606 offsetof(struct xdp_sock, FIELD)); \ 6607 } while (0) 6608 6609 switch (si->off) { 6610 case offsetof(struct bpf_xdp_sock, queue_id): 6611 BPF_XDP_SOCK_GET(queue_id); 6612 break; 6613 } 6614 6615 return insn - insn_buf; 6616 } 6617 6618 static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = { 6619 .func = bpf_skb_ecn_set_ce, 6620 .gpl_only = false, 6621 .ret_type = RET_INTEGER, 6622 .arg1_type = ARG_PTR_TO_CTX, 6623 }; 6624 6625 BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len, 6626 struct tcphdr *, th, u32, th_len) 6627 { 6628 #ifdef CONFIG_SYN_COOKIES 6629 u32 cookie; 6630 int ret; 6631 6632 if (unlikely(!sk || th_len < sizeof(*th))) 6633 return -EINVAL; 6634 6635 /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. 
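 *
 * The listener check below ties the helper to its purpose: it verifies
 * the syncookie carried in the ACK that completes a cookie-based
 * handshake (ack_seq - 1) against the listening socket and returns 0
 * when the cookie checks out, -ENOENT or -EINVAL otherwise.
 *
 * Caller-side sketch (illustrative only; iph and th are assumed to
 * point at the already-validated IPv4 and TCP headers of the ACK):
 *
 *	err = bpf_tcp_check_syncookie(sk, iph, sizeof(*iph),
 *				      th, th->doff * 4);
 *	if (!err)
 *		treat the packet as a legitimate handshake completion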
*/ 6636 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) 6637 return -EINVAL; 6638 6639 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) 6640 return -EINVAL; 6641 6642 if (!th->ack || th->rst || th->syn) 6643 return -ENOENT; 6644 6645 if (tcp_synq_no_recent_overflow(sk)) 6646 return -ENOENT; 6647 6648 cookie = ntohl(th->ack_seq) - 1; 6649 6650 switch (sk->sk_family) { 6651 case AF_INET: 6652 if (unlikely(iph_len < sizeof(struct iphdr))) 6653 return -EINVAL; 6654 6655 ret = __cookie_v4_check((struct iphdr *)iph, th, cookie); 6656 break; 6657 6658 #if IS_BUILTIN(CONFIG_IPV6) 6659 case AF_INET6: 6660 if (unlikely(iph_len < sizeof(struct ipv6hdr))) 6661 return -EINVAL; 6662 6663 ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie); 6664 break; 6665 #endif /* CONFIG_IPV6 */ 6666 6667 default: 6668 return -EPROTONOSUPPORT; 6669 } 6670 6671 if (ret > 0) 6672 return 0; 6673 6674 return -ENOENT; 6675 #else 6676 return -ENOTSUPP; 6677 #endif 6678 } 6679 6680 static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = { 6681 .func = bpf_tcp_check_syncookie, 6682 .gpl_only = true, 6683 .pkt_access = true, 6684 .ret_type = RET_INTEGER, 6685 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 6686 .arg2_type = ARG_PTR_TO_MEM, 6687 .arg3_type = ARG_CONST_SIZE, 6688 .arg4_type = ARG_PTR_TO_MEM, 6689 .arg5_type = ARG_CONST_SIZE, 6690 }; 6691 6692 BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, 6693 struct tcphdr *, th, u32, th_len) 6694 { 6695 #ifdef CONFIG_SYN_COOKIES 6696 u32 cookie; 6697 u16 mss; 6698 6699 if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4)) 6700 return -EINVAL; 6701 6702 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) 6703 return -EINVAL; 6704 6705 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) 6706 return -ENOENT; 6707 6708 if (!th->syn || th->ack || th->fin || th->rst) 6709 return -EINVAL; 6710 6711 if (unlikely(iph_len < sizeof(struct iphdr))) 6712 return -EINVAL; 6713 6714 /* Both struct iphdr and struct ipv6hdr have the version field at the 6715 * same offset so we can cast to the shorter header (struct iphdr). 
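 *
 * Version 4 is handed to tcp_v4_get_syncookie() (also for a dual-stack
 * AF_INET6 listener that is not ipv6only), version 6 to
 * tcp_v6_get_syncookie().  On success the helper packs the cookie into
 * the low 32 bits of the return value and the encoded MSS into the
 * upper 32 bits; a caller would typically split it like this
 * (illustrative sketch only):
 *
 *	s64 val = bpf_tcp_gen_syncookie(sk, iph, iph_len, th, th_len);
 *
 *	if (val >= 0) {
 *		__u32 cookie = (__u32)val;
 *		__u16 mss = val >> 32;
 *		...
 *	}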
6716 */ 6717 switch (((struct iphdr *)iph)->version) { 6718 case 4: 6719 if (sk->sk_family == AF_INET6 && sk->sk_ipv6only) 6720 return -EINVAL; 6721 6722 mss = tcp_v4_get_syncookie(sk, iph, th, &cookie); 6723 break; 6724 6725 #if IS_BUILTIN(CONFIG_IPV6) 6726 case 6: 6727 if (unlikely(iph_len < sizeof(struct ipv6hdr))) 6728 return -EINVAL; 6729 6730 if (sk->sk_family != AF_INET6) 6731 return -EINVAL; 6732 6733 mss = tcp_v6_get_syncookie(sk, iph, th, &cookie); 6734 break; 6735 #endif /* CONFIG_IPV6 */ 6736 6737 default: 6738 return -EPROTONOSUPPORT; 6739 } 6740 if (mss == 0) 6741 return -ENOENT; 6742 6743 return cookie | ((u64)mss << 32); 6744 #else 6745 return -EOPNOTSUPP; 6746 #endif /* CONFIG_SYN_COOKIES */ 6747 } 6748 6749 static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = { 6750 .func = bpf_tcp_gen_syncookie, 6751 .gpl_only = true, /* __cookie_v*_init_sequence() is GPL */ 6752 .pkt_access = true, 6753 .ret_type = RET_INTEGER, 6754 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 6755 .arg2_type = ARG_PTR_TO_MEM, 6756 .arg3_type = ARG_CONST_SIZE, 6757 .arg4_type = ARG_PTR_TO_MEM, 6758 .arg5_type = ARG_CONST_SIZE, 6759 }; 6760 6761 BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags) 6762 { 6763 if (!sk || flags != 0) 6764 return -EINVAL; 6765 if (!skb_at_tc_ingress(skb)) 6766 return -EOPNOTSUPP; 6767 if (unlikely(dev_net(skb->dev) != sock_net(sk))) 6768 return -ENETUNREACH; 6769 if (unlikely(sk_fullsock(sk) && sk->sk_reuseport)) 6770 return -ESOCKTNOSUPPORT; 6771 if (sk_is_refcounted(sk) && 6772 unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) 6773 return -ENOENT; 6774 6775 skb_orphan(skb); 6776 skb->sk = sk; 6777 skb->destructor = sock_pfree; 6778 6779 return 0; 6780 } 6781 6782 static const struct bpf_func_proto bpf_sk_assign_proto = { 6783 .func = bpf_sk_assign, 6784 .gpl_only = false, 6785 .ret_type = RET_INTEGER, 6786 .arg1_type = ARG_PTR_TO_CTX, 6787 .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 6788 .arg3_type = ARG_ANYTHING, 6789 }; 6790 6791 static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend, 6792 u8 search_kind, const u8 *magic, 6793 u8 magic_len, bool *eol) 6794 { 6795 u8 kind, kind_len; 6796 6797 *eol = false; 6798 6799 while (op < opend) { 6800 kind = op[0]; 6801 6802 if (kind == TCPOPT_EOL) { 6803 *eol = true; 6804 return ERR_PTR(-ENOMSG); 6805 } else if (kind == TCPOPT_NOP) { 6806 op++; 6807 continue; 6808 } 6809 6810 if (opend - op < 2 || opend - op < op[1] || op[1] < 2) 6811 /* Something is wrong in the received header. 6812 * Follow the TCP stack's tcp_parse_options() 6813 * and just bail here. 6814 */ 6815 return ERR_PTR(-EFAULT); 6816 6817 kind_len = op[1]; 6818 if (search_kind == kind) { 6819 if (!magic_len) 6820 return op; 6821 6822 if (magic_len > kind_len - 2) 6823 return ERR_PTR(-ENOMSG); 6824 6825 if (!memcmp(&op[2], magic, magic_len)) 6826 return op; 6827 } 6828 6829 op += kind_len; 6830 } 6831 6832 return ERR_PTR(-ENOMSG); 6833 } 6834 6835 BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 6836 void *, search_res, u32, len, u64, flags) 6837 { 6838 bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN; 6839 const u8 *op, *opend, *magic, *search = search_res; 6840 u8 search_kind, search_len, copy_len, magic_len; 6841 int ret; 6842 6843 /* 2 byte is the minimal option len except TCPOPT_NOP and 6844 * TCPOPT_EOL which are useless for the bpf prog to learn 6845 * and this helper disallow loading them also. 
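 *
 * search_res doubles as the search key on input: byte 0 is the option
 * kind and byte 1 the kind length.  For the experimental kinds (253 and
 * TCPOPT_EXP) the kind length must also cover the 2- or 4-byte magic
 * that follows; for any other kind byte 1 must be 0 and only the kind
 * is matched.  On success the full option is copied back into
 * search_res and its length is returned.
 *
 * Illustrative program-side use, loading the MSS option from the saved
 * SYN (sketch only, error handling trimmed; 2 is TCPOPT_MAXSEG):
 *
 *	__u8 opt[8] = { 2, };
 *
 *	ret = bpf_load_hdr_opt(skops, opt, sizeof(opt),
 *			       BPF_LOAD_HDR_OPT_TCP_SYN);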
6846 */ 6847 if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN) 6848 return -EINVAL; 6849 6850 search_kind = search[0]; 6851 search_len = search[1]; 6852 6853 if (search_len > len || search_kind == TCPOPT_NOP || 6854 search_kind == TCPOPT_EOL) 6855 return -EINVAL; 6856 6857 if (search_kind == TCPOPT_EXP || search_kind == 253) { 6858 /* 16 or 32 bit magic. +2 for kind and kind length */ 6859 if (search_len != 4 && search_len != 6) 6860 return -EINVAL; 6861 magic = &search[2]; 6862 magic_len = search_len - 2; 6863 } else { 6864 if (search_len) 6865 return -EINVAL; 6866 magic = NULL; 6867 magic_len = 0; 6868 } 6869 6870 if (load_syn) { 6871 ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op); 6872 if (ret < 0) 6873 return ret; 6874 6875 opend = op + ret; 6876 op += sizeof(struct tcphdr); 6877 } else { 6878 if (!bpf_sock->skb || 6879 bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB) 6880 /* This bpf_sock->op cannot call this helper */ 6881 return -EPERM; 6882 6883 opend = bpf_sock->skb_data_end; 6884 op = bpf_sock->skb->data + sizeof(struct tcphdr); 6885 } 6886 6887 op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len, 6888 &eol); 6889 if (IS_ERR(op)) 6890 return PTR_ERR(op); 6891 6892 copy_len = op[1]; 6893 ret = copy_len; 6894 if (copy_len > len) { 6895 ret = -ENOSPC; 6896 copy_len = len; 6897 } 6898 6899 memcpy(search_res, op, copy_len); 6900 return ret; 6901 } 6902 6903 static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = { 6904 .func = bpf_sock_ops_load_hdr_opt, 6905 .gpl_only = false, 6906 .ret_type = RET_INTEGER, 6907 .arg1_type = ARG_PTR_TO_CTX, 6908 .arg2_type = ARG_PTR_TO_MEM, 6909 .arg3_type = ARG_CONST_SIZE, 6910 .arg4_type = ARG_ANYTHING, 6911 }; 6912 6913 BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 6914 const void *, from, u32, len, u64, flags) 6915 { 6916 u8 new_kind, new_kind_len, magic_len = 0, *opend; 6917 const u8 *op, *new_op, *magic = NULL; 6918 struct sk_buff *skb; 6919 bool eol; 6920 6921 if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB) 6922 return -EPERM; 6923 6924 if (len < 2 || flags) 6925 return -EINVAL; 6926 6927 new_op = from; 6928 new_kind = new_op[0]; 6929 new_kind_len = new_op[1]; 6930 6931 if (new_kind_len > len || new_kind == TCPOPT_NOP || 6932 new_kind == TCPOPT_EOL) 6933 return -EINVAL; 6934 6935 if (new_kind_len > bpf_sock->remaining_opt_len) 6936 return -ENOSPC; 6937 6938 /* 253 is another experimental kind */ 6939 if (new_kind == TCPOPT_EXP || new_kind == 253) { 6940 if (new_kind_len < 4) 6941 return -EINVAL; 6942 /* Match for the 2 byte magic also. 6943 * RFC 6994: the magic could be 2 or 4 bytes. 6944 * Hence, matching by 2 byte only is on the 6945 * conservative side but it is the right 6946 * thing to do for the 'search-for-duplication' 6947 * purpose. 6948 */ 6949 magic = &new_op[2]; 6950 magic_len = 2; 6951 } 6952 6953 /* Check for duplication */ 6954 skb = bpf_sock->skb; 6955 op = skb->data + sizeof(struct tcphdr); 6956 opend = bpf_sock->skb_data_end; 6957 6958 op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len, 6959 &eol); 6960 if (!IS_ERR(op)) 6961 return -EEXIST; 6962 6963 if (PTR_ERR(op) != -ENOMSG) 6964 return PTR_ERR(op); 6965 6966 if (eol) 6967 /* The option has been ended. Treat it as no more 6968 * header option can be written. 6969 */ 6970 return -ENOSPC; 6971 6972 /* No duplication found. Store the header option. 
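 *
 * The bytes are appended at skb_data_end and remaining_opt_len shrinks
 * accordingly; the space itself must have been set aside earlier, from
 * the BPF_SOCK_OPS_HDR_OPT_LEN_CB callback, with bpf_reserve_hdr_opt().
 * A sockops program typically pairs the two like this (illustrative
 * sketch; my_opt is a program-defined option buffer):
 *
 *	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
 *		bpf_reserve_hdr_opt(skops, sizeof(my_opt), 0);
 *		break;
 *	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
 *		bpf_store_hdr_opt(skops, &my_opt, sizeof(my_opt), 0);
 *		break;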
*/ 6973 memcpy(opend, from, new_kind_len); 6974 6975 bpf_sock->remaining_opt_len -= new_kind_len; 6976 bpf_sock->skb_data_end += new_kind_len; 6977 6978 return 0; 6979 } 6980 6981 static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = { 6982 .func = bpf_sock_ops_store_hdr_opt, 6983 .gpl_only = false, 6984 .ret_type = RET_INTEGER, 6985 .arg1_type = ARG_PTR_TO_CTX, 6986 .arg2_type = ARG_PTR_TO_MEM, 6987 .arg3_type = ARG_CONST_SIZE, 6988 .arg4_type = ARG_ANYTHING, 6989 }; 6990 6991 BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 6992 u32, len, u64, flags) 6993 { 6994 if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB) 6995 return -EPERM; 6996 6997 if (flags || len < 2) 6998 return -EINVAL; 6999 7000 if (len > bpf_sock->remaining_opt_len) 7001 return -ENOSPC; 7002 7003 bpf_sock->remaining_opt_len -= len; 7004 7005 return 0; 7006 } 7007 7008 static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { 7009 .func = bpf_sock_ops_reserve_hdr_opt, 7010 .gpl_only = false, 7011 .ret_type = RET_INTEGER, 7012 .arg1_type = ARG_PTR_TO_CTX, 7013 .arg2_type = ARG_ANYTHING, 7014 .arg3_type = ARG_ANYTHING, 7015 }; 7016 7017 #endif /* CONFIG_INET */ 7018 7019 bool bpf_helper_changes_pkt_data(void *func) 7020 { 7021 if (func == bpf_skb_vlan_push || 7022 func == bpf_skb_vlan_pop || 7023 func == bpf_skb_store_bytes || 7024 func == bpf_skb_change_proto || 7025 func == bpf_skb_change_head || 7026 func == sk_skb_change_head || 7027 func == bpf_skb_change_tail || 7028 func == sk_skb_change_tail || 7029 func == bpf_skb_adjust_room || 7030 func == sk_skb_adjust_room || 7031 func == bpf_skb_pull_data || 7032 func == sk_skb_pull_data || 7033 func == bpf_clone_redirect || 7034 func == bpf_l3_csum_replace || 7035 func == bpf_l4_csum_replace || 7036 func == bpf_xdp_adjust_head || 7037 func == bpf_xdp_adjust_meta || 7038 func == bpf_msg_pull_data || 7039 func == bpf_msg_push_data || 7040 func == bpf_msg_pop_data || 7041 func == bpf_xdp_adjust_tail || 7042 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 7043 func == bpf_lwt_seg6_store_bytes || 7044 func == bpf_lwt_seg6_adjust_srh || 7045 func == bpf_lwt_seg6_action || 7046 #endif 7047 #ifdef CONFIG_INET 7048 func == bpf_sock_ops_store_hdr_opt || 7049 #endif 7050 func == bpf_lwt_in_push_encap || 7051 func == bpf_lwt_xmit_push_encap) 7052 return true; 7053 7054 return false; 7055 } 7056 7057 const struct bpf_func_proto bpf_event_output_data_proto __weak; 7058 const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak; 7059 7060 static const struct bpf_func_proto * 7061 sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7062 { 7063 switch (func_id) { 7064 /* inet and inet6 sockets are created in a process 7065 * context so there is always a valid uid/gid 7066 */ 7067 case BPF_FUNC_get_current_uid_gid: 7068 return &bpf_get_current_uid_gid_proto; 7069 case BPF_FUNC_get_local_storage: 7070 return &bpf_get_local_storage_proto; 7071 case BPF_FUNC_get_socket_cookie: 7072 return &bpf_get_socket_cookie_sock_proto; 7073 case BPF_FUNC_get_netns_cookie: 7074 return &bpf_get_netns_cookie_sock_proto; 7075 case BPF_FUNC_perf_event_output: 7076 return &bpf_event_output_data_proto; 7077 case BPF_FUNC_get_current_pid_tgid: 7078 return &bpf_get_current_pid_tgid_proto; 7079 case BPF_FUNC_get_current_comm: 7080 return &bpf_get_current_comm_proto; 7081 #ifdef CONFIG_CGROUPS 7082 case BPF_FUNC_get_current_cgroup_id: 7083 return &bpf_get_current_cgroup_id_proto; 7084 case BPF_FUNC_get_current_ancestor_cgroup_id: 7085 
return &bpf_get_current_ancestor_cgroup_id_proto; 7086 #endif 7087 #ifdef CONFIG_CGROUP_NET_CLASSID 7088 case BPF_FUNC_get_cgroup_classid: 7089 return &bpf_get_cgroup_classid_curr_proto; 7090 #endif 7091 case BPF_FUNC_sk_storage_get: 7092 return &bpf_sk_storage_get_cg_sock_proto; 7093 default: 7094 return bpf_base_func_proto(func_id); 7095 } 7096 } 7097 7098 static const struct bpf_func_proto * 7099 sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7100 { 7101 switch (func_id) { 7102 /* inet and inet6 sockets are created in a process 7103 * context so there is always a valid uid/gid 7104 */ 7105 case BPF_FUNC_get_current_uid_gid: 7106 return &bpf_get_current_uid_gid_proto; 7107 case BPF_FUNC_bind: 7108 switch (prog->expected_attach_type) { 7109 case BPF_CGROUP_INET4_CONNECT: 7110 case BPF_CGROUP_INET6_CONNECT: 7111 return &bpf_bind_proto; 7112 default: 7113 return NULL; 7114 } 7115 case BPF_FUNC_get_socket_cookie: 7116 return &bpf_get_socket_cookie_sock_addr_proto; 7117 case BPF_FUNC_get_netns_cookie: 7118 return &bpf_get_netns_cookie_sock_addr_proto; 7119 case BPF_FUNC_get_local_storage: 7120 return &bpf_get_local_storage_proto; 7121 case BPF_FUNC_perf_event_output: 7122 return &bpf_event_output_data_proto; 7123 case BPF_FUNC_get_current_pid_tgid: 7124 return &bpf_get_current_pid_tgid_proto; 7125 case BPF_FUNC_get_current_comm: 7126 return &bpf_get_current_comm_proto; 7127 #ifdef CONFIG_CGROUPS 7128 case BPF_FUNC_get_current_cgroup_id: 7129 return &bpf_get_current_cgroup_id_proto; 7130 case BPF_FUNC_get_current_ancestor_cgroup_id: 7131 return &bpf_get_current_ancestor_cgroup_id_proto; 7132 #endif 7133 #ifdef CONFIG_CGROUP_NET_CLASSID 7134 case BPF_FUNC_get_cgroup_classid: 7135 return &bpf_get_cgroup_classid_curr_proto; 7136 #endif 7137 #ifdef CONFIG_INET 7138 case BPF_FUNC_sk_lookup_tcp: 7139 return &bpf_sock_addr_sk_lookup_tcp_proto; 7140 case BPF_FUNC_sk_lookup_udp: 7141 return &bpf_sock_addr_sk_lookup_udp_proto; 7142 case BPF_FUNC_sk_release: 7143 return &bpf_sk_release_proto; 7144 case BPF_FUNC_skc_lookup_tcp: 7145 return &bpf_sock_addr_skc_lookup_tcp_proto; 7146 #endif /* CONFIG_INET */ 7147 case BPF_FUNC_sk_storage_get: 7148 return &bpf_sk_storage_get_proto; 7149 case BPF_FUNC_sk_storage_delete: 7150 return &bpf_sk_storage_delete_proto; 7151 case BPF_FUNC_setsockopt: 7152 switch (prog->expected_attach_type) { 7153 case BPF_CGROUP_INET4_BIND: 7154 case BPF_CGROUP_INET6_BIND: 7155 case BPF_CGROUP_INET4_CONNECT: 7156 case BPF_CGROUP_INET6_CONNECT: 7157 case BPF_CGROUP_UDP4_RECVMSG: 7158 case BPF_CGROUP_UDP6_RECVMSG: 7159 case BPF_CGROUP_UDP4_SENDMSG: 7160 case BPF_CGROUP_UDP6_SENDMSG: 7161 case BPF_CGROUP_INET4_GETPEERNAME: 7162 case BPF_CGROUP_INET6_GETPEERNAME: 7163 case BPF_CGROUP_INET4_GETSOCKNAME: 7164 case BPF_CGROUP_INET6_GETSOCKNAME: 7165 return &bpf_sock_addr_setsockopt_proto; 7166 default: 7167 return NULL; 7168 } 7169 case BPF_FUNC_getsockopt: 7170 switch (prog->expected_attach_type) { 7171 case BPF_CGROUP_INET4_BIND: 7172 case BPF_CGROUP_INET6_BIND: 7173 case BPF_CGROUP_INET4_CONNECT: 7174 case BPF_CGROUP_INET6_CONNECT: 7175 case BPF_CGROUP_UDP4_RECVMSG: 7176 case BPF_CGROUP_UDP6_RECVMSG: 7177 case BPF_CGROUP_UDP4_SENDMSG: 7178 case BPF_CGROUP_UDP6_SENDMSG: 7179 case BPF_CGROUP_INET4_GETPEERNAME: 7180 case BPF_CGROUP_INET6_GETPEERNAME: 7181 case BPF_CGROUP_INET4_GETSOCKNAME: 7182 case BPF_CGROUP_INET6_GETSOCKNAME: 7183 return &bpf_sock_addr_getsockopt_proto; 7184 default: 7185 return NULL; 7186 } 7187 default: 7188 return 
bpf_sk_base_func_proto(func_id); 7189 } 7190 } 7191 7192 static const struct bpf_func_proto * 7193 sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7194 { 7195 switch (func_id) { 7196 case BPF_FUNC_skb_load_bytes: 7197 return &bpf_skb_load_bytes_proto; 7198 case BPF_FUNC_skb_load_bytes_relative: 7199 return &bpf_skb_load_bytes_relative_proto; 7200 case BPF_FUNC_get_socket_cookie: 7201 return &bpf_get_socket_cookie_proto; 7202 case BPF_FUNC_get_socket_uid: 7203 return &bpf_get_socket_uid_proto; 7204 case BPF_FUNC_perf_event_output: 7205 return &bpf_skb_event_output_proto; 7206 default: 7207 return bpf_sk_base_func_proto(func_id); 7208 } 7209 } 7210 7211 const struct bpf_func_proto bpf_sk_storage_get_proto __weak; 7212 const struct bpf_func_proto bpf_sk_storage_delete_proto __weak; 7213 7214 static const struct bpf_func_proto * 7215 cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7216 { 7217 switch (func_id) { 7218 case BPF_FUNC_get_local_storage: 7219 return &bpf_get_local_storage_proto; 7220 case BPF_FUNC_sk_fullsock: 7221 return &bpf_sk_fullsock_proto; 7222 case BPF_FUNC_sk_storage_get: 7223 return &bpf_sk_storage_get_proto; 7224 case BPF_FUNC_sk_storage_delete: 7225 return &bpf_sk_storage_delete_proto; 7226 case BPF_FUNC_perf_event_output: 7227 return &bpf_skb_event_output_proto; 7228 #ifdef CONFIG_SOCK_CGROUP_DATA 7229 case BPF_FUNC_skb_cgroup_id: 7230 return &bpf_skb_cgroup_id_proto; 7231 case BPF_FUNC_skb_ancestor_cgroup_id: 7232 return &bpf_skb_ancestor_cgroup_id_proto; 7233 case BPF_FUNC_sk_cgroup_id: 7234 return &bpf_sk_cgroup_id_proto; 7235 case BPF_FUNC_sk_ancestor_cgroup_id: 7236 return &bpf_sk_ancestor_cgroup_id_proto; 7237 #endif 7238 #ifdef CONFIG_INET 7239 case BPF_FUNC_sk_lookup_tcp: 7240 return &bpf_sk_lookup_tcp_proto; 7241 case BPF_FUNC_sk_lookup_udp: 7242 return &bpf_sk_lookup_udp_proto; 7243 case BPF_FUNC_sk_release: 7244 return &bpf_sk_release_proto; 7245 case BPF_FUNC_skc_lookup_tcp: 7246 return &bpf_skc_lookup_tcp_proto; 7247 case BPF_FUNC_tcp_sock: 7248 return &bpf_tcp_sock_proto; 7249 case BPF_FUNC_get_listener_sock: 7250 return &bpf_get_listener_sock_proto; 7251 case BPF_FUNC_skb_ecn_set_ce: 7252 return &bpf_skb_ecn_set_ce_proto; 7253 #endif 7254 default: 7255 return sk_filter_func_proto(func_id, prog); 7256 } 7257 } 7258 7259 static const struct bpf_func_proto * 7260 tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7261 { 7262 switch (func_id) { 7263 case BPF_FUNC_skb_store_bytes: 7264 return &bpf_skb_store_bytes_proto; 7265 case BPF_FUNC_skb_load_bytes: 7266 return &bpf_skb_load_bytes_proto; 7267 case BPF_FUNC_skb_load_bytes_relative: 7268 return &bpf_skb_load_bytes_relative_proto; 7269 case BPF_FUNC_skb_pull_data: 7270 return &bpf_skb_pull_data_proto; 7271 case BPF_FUNC_csum_diff: 7272 return &bpf_csum_diff_proto; 7273 case BPF_FUNC_csum_update: 7274 return &bpf_csum_update_proto; 7275 case BPF_FUNC_csum_level: 7276 return &bpf_csum_level_proto; 7277 case BPF_FUNC_l3_csum_replace: 7278 return &bpf_l3_csum_replace_proto; 7279 case BPF_FUNC_l4_csum_replace: 7280 return &bpf_l4_csum_replace_proto; 7281 case BPF_FUNC_clone_redirect: 7282 return &bpf_clone_redirect_proto; 7283 case BPF_FUNC_get_cgroup_classid: 7284 return &bpf_get_cgroup_classid_proto; 7285 case BPF_FUNC_skb_vlan_push: 7286 return &bpf_skb_vlan_push_proto; 7287 case BPF_FUNC_skb_vlan_pop: 7288 return &bpf_skb_vlan_pop_proto; 7289 case BPF_FUNC_skb_change_proto: 7290 return &bpf_skb_change_proto_proto; 7291 case 
BPF_FUNC_skb_change_type: 7292 return &bpf_skb_change_type_proto; 7293 case BPF_FUNC_skb_adjust_room: 7294 return &bpf_skb_adjust_room_proto; 7295 case BPF_FUNC_skb_change_tail: 7296 return &bpf_skb_change_tail_proto; 7297 case BPF_FUNC_skb_change_head: 7298 return &bpf_skb_change_head_proto; 7299 case BPF_FUNC_skb_get_tunnel_key: 7300 return &bpf_skb_get_tunnel_key_proto; 7301 case BPF_FUNC_skb_set_tunnel_key: 7302 return bpf_get_skb_set_tunnel_proto(func_id); 7303 case BPF_FUNC_skb_get_tunnel_opt: 7304 return &bpf_skb_get_tunnel_opt_proto; 7305 case BPF_FUNC_skb_set_tunnel_opt: 7306 return bpf_get_skb_set_tunnel_proto(func_id); 7307 case BPF_FUNC_redirect: 7308 return &bpf_redirect_proto; 7309 case BPF_FUNC_redirect_neigh: 7310 return &bpf_redirect_neigh_proto; 7311 case BPF_FUNC_redirect_peer: 7312 return &bpf_redirect_peer_proto; 7313 case BPF_FUNC_get_route_realm: 7314 return &bpf_get_route_realm_proto; 7315 case BPF_FUNC_get_hash_recalc: 7316 return &bpf_get_hash_recalc_proto; 7317 case BPF_FUNC_set_hash_invalid: 7318 return &bpf_set_hash_invalid_proto; 7319 case BPF_FUNC_set_hash: 7320 return &bpf_set_hash_proto; 7321 case BPF_FUNC_perf_event_output: 7322 return &bpf_skb_event_output_proto; 7323 case BPF_FUNC_get_smp_processor_id: 7324 return &bpf_get_smp_processor_id_proto; 7325 case BPF_FUNC_skb_under_cgroup: 7326 return &bpf_skb_under_cgroup_proto; 7327 case BPF_FUNC_get_socket_cookie: 7328 return &bpf_get_socket_cookie_proto; 7329 case BPF_FUNC_get_socket_uid: 7330 return &bpf_get_socket_uid_proto; 7331 case BPF_FUNC_fib_lookup: 7332 return &bpf_skb_fib_lookup_proto; 7333 case BPF_FUNC_check_mtu: 7334 return &bpf_skb_check_mtu_proto; 7335 case BPF_FUNC_sk_fullsock: 7336 return &bpf_sk_fullsock_proto; 7337 case BPF_FUNC_sk_storage_get: 7338 return &bpf_sk_storage_get_proto; 7339 case BPF_FUNC_sk_storage_delete: 7340 return &bpf_sk_storage_delete_proto; 7341 #ifdef CONFIG_XFRM 7342 case BPF_FUNC_skb_get_xfrm_state: 7343 return &bpf_skb_get_xfrm_state_proto; 7344 #endif 7345 #ifdef CONFIG_CGROUP_NET_CLASSID 7346 case BPF_FUNC_skb_cgroup_classid: 7347 return &bpf_skb_cgroup_classid_proto; 7348 #endif 7349 #ifdef CONFIG_SOCK_CGROUP_DATA 7350 case BPF_FUNC_skb_cgroup_id: 7351 return &bpf_skb_cgroup_id_proto; 7352 case BPF_FUNC_skb_ancestor_cgroup_id: 7353 return &bpf_skb_ancestor_cgroup_id_proto; 7354 #endif 7355 #ifdef CONFIG_INET 7356 case BPF_FUNC_sk_lookup_tcp: 7357 return &bpf_sk_lookup_tcp_proto; 7358 case BPF_FUNC_sk_lookup_udp: 7359 return &bpf_sk_lookup_udp_proto; 7360 case BPF_FUNC_sk_release: 7361 return &bpf_sk_release_proto; 7362 case BPF_FUNC_tcp_sock: 7363 return &bpf_tcp_sock_proto; 7364 case BPF_FUNC_get_listener_sock: 7365 return &bpf_get_listener_sock_proto; 7366 case BPF_FUNC_skc_lookup_tcp: 7367 return &bpf_skc_lookup_tcp_proto; 7368 case BPF_FUNC_tcp_check_syncookie: 7369 return &bpf_tcp_check_syncookie_proto; 7370 case BPF_FUNC_skb_ecn_set_ce: 7371 return &bpf_skb_ecn_set_ce_proto; 7372 case BPF_FUNC_tcp_gen_syncookie: 7373 return &bpf_tcp_gen_syncookie_proto; 7374 case BPF_FUNC_sk_assign: 7375 return &bpf_sk_assign_proto; 7376 #endif 7377 default: 7378 return bpf_sk_base_func_proto(func_id); 7379 } 7380 } 7381 7382 static const struct bpf_func_proto * 7383 xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7384 { 7385 switch (func_id) { 7386 case BPF_FUNC_perf_event_output: 7387 return &bpf_xdp_event_output_proto; 7388 case BPF_FUNC_get_smp_processor_id: 7389 return &bpf_get_smp_processor_id_proto; 7390 case BPF_FUNC_csum_diff: 7391 return 
&bpf_csum_diff_proto; 7392 case BPF_FUNC_xdp_adjust_head: 7393 return &bpf_xdp_adjust_head_proto; 7394 case BPF_FUNC_xdp_adjust_meta: 7395 return &bpf_xdp_adjust_meta_proto; 7396 case BPF_FUNC_redirect: 7397 return &bpf_xdp_redirect_proto; 7398 case BPF_FUNC_redirect_map: 7399 return &bpf_xdp_redirect_map_proto; 7400 case BPF_FUNC_xdp_adjust_tail: 7401 return &bpf_xdp_adjust_tail_proto; 7402 case BPF_FUNC_fib_lookup: 7403 return &bpf_xdp_fib_lookup_proto; 7404 case BPF_FUNC_check_mtu: 7405 return &bpf_xdp_check_mtu_proto; 7406 #ifdef CONFIG_INET 7407 case BPF_FUNC_sk_lookup_udp: 7408 return &bpf_xdp_sk_lookup_udp_proto; 7409 case BPF_FUNC_sk_lookup_tcp: 7410 return &bpf_xdp_sk_lookup_tcp_proto; 7411 case BPF_FUNC_sk_release: 7412 return &bpf_sk_release_proto; 7413 case BPF_FUNC_skc_lookup_tcp: 7414 return &bpf_xdp_skc_lookup_tcp_proto; 7415 case BPF_FUNC_tcp_check_syncookie: 7416 return &bpf_tcp_check_syncookie_proto; 7417 case BPF_FUNC_tcp_gen_syncookie: 7418 return &bpf_tcp_gen_syncookie_proto; 7419 #endif 7420 default: 7421 return bpf_sk_base_func_proto(func_id); 7422 } 7423 } 7424 7425 const struct bpf_func_proto bpf_sock_map_update_proto __weak; 7426 const struct bpf_func_proto bpf_sock_hash_update_proto __weak; 7427 7428 static const struct bpf_func_proto * 7429 sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7430 { 7431 switch (func_id) { 7432 case BPF_FUNC_setsockopt: 7433 return &bpf_sock_ops_setsockopt_proto; 7434 case BPF_FUNC_getsockopt: 7435 return &bpf_sock_ops_getsockopt_proto; 7436 case BPF_FUNC_sock_ops_cb_flags_set: 7437 return &bpf_sock_ops_cb_flags_set_proto; 7438 case BPF_FUNC_sock_map_update: 7439 return &bpf_sock_map_update_proto; 7440 case BPF_FUNC_sock_hash_update: 7441 return &bpf_sock_hash_update_proto; 7442 case BPF_FUNC_get_socket_cookie: 7443 return &bpf_get_socket_cookie_sock_ops_proto; 7444 case BPF_FUNC_get_local_storage: 7445 return &bpf_get_local_storage_proto; 7446 case BPF_FUNC_perf_event_output: 7447 return &bpf_event_output_data_proto; 7448 case BPF_FUNC_sk_storage_get: 7449 return &bpf_sk_storage_get_proto; 7450 case BPF_FUNC_sk_storage_delete: 7451 return &bpf_sk_storage_delete_proto; 7452 #ifdef CONFIG_INET 7453 case BPF_FUNC_load_hdr_opt: 7454 return &bpf_sock_ops_load_hdr_opt_proto; 7455 case BPF_FUNC_store_hdr_opt: 7456 return &bpf_sock_ops_store_hdr_opt_proto; 7457 case BPF_FUNC_reserve_hdr_opt: 7458 return &bpf_sock_ops_reserve_hdr_opt_proto; 7459 case BPF_FUNC_tcp_sock: 7460 return &bpf_tcp_sock_proto; 7461 #endif /* CONFIG_INET */ 7462 default: 7463 return bpf_sk_base_func_proto(func_id); 7464 } 7465 } 7466 7467 const struct bpf_func_proto bpf_msg_redirect_map_proto __weak; 7468 const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak; 7469 7470 static const struct bpf_func_proto * 7471 sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7472 { 7473 switch (func_id) { 7474 case BPF_FUNC_msg_redirect_map: 7475 return &bpf_msg_redirect_map_proto; 7476 case BPF_FUNC_msg_redirect_hash: 7477 return &bpf_msg_redirect_hash_proto; 7478 case BPF_FUNC_msg_apply_bytes: 7479 return &bpf_msg_apply_bytes_proto; 7480 case BPF_FUNC_msg_cork_bytes: 7481 return &bpf_msg_cork_bytes_proto; 7482 case BPF_FUNC_msg_pull_data: 7483 return &bpf_msg_pull_data_proto; 7484 case BPF_FUNC_msg_push_data: 7485 return &bpf_msg_push_data_proto; 7486 case BPF_FUNC_msg_pop_data: 7487 return &bpf_msg_pop_data_proto; 7488 case BPF_FUNC_perf_event_output: 7489 return &bpf_event_output_data_proto; 7490 case 
BPF_FUNC_get_current_uid_gid: 7491 return &bpf_get_current_uid_gid_proto; 7492 case BPF_FUNC_get_current_pid_tgid: 7493 return &bpf_get_current_pid_tgid_proto; 7494 case BPF_FUNC_sk_storage_get: 7495 return &bpf_sk_storage_get_proto; 7496 case BPF_FUNC_sk_storage_delete: 7497 return &bpf_sk_storage_delete_proto; 7498 #ifdef CONFIG_CGROUPS 7499 case BPF_FUNC_get_current_cgroup_id: 7500 return &bpf_get_current_cgroup_id_proto; 7501 case BPF_FUNC_get_current_ancestor_cgroup_id: 7502 return &bpf_get_current_ancestor_cgroup_id_proto; 7503 #endif 7504 #ifdef CONFIG_CGROUP_NET_CLASSID 7505 case BPF_FUNC_get_cgroup_classid: 7506 return &bpf_get_cgroup_classid_curr_proto; 7507 #endif 7508 default: 7509 return bpf_sk_base_func_proto(func_id); 7510 } 7511 } 7512 7513 const struct bpf_func_proto bpf_sk_redirect_map_proto __weak; 7514 const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak; 7515 7516 static const struct bpf_func_proto * 7517 sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7518 { 7519 switch (func_id) { 7520 case BPF_FUNC_skb_store_bytes: 7521 return &bpf_skb_store_bytes_proto; 7522 case BPF_FUNC_skb_load_bytes: 7523 return &bpf_skb_load_bytes_proto; 7524 case BPF_FUNC_skb_pull_data: 7525 return &sk_skb_pull_data_proto; 7526 case BPF_FUNC_skb_change_tail: 7527 return &sk_skb_change_tail_proto; 7528 case BPF_FUNC_skb_change_head: 7529 return &sk_skb_change_head_proto; 7530 case BPF_FUNC_skb_adjust_room: 7531 return &sk_skb_adjust_room_proto; 7532 case BPF_FUNC_get_socket_cookie: 7533 return &bpf_get_socket_cookie_proto; 7534 case BPF_FUNC_get_socket_uid: 7535 return &bpf_get_socket_uid_proto; 7536 case BPF_FUNC_sk_redirect_map: 7537 return &bpf_sk_redirect_map_proto; 7538 case BPF_FUNC_sk_redirect_hash: 7539 return &bpf_sk_redirect_hash_proto; 7540 case BPF_FUNC_perf_event_output: 7541 return &bpf_skb_event_output_proto; 7542 #ifdef CONFIG_INET 7543 case BPF_FUNC_sk_lookup_tcp: 7544 return &bpf_sk_lookup_tcp_proto; 7545 case BPF_FUNC_sk_lookup_udp: 7546 return &bpf_sk_lookup_udp_proto; 7547 case BPF_FUNC_sk_release: 7548 return &bpf_sk_release_proto; 7549 case BPF_FUNC_skc_lookup_tcp: 7550 return &bpf_skc_lookup_tcp_proto; 7551 #endif 7552 default: 7553 return bpf_sk_base_func_proto(func_id); 7554 } 7555 } 7556 7557 static const struct bpf_func_proto * 7558 flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7559 { 7560 switch (func_id) { 7561 case BPF_FUNC_skb_load_bytes: 7562 return &bpf_flow_dissector_load_bytes_proto; 7563 default: 7564 return bpf_sk_base_func_proto(func_id); 7565 } 7566 } 7567 7568 static const struct bpf_func_proto * 7569 lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7570 { 7571 switch (func_id) { 7572 case BPF_FUNC_skb_load_bytes: 7573 return &bpf_skb_load_bytes_proto; 7574 case BPF_FUNC_skb_pull_data: 7575 return &bpf_skb_pull_data_proto; 7576 case BPF_FUNC_csum_diff: 7577 return &bpf_csum_diff_proto; 7578 case BPF_FUNC_get_cgroup_classid: 7579 return &bpf_get_cgroup_classid_proto; 7580 case BPF_FUNC_get_route_realm: 7581 return &bpf_get_route_realm_proto; 7582 case BPF_FUNC_get_hash_recalc: 7583 return &bpf_get_hash_recalc_proto; 7584 case BPF_FUNC_perf_event_output: 7585 return &bpf_skb_event_output_proto; 7586 case BPF_FUNC_get_smp_processor_id: 7587 return &bpf_get_smp_processor_id_proto; 7588 case BPF_FUNC_skb_under_cgroup: 7589 return &bpf_skb_under_cgroup_proto; 7590 default: 7591 return bpf_sk_base_func_proto(func_id); 7592 } 7593 } 7594 7595 static const struct 
bpf_func_proto * 7596 lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7597 { 7598 switch (func_id) { 7599 case BPF_FUNC_lwt_push_encap: 7600 return &bpf_lwt_in_push_encap_proto; 7601 default: 7602 return lwt_out_func_proto(func_id, prog); 7603 } 7604 } 7605 7606 static const struct bpf_func_proto * 7607 lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7608 { 7609 switch (func_id) { 7610 case BPF_FUNC_skb_get_tunnel_key: 7611 return &bpf_skb_get_tunnel_key_proto; 7612 case BPF_FUNC_skb_set_tunnel_key: 7613 return bpf_get_skb_set_tunnel_proto(func_id); 7614 case BPF_FUNC_skb_get_tunnel_opt: 7615 return &bpf_skb_get_tunnel_opt_proto; 7616 case BPF_FUNC_skb_set_tunnel_opt: 7617 return bpf_get_skb_set_tunnel_proto(func_id); 7618 case BPF_FUNC_redirect: 7619 return &bpf_redirect_proto; 7620 case BPF_FUNC_clone_redirect: 7621 return &bpf_clone_redirect_proto; 7622 case BPF_FUNC_skb_change_tail: 7623 return &bpf_skb_change_tail_proto; 7624 case BPF_FUNC_skb_change_head: 7625 return &bpf_skb_change_head_proto; 7626 case BPF_FUNC_skb_store_bytes: 7627 return &bpf_skb_store_bytes_proto; 7628 case BPF_FUNC_csum_update: 7629 return &bpf_csum_update_proto; 7630 case BPF_FUNC_csum_level: 7631 return &bpf_csum_level_proto; 7632 case BPF_FUNC_l3_csum_replace: 7633 return &bpf_l3_csum_replace_proto; 7634 case BPF_FUNC_l4_csum_replace: 7635 return &bpf_l4_csum_replace_proto; 7636 case BPF_FUNC_set_hash_invalid: 7637 return &bpf_set_hash_invalid_proto; 7638 case BPF_FUNC_lwt_push_encap: 7639 return &bpf_lwt_xmit_push_encap_proto; 7640 default: 7641 return lwt_out_func_proto(func_id, prog); 7642 } 7643 } 7644 7645 static const struct bpf_func_proto * 7646 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7647 { 7648 switch (func_id) { 7649 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 7650 case BPF_FUNC_lwt_seg6_store_bytes: 7651 return &bpf_lwt_seg6_store_bytes_proto; 7652 case BPF_FUNC_lwt_seg6_action: 7653 return &bpf_lwt_seg6_action_proto; 7654 case BPF_FUNC_lwt_seg6_adjust_srh: 7655 return &bpf_lwt_seg6_adjust_srh_proto; 7656 #endif 7657 default: 7658 return lwt_out_func_proto(func_id, prog); 7659 } 7660 } 7661 7662 static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type, 7663 const struct bpf_prog *prog, 7664 struct bpf_insn_access_aux *info) 7665 { 7666 const int size_default = sizeof(__u32); 7667 7668 if (off < 0 || off >= sizeof(struct __sk_buff)) 7669 return false; 7670 7671 /* The verifier guarantees that size > 0. 
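 *
 * The off % size check below only rejects misaligned accesses; reads
 * narrower than the default 4 bytes are still allowed for most fields
 * via the narrow-access path in the default case (for example a 1-byte
 * read inside the 4-byte mark field), whereas writes there must use
 * the full default width.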
*/ 7672 if (off % size != 0) 7673 return false; 7674 7675 switch (off) { 7676 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 7677 if (off + size > offsetofend(struct __sk_buff, cb[4])) 7678 return false; 7679 break; 7680 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): 7681 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): 7682 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): 7683 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): 7684 case bpf_ctx_range(struct __sk_buff, data): 7685 case bpf_ctx_range(struct __sk_buff, data_meta): 7686 case bpf_ctx_range(struct __sk_buff, data_end): 7687 if (size != size_default) 7688 return false; 7689 break; 7690 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 7691 return false; 7692 case bpf_ctx_range(struct __sk_buff, tstamp): 7693 if (size != sizeof(__u64)) 7694 return false; 7695 break; 7696 case offsetof(struct __sk_buff, sk): 7697 if (type == BPF_WRITE || size != sizeof(__u64)) 7698 return false; 7699 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; 7700 break; 7701 default: 7702 /* Only narrow read access allowed for now. */ 7703 if (type == BPF_WRITE) { 7704 if (size != size_default) 7705 return false; 7706 } else { 7707 bpf_ctx_record_field_size(info, size_default); 7708 if (!bpf_ctx_narrow_access_ok(off, size, size_default)) 7709 return false; 7710 } 7711 } 7712 7713 return true; 7714 } 7715 7716 static bool sk_filter_is_valid_access(int off, int size, 7717 enum bpf_access_type type, 7718 const struct bpf_prog *prog, 7719 struct bpf_insn_access_aux *info) 7720 { 7721 switch (off) { 7722 case bpf_ctx_range(struct __sk_buff, tc_classid): 7723 case bpf_ctx_range(struct __sk_buff, data): 7724 case bpf_ctx_range(struct __sk_buff, data_meta): 7725 case bpf_ctx_range(struct __sk_buff, data_end): 7726 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 7727 case bpf_ctx_range(struct __sk_buff, tstamp): 7728 case bpf_ctx_range(struct __sk_buff, wire_len): 7729 return false; 7730 } 7731 7732 if (type == BPF_WRITE) { 7733 switch (off) { 7734 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 7735 break; 7736 default: 7737 return false; 7738 } 7739 } 7740 7741 return bpf_skb_is_valid_access(off, size, type, prog, info); 7742 } 7743 7744 static bool cg_skb_is_valid_access(int off, int size, 7745 enum bpf_access_type type, 7746 const struct bpf_prog *prog, 7747 struct bpf_insn_access_aux *info) 7748 { 7749 switch (off) { 7750 case bpf_ctx_range(struct __sk_buff, tc_classid): 7751 case bpf_ctx_range(struct __sk_buff, data_meta): 7752 case bpf_ctx_range(struct __sk_buff, wire_len): 7753 return false; 7754 case bpf_ctx_range(struct __sk_buff, data): 7755 case bpf_ctx_range(struct __sk_buff, data_end): 7756 if (!bpf_capable()) 7757 return false; 7758 break; 7759 } 7760 7761 if (type == BPF_WRITE) { 7762 switch (off) { 7763 case bpf_ctx_range(struct __sk_buff, mark): 7764 case bpf_ctx_range(struct __sk_buff, priority): 7765 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 7766 break; 7767 case bpf_ctx_range(struct __sk_buff, tstamp): 7768 if (!bpf_capable()) 7769 return false; 7770 break; 7771 default: 7772 return false; 7773 } 7774 } 7775 7776 switch (off) { 7777 case bpf_ctx_range(struct __sk_buff, data): 7778 info->reg_type = PTR_TO_PACKET; 7779 break; 7780 case bpf_ctx_range(struct __sk_buff, data_end): 7781 info->reg_type = PTR_TO_PACKET_END; 7782 break; 7783 } 7784 7785 return bpf_skb_is_valid_access(off, size, type, prog, info); 7786 } 7787 
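/* Context access rules for the LWT program types below are stricter
 * than the cls/act ones: tc_classid, the socket fields (family through
 * local_port), data_meta, tstamp and wire_len are not accessible at
 * all, and only mark, priority and cb[0..4] may be written.  A store
 * such as "skb->tstamp = 0" is therefore rejected at verification time
 * for an LWT program while being accepted for cls/act (see
 * tc_cls_act_is_valid_access() further down).
 */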
7788 static bool lwt_is_valid_access(int off, int size, 7789 enum bpf_access_type type, 7790 const struct bpf_prog *prog, 7791 struct bpf_insn_access_aux *info) 7792 { 7793 switch (off) { 7794 case bpf_ctx_range(struct __sk_buff, tc_classid): 7795 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 7796 case bpf_ctx_range(struct __sk_buff, data_meta): 7797 case bpf_ctx_range(struct __sk_buff, tstamp): 7798 case bpf_ctx_range(struct __sk_buff, wire_len): 7799 return false; 7800 } 7801 7802 if (type == BPF_WRITE) { 7803 switch (off) { 7804 case bpf_ctx_range(struct __sk_buff, mark): 7805 case bpf_ctx_range(struct __sk_buff, priority): 7806 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 7807 break; 7808 default: 7809 return false; 7810 } 7811 } 7812 7813 switch (off) { 7814 case bpf_ctx_range(struct __sk_buff, data): 7815 info->reg_type = PTR_TO_PACKET; 7816 break; 7817 case bpf_ctx_range(struct __sk_buff, data_end): 7818 info->reg_type = PTR_TO_PACKET_END; 7819 break; 7820 } 7821 7822 return bpf_skb_is_valid_access(off, size, type, prog, info); 7823 } 7824 7825 /* Attach type specific accesses */ 7826 static bool __sock_filter_check_attach_type(int off, 7827 enum bpf_access_type access_type, 7828 enum bpf_attach_type attach_type) 7829 { 7830 switch (off) { 7831 case offsetof(struct bpf_sock, bound_dev_if): 7832 case offsetof(struct bpf_sock, mark): 7833 case offsetof(struct bpf_sock, priority): 7834 switch (attach_type) { 7835 case BPF_CGROUP_INET_SOCK_CREATE: 7836 case BPF_CGROUP_INET_SOCK_RELEASE: 7837 goto full_access; 7838 default: 7839 return false; 7840 } 7841 case bpf_ctx_range(struct bpf_sock, src_ip4): 7842 switch (attach_type) { 7843 case BPF_CGROUP_INET4_POST_BIND: 7844 goto read_only; 7845 default: 7846 return false; 7847 } 7848 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 7849 switch (attach_type) { 7850 case BPF_CGROUP_INET6_POST_BIND: 7851 goto read_only; 7852 default: 7853 return false; 7854 } 7855 case bpf_ctx_range(struct bpf_sock, src_port): 7856 switch (attach_type) { 7857 case BPF_CGROUP_INET4_POST_BIND: 7858 case BPF_CGROUP_INET6_POST_BIND: 7859 goto read_only; 7860 default: 7861 return false; 7862 } 7863 } 7864 read_only: 7865 return access_type == BPF_READ; 7866 full_access: 7867 return true; 7868 } 7869 7870 bool bpf_sock_common_is_valid_access(int off, int size, 7871 enum bpf_access_type type, 7872 struct bpf_insn_access_aux *info) 7873 { 7874 switch (off) { 7875 case bpf_ctx_range_till(struct bpf_sock, type, priority): 7876 return false; 7877 default: 7878 return bpf_sock_is_valid_access(off, size, type, info); 7879 } 7880 } 7881 7882 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 7883 struct bpf_insn_access_aux *info) 7884 { 7885 const int size_default = sizeof(__u32); 7886 7887 if (off < 0 || off >= sizeof(struct bpf_sock)) 7888 return false; 7889 if (off % size != 0) 7890 return false; 7891 7892 switch (off) { 7893 case offsetof(struct bpf_sock, state): 7894 case offsetof(struct bpf_sock, family): 7895 case offsetof(struct bpf_sock, type): 7896 case offsetof(struct bpf_sock, protocol): 7897 case offsetof(struct bpf_sock, dst_port): 7898 case offsetof(struct bpf_sock, src_port): 7899 case offsetof(struct bpf_sock, rx_queue_mapping): 7900 case bpf_ctx_range(struct bpf_sock, src_ip4): 7901 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 7902 case bpf_ctx_range(struct bpf_sock, dst_ip4): 7903 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): 7904 
bpf_ctx_record_field_size(info, size_default); 7905 return bpf_ctx_narrow_access_ok(off, size, size_default); 7906 } 7907 7908 return size == size_default; 7909 } 7910 7911 static bool sock_filter_is_valid_access(int off, int size, 7912 enum bpf_access_type type, 7913 const struct bpf_prog *prog, 7914 struct bpf_insn_access_aux *info) 7915 { 7916 if (!bpf_sock_is_valid_access(off, size, type, info)) 7917 return false; 7918 return __sock_filter_check_attach_type(off, type, 7919 prog->expected_attach_type); 7920 } 7921 7922 static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write, 7923 const struct bpf_prog *prog) 7924 { 7925 /* Neither direct read nor direct write requires any preliminary 7926 * action. 7927 */ 7928 return 0; 7929 } 7930 7931 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, 7932 const struct bpf_prog *prog, int drop_verdict) 7933 { 7934 struct bpf_insn *insn = insn_buf; 7935 7936 if (!direct_write) 7937 return 0; 7938 7939 /* if (!skb->cloned) 7940 * goto start; 7941 * 7942 * (Fast-path, otherwise approximation that we might be 7943 * a clone, do the rest in helper.) 7944 */ 7945 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); 7946 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); 7947 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); 7948 7949 /* ret = bpf_skb_pull_data(skb, 0); */ 7950 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 7951 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); 7952 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 7953 BPF_FUNC_skb_pull_data); 7954 /* if (!ret) 7955 * goto restore; 7956 * return TC_ACT_SHOT; 7957 */ 7958 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); 7959 *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict); 7960 *insn++ = BPF_EXIT_INSN(); 7961 7962 /* restore: */ 7963 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); 7964 /* start: */ 7965 *insn++ = prog->insnsi[0]; 7966 7967 return insn - insn_buf; 7968 } 7969 7970 static int bpf_gen_ld_abs(const struct bpf_insn *orig, 7971 struct bpf_insn *insn_buf) 7972 { 7973 bool indirect = BPF_MODE(orig->code) == BPF_IND; 7974 struct bpf_insn *insn = insn_buf; 7975 7976 if (!indirect) { 7977 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); 7978 } else { 7979 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg); 7980 if (orig->imm) 7981 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); 7982 } 7983 /* We're guaranteed here that CTX is in R6. 
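 *
 * The instructions emitted below behave roughly like:
 *
 *	R2 = offset (plus src_reg for the indirect form)
 *	R1 = CTX (the skb, taken from R6)
 *	R0 = bpf_skb_load_helper_{8,16,32}_no_cache(R1, R2)
 *	if (R0 < 0) { R0 = 0; exit }
 *
 * i.e. a failed load (offset outside the packet) makes the program
 * return 0, preserving classic BPF LD_ABS/LD_IND error semantics.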
*/ 7984 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); 7985 7986 switch (BPF_SIZE(orig->code)) { 7987 case BPF_B: 7988 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache); 7989 break; 7990 case BPF_H: 7991 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache); 7992 break; 7993 case BPF_W: 7994 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache); 7995 break; 7996 } 7997 7998 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2); 7999 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0); 8000 *insn++ = BPF_EXIT_INSN(); 8001 8002 return insn - insn_buf; 8003 } 8004 8005 static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, 8006 const struct bpf_prog *prog) 8007 { 8008 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT); 8009 } 8010 8011 static bool tc_cls_act_is_valid_access(int off, int size, 8012 enum bpf_access_type type, 8013 const struct bpf_prog *prog, 8014 struct bpf_insn_access_aux *info) 8015 { 8016 if (type == BPF_WRITE) { 8017 switch (off) { 8018 case bpf_ctx_range(struct __sk_buff, mark): 8019 case bpf_ctx_range(struct __sk_buff, tc_index): 8020 case bpf_ctx_range(struct __sk_buff, priority): 8021 case bpf_ctx_range(struct __sk_buff, tc_classid): 8022 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8023 case bpf_ctx_range(struct __sk_buff, tstamp): 8024 case bpf_ctx_range(struct __sk_buff, queue_mapping): 8025 break; 8026 default: 8027 return false; 8028 } 8029 } 8030 8031 switch (off) { 8032 case bpf_ctx_range(struct __sk_buff, data): 8033 info->reg_type = PTR_TO_PACKET; 8034 break; 8035 case bpf_ctx_range(struct __sk_buff, data_meta): 8036 info->reg_type = PTR_TO_PACKET_META; 8037 break; 8038 case bpf_ctx_range(struct __sk_buff, data_end): 8039 info->reg_type = PTR_TO_PACKET_END; 8040 break; 8041 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 8042 return false; 8043 } 8044 8045 return bpf_skb_is_valid_access(off, size, type, prog, info); 8046 } 8047 8048 static bool __is_valid_xdp_access(int off, int size) 8049 { 8050 if (off < 0 || off >= sizeof(struct xdp_md)) 8051 return false; 8052 if (off % size != 0) 8053 return false; 8054 if (size != sizeof(__u32)) 8055 return false; 8056 8057 return true; 8058 } 8059 8060 static bool xdp_is_valid_access(int off, int size, 8061 enum bpf_access_type type, 8062 const struct bpf_prog *prog, 8063 struct bpf_insn_access_aux *info) 8064 { 8065 if (prog->expected_attach_type != BPF_XDP_DEVMAP) { 8066 switch (off) { 8067 case offsetof(struct xdp_md, egress_ifindex): 8068 return false; 8069 } 8070 } 8071 8072 if (type == BPF_WRITE) { 8073 if (bpf_prog_is_dev_bound(prog->aux)) { 8074 switch (off) { 8075 case offsetof(struct xdp_md, rx_queue_index): 8076 return __is_valid_xdp_access(off, size); 8077 } 8078 } 8079 return false; 8080 } 8081 8082 switch (off) { 8083 case offsetof(struct xdp_md, data): 8084 info->reg_type = PTR_TO_PACKET; 8085 break; 8086 case offsetof(struct xdp_md, data_meta): 8087 info->reg_type = PTR_TO_PACKET_META; 8088 break; 8089 case offsetof(struct xdp_md, data_end): 8090 info->reg_type = PTR_TO_PACKET_END; 8091 break; 8092 } 8093 8094 return __is_valid_xdp_access(off, size); 8095 } 8096 8097 void bpf_warn_invalid_xdp_action(u32 act) 8098 { 8099 const u32 act_max = XDP_REDIRECT; 8100 8101 WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n", 8102 act > act_max ? 
"Illegal" : "Driver unsupported", 8103 act); 8104 } 8105 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); 8106 8107 static bool sock_addr_is_valid_access(int off, int size, 8108 enum bpf_access_type type, 8109 const struct bpf_prog *prog, 8110 struct bpf_insn_access_aux *info) 8111 { 8112 const int size_default = sizeof(__u32); 8113 8114 if (off < 0 || off >= sizeof(struct bpf_sock_addr)) 8115 return false; 8116 if (off % size != 0) 8117 return false; 8118 8119 /* Disallow access to IPv6 fields from IPv4 contex and vise 8120 * versa. 8121 */ 8122 switch (off) { 8123 case bpf_ctx_range(struct bpf_sock_addr, user_ip4): 8124 switch (prog->expected_attach_type) { 8125 case BPF_CGROUP_INET4_BIND: 8126 case BPF_CGROUP_INET4_CONNECT: 8127 case BPF_CGROUP_INET4_GETPEERNAME: 8128 case BPF_CGROUP_INET4_GETSOCKNAME: 8129 case BPF_CGROUP_UDP4_SENDMSG: 8130 case BPF_CGROUP_UDP4_RECVMSG: 8131 break; 8132 default: 8133 return false; 8134 } 8135 break; 8136 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 8137 switch (prog->expected_attach_type) { 8138 case BPF_CGROUP_INET6_BIND: 8139 case BPF_CGROUP_INET6_CONNECT: 8140 case BPF_CGROUP_INET6_GETPEERNAME: 8141 case BPF_CGROUP_INET6_GETSOCKNAME: 8142 case BPF_CGROUP_UDP6_SENDMSG: 8143 case BPF_CGROUP_UDP6_RECVMSG: 8144 break; 8145 default: 8146 return false; 8147 } 8148 break; 8149 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): 8150 switch (prog->expected_attach_type) { 8151 case BPF_CGROUP_UDP4_SENDMSG: 8152 break; 8153 default: 8154 return false; 8155 } 8156 break; 8157 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 8158 msg_src_ip6[3]): 8159 switch (prog->expected_attach_type) { 8160 case BPF_CGROUP_UDP6_SENDMSG: 8161 break; 8162 default: 8163 return false; 8164 } 8165 break; 8166 } 8167 8168 switch (off) { 8169 case bpf_ctx_range(struct bpf_sock_addr, user_ip4): 8170 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 8171 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): 8172 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 8173 msg_src_ip6[3]): 8174 case bpf_ctx_range(struct bpf_sock_addr, user_port): 8175 if (type == BPF_READ) { 8176 bpf_ctx_record_field_size(info, size_default); 8177 8178 if (bpf_ctx_wide_access_ok(off, size, 8179 struct bpf_sock_addr, 8180 user_ip6)) 8181 return true; 8182 8183 if (bpf_ctx_wide_access_ok(off, size, 8184 struct bpf_sock_addr, 8185 msg_src_ip6)) 8186 return true; 8187 8188 if (!bpf_ctx_narrow_access_ok(off, size, size_default)) 8189 return false; 8190 } else { 8191 if (bpf_ctx_wide_access_ok(off, size, 8192 struct bpf_sock_addr, 8193 user_ip6)) 8194 return true; 8195 8196 if (bpf_ctx_wide_access_ok(off, size, 8197 struct bpf_sock_addr, 8198 msg_src_ip6)) 8199 return true; 8200 8201 if (size != size_default) 8202 return false; 8203 } 8204 break; 8205 case offsetof(struct bpf_sock_addr, sk): 8206 if (type != BPF_READ) 8207 return false; 8208 if (size != sizeof(__u64)) 8209 return false; 8210 info->reg_type = PTR_TO_SOCKET; 8211 break; 8212 default: 8213 if (type == BPF_READ) { 8214 if (size != size_default) 8215 return false; 8216 } else { 8217 return false; 8218 } 8219 } 8220 8221 return true; 8222 } 8223 8224 static bool sock_ops_is_valid_access(int off, int size, 8225 enum bpf_access_type type, 8226 const struct bpf_prog *prog, 8227 struct bpf_insn_access_aux *info) 8228 { 8229 const int size_default = sizeof(__u32); 8230 8231 if (off < 0 || off >= sizeof(struct bpf_sock_ops)) 8232 return false; 8233 8234 /* The verifier guarantees 
that size > 0. */ 8235 if (off % size != 0) 8236 return false; 8237 8238 if (type == BPF_WRITE) { 8239 switch (off) { 8240 case offsetof(struct bpf_sock_ops, reply): 8241 case offsetof(struct bpf_sock_ops, sk_txhash): 8242 if (size != size_default) 8243 return false; 8244 break; 8245 default: 8246 return false; 8247 } 8248 } else { 8249 switch (off) { 8250 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received, 8251 bytes_acked): 8252 if (size != sizeof(__u64)) 8253 return false; 8254 break; 8255 case offsetof(struct bpf_sock_ops, sk): 8256 if (size != sizeof(__u64)) 8257 return false; 8258 info->reg_type = PTR_TO_SOCKET_OR_NULL; 8259 break; 8260 case offsetof(struct bpf_sock_ops, skb_data): 8261 if (size != sizeof(__u64)) 8262 return false; 8263 info->reg_type = PTR_TO_PACKET; 8264 break; 8265 case offsetof(struct bpf_sock_ops, skb_data_end): 8266 if (size != sizeof(__u64)) 8267 return false; 8268 info->reg_type = PTR_TO_PACKET_END; 8269 break; 8270 case offsetof(struct bpf_sock_ops, skb_tcp_flags): 8271 bpf_ctx_record_field_size(info, size_default); 8272 return bpf_ctx_narrow_access_ok(off, size, 8273 size_default); 8274 default: 8275 if (size != size_default) 8276 return false; 8277 break; 8278 } 8279 } 8280 8281 return true; 8282 } 8283 8284 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, 8285 const struct bpf_prog *prog) 8286 { 8287 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP); 8288 } 8289 8290 static bool sk_skb_is_valid_access(int off, int size, 8291 enum bpf_access_type type, 8292 const struct bpf_prog *prog, 8293 struct bpf_insn_access_aux *info) 8294 { 8295 switch (off) { 8296 case bpf_ctx_range(struct __sk_buff, tc_classid): 8297 case bpf_ctx_range(struct __sk_buff, data_meta): 8298 case bpf_ctx_range(struct __sk_buff, tstamp): 8299 case bpf_ctx_range(struct __sk_buff, wire_len): 8300 return false; 8301 } 8302 8303 if (type == BPF_WRITE) { 8304 switch (off) { 8305 case bpf_ctx_range(struct __sk_buff, tc_index): 8306 case bpf_ctx_range(struct __sk_buff, priority): 8307 break; 8308 default: 8309 return false; 8310 } 8311 } 8312 8313 switch (off) { 8314 case bpf_ctx_range(struct __sk_buff, mark): 8315 return false; 8316 case bpf_ctx_range(struct __sk_buff, data): 8317 info->reg_type = PTR_TO_PACKET; 8318 break; 8319 case bpf_ctx_range(struct __sk_buff, data_end): 8320 info->reg_type = PTR_TO_PACKET_END; 8321 break; 8322 } 8323 8324 return bpf_skb_is_valid_access(off, size, type, prog, info); 8325 } 8326 8327 static bool sk_msg_is_valid_access(int off, int size, 8328 enum bpf_access_type type, 8329 const struct bpf_prog *prog, 8330 struct bpf_insn_access_aux *info) 8331 { 8332 if (type == BPF_WRITE) 8333 return false; 8334 8335 if (off % size != 0) 8336 return false; 8337 8338 switch (off) { 8339 case offsetof(struct sk_msg_md, data): 8340 info->reg_type = PTR_TO_PACKET; 8341 if (size != sizeof(__u64)) 8342 return false; 8343 break; 8344 case offsetof(struct sk_msg_md, data_end): 8345 info->reg_type = PTR_TO_PACKET_END; 8346 if (size != sizeof(__u64)) 8347 return false; 8348 break; 8349 case offsetof(struct sk_msg_md, sk): 8350 if (size != sizeof(__u64)) 8351 return false; 8352 info->reg_type = PTR_TO_SOCKET; 8353 break; 8354 case bpf_ctx_range(struct sk_msg_md, family): 8355 case bpf_ctx_range(struct sk_msg_md, remote_ip4): 8356 case bpf_ctx_range(struct sk_msg_md, local_ip4): 8357 case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]): 8358 case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]): 
8359 case bpf_ctx_range(struct sk_msg_md, remote_port): 8360 case bpf_ctx_range(struct sk_msg_md, local_port): 8361 case bpf_ctx_range(struct sk_msg_md, size): 8362 if (size != sizeof(__u32)) 8363 return false; 8364 break; 8365 default: 8366 return false; 8367 } 8368 return true; 8369 } 8370 8371 static bool flow_dissector_is_valid_access(int off, int size, 8372 enum bpf_access_type type, 8373 const struct bpf_prog *prog, 8374 struct bpf_insn_access_aux *info) 8375 { 8376 const int size_default = sizeof(__u32); 8377 8378 if (off < 0 || off >= sizeof(struct __sk_buff)) 8379 return false; 8380 8381 if (type == BPF_WRITE) 8382 return false; 8383 8384 switch (off) { 8385 case bpf_ctx_range(struct __sk_buff, data): 8386 if (size != size_default) 8387 return false; 8388 info->reg_type = PTR_TO_PACKET; 8389 return true; 8390 case bpf_ctx_range(struct __sk_buff, data_end): 8391 if (size != size_default) 8392 return false; 8393 info->reg_type = PTR_TO_PACKET_END; 8394 return true; 8395 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 8396 if (size != sizeof(__u64)) 8397 return false; 8398 info->reg_type = PTR_TO_FLOW_KEYS; 8399 return true; 8400 default: 8401 return false; 8402 } 8403 } 8404 8405 static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, 8406 const struct bpf_insn *si, 8407 struct bpf_insn *insn_buf, 8408 struct bpf_prog *prog, 8409 u32 *target_size) 8410 8411 { 8412 struct bpf_insn *insn = insn_buf; 8413 8414 switch (si->off) { 8415 case offsetof(struct __sk_buff, data): 8416 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data), 8417 si->dst_reg, si->src_reg, 8418 offsetof(struct bpf_flow_dissector, data)); 8419 break; 8420 8421 case offsetof(struct __sk_buff, data_end): 8422 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end), 8423 si->dst_reg, si->src_reg, 8424 offsetof(struct bpf_flow_dissector, data_end)); 8425 break; 8426 8427 case offsetof(struct __sk_buff, flow_keys): 8428 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys), 8429 si->dst_reg, si->src_reg, 8430 offsetof(struct bpf_flow_dissector, flow_keys)); 8431 break; 8432 } 8433 8434 return insn - insn_buf; 8435 } 8436 8437 static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si, 8438 struct bpf_insn *insn) 8439 { 8440 /* si->dst_reg = skb_shinfo(SKB); */ 8441 #ifdef NET_SKBUFF_DATA_USES_OFFSET 8442 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 8443 BPF_REG_AX, si->src_reg, 8444 offsetof(struct sk_buff, end)); 8445 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), 8446 si->dst_reg, si->src_reg, 8447 offsetof(struct sk_buff, head)); 8448 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); 8449 #else 8450 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 8451 si->dst_reg, si->src_reg, 8452 offsetof(struct sk_buff, end)); 8453 #endif 8454 8455 return insn; 8456 } 8457 8458 static u32 bpf_convert_ctx_access(enum bpf_access_type type, 8459 const struct bpf_insn *si, 8460 struct bpf_insn *insn_buf, 8461 struct bpf_prog *prog, u32 *target_size) 8462 { 8463 struct bpf_insn *insn = insn_buf; 8464 int off; 8465 8466 switch (si->off) { 8467 case offsetof(struct __sk_buff, len): 8468 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8469 bpf_target_off(struct sk_buff, len, 4, 8470 target_size)); 8471 break; 8472 8473 case offsetof(struct __sk_buff, protocol): 8474 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 8475 bpf_target_off(struct sk_buff, protocol, 2, 8476 
target_size)); 8477 break; 8478 8479 case offsetof(struct __sk_buff, vlan_proto): 8480 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 8481 bpf_target_off(struct sk_buff, vlan_proto, 2, 8482 target_size)); 8483 break; 8484 8485 case offsetof(struct __sk_buff, priority): 8486 if (type == BPF_WRITE) 8487 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 8488 bpf_target_off(struct sk_buff, priority, 4, 8489 target_size)); 8490 else 8491 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8492 bpf_target_off(struct sk_buff, priority, 4, 8493 target_size)); 8494 break; 8495 8496 case offsetof(struct __sk_buff, ingress_ifindex): 8497 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8498 bpf_target_off(struct sk_buff, skb_iif, 4, 8499 target_size)); 8500 break; 8501 8502 case offsetof(struct __sk_buff, ifindex): 8503 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 8504 si->dst_reg, si->src_reg, 8505 offsetof(struct sk_buff, dev)); 8506 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 8507 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 8508 bpf_target_off(struct net_device, ifindex, 4, 8509 target_size)); 8510 break; 8511 8512 case offsetof(struct __sk_buff, hash): 8513 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8514 bpf_target_off(struct sk_buff, hash, 4, 8515 target_size)); 8516 break; 8517 8518 case offsetof(struct __sk_buff, mark): 8519 if (type == BPF_WRITE) 8520 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 8521 bpf_target_off(struct sk_buff, mark, 4, 8522 target_size)); 8523 else 8524 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8525 bpf_target_off(struct sk_buff, mark, 4, 8526 target_size)); 8527 break; 8528 8529 case offsetof(struct __sk_buff, pkt_type): 8530 *target_size = 1; 8531 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, 8532 PKT_TYPE_OFFSET()); 8533 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); 8534 #ifdef __BIG_ENDIAN_BITFIELD 8535 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); 8536 #endif 8537 break; 8538 8539 case offsetof(struct __sk_buff, queue_mapping): 8540 if (type == BPF_WRITE) { 8541 *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1); 8542 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, 8543 bpf_target_off(struct sk_buff, 8544 queue_mapping, 8545 2, target_size)); 8546 } else { 8547 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 8548 bpf_target_off(struct sk_buff, 8549 queue_mapping, 8550 2, target_size)); 8551 } 8552 break; 8553 8554 case offsetof(struct __sk_buff, vlan_present): 8555 *target_size = 1; 8556 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, 8557 PKT_VLAN_PRESENT_OFFSET()); 8558 if (PKT_VLAN_PRESENT_BIT) 8559 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT); 8560 if (PKT_VLAN_PRESENT_BIT < 7) 8561 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1); 8562 break; 8563 8564 case offsetof(struct __sk_buff, vlan_tci): 8565 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 8566 bpf_target_off(struct sk_buff, vlan_tci, 2, 8567 target_size)); 8568 break; 8569 8570 case offsetof(struct __sk_buff, cb[0]) ... 
8571 offsetofend(struct __sk_buff, cb[4]) - 1: 8572 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20); 8573 BUILD_BUG_ON((offsetof(struct sk_buff, cb) + 8574 offsetof(struct qdisc_skb_cb, data)) % 8575 sizeof(__u64)); 8576 8577 prog->cb_access = 1; 8578 off = si->off; 8579 off -= offsetof(struct __sk_buff, cb[0]); 8580 off += offsetof(struct sk_buff, cb); 8581 off += offsetof(struct qdisc_skb_cb, data); 8582 if (type == BPF_WRITE) 8583 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, 8584 si->src_reg, off); 8585 else 8586 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, 8587 si->src_reg, off); 8588 break; 8589 8590 case offsetof(struct __sk_buff, tc_classid): 8591 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2); 8592 8593 off = si->off; 8594 off -= offsetof(struct __sk_buff, tc_classid); 8595 off += offsetof(struct sk_buff, cb); 8596 off += offsetof(struct qdisc_skb_cb, tc_classid); 8597 *target_size = 2; 8598 if (type == BPF_WRITE) 8599 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, 8600 si->src_reg, off); 8601 else 8602 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, 8603 si->src_reg, off); 8604 break; 8605 8606 case offsetof(struct __sk_buff, data): 8607 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), 8608 si->dst_reg, si->src_reg, 8609 offsetof(struct sk_buff, data)); 8610 break; 8611 8612 case offsetof(struct __sk_buff, data_meta): 8613 off = si->off; 8614 off -= offsetof(struct __sk_buff, data_meta); 8615 off += offsetof(struct sk_buff, cb); 8616 off += offsetof(struct bpf_skb_data_end, data_meta); 8617 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, 8618 si->src_reg, off); 8619 break; 8620 8621 case offsetof(struct __sk_buff, data_end): 8622 off = si->off; 8623 off -= offsetof(struct __sk_buff, data_end); 8624 off += offsetof(struct sk_buff, cb); 8625 off += offsetof(struct bpf_skb_data_end, data_end); 8626 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, 8627 si->src_reg, off); 8628 break; 8629 8630 case offsetof(struct __sk_buff, tc_index): 8631 #ifdef CONFIG_NET_SCHED 8632 if (type == BPF_WRITE) 8633 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, 8634 bpf_target_off(struct sk_buff, tc_index, 2, 8635 target_size)); 8636 else 8637 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 8638 bpf_target_off(struct sk_buff, tc_index, 2, 8639 target_size)); 8640 #else 8641 *target_size = 2; 8642 if (type == BPF_WRITE) 8643 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); 8644 else 8645 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 8646 #endif 8647 break; 8648 8649 case offsetof(struct __sk_buff, napi_id): 8650 #if defined(CONFIG_NET_RX_BUSY_POLL) 8651 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8652 bpf_target_off(struct sk_buff, napi_id, 4, 8653 target_size)); 8654 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1); 8655 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 8656 #else 8657 *target_size = 4; 8658 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 8659 #endif 8660 break; 8661 case offsetof(struct __sk_buff, family): 8662 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 8663 8664 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8665 si->dst_reg, si->src_reg, 8666 offsetof(struct sk_buff, sk)); 8667 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 8668 bpf_target_off(struct sock_common, 8669 skc_family, 8670 2, target_size)); 8671 break; 8672 case offsetof(struct __sk_buff, remote_ip4): 8673 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 8674 8675 *insn++ = 
BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8676 si->dst_reg, si->src_reg, 8677 offsetof(struct sk_buff, sk)); 8678 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 8679 bpf_target_off(struct sock_common, 8680 skc_daddr, 8681 4, target_size)); 8682 break; 8683 case offsetof(struct __sk_buff, local_ip4): 8684 BUILD_BUG_ON(sizeof_field(struct sock_common, 8685 skc_rcv_saddr) != 4); 8686 8687 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8688 si->dst_reg, si->src_reg, 8689 offsetof(struct sk_buff, sk)); 8690 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 8691 bpf_target_off(struct sock_common, 8692 skc_rcv_saddr, 8693 4, target_size)); 8694 break; 8695 case offsetof(struct __sk_buff, remote_ip6[0]) ... 8696 offsetof(struct __sk_buff, remote_ip6[3]): 8697 #if IS_ENABLED(CONFIG_IPV6) 8698 BUILD_BUG_ON(sizeof_field(struct sock_common, 8699 skc_v6_daddr.s6_addr32[0]) != 4); 8700 8701 off = si->off; 8702 off -= offsetof(struct __sk_buff, remote_ip6[0]); 8703 8704 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8705 si->dst_reg, si->src_reg, 8706 offsetof(struct sk_buff, sk)); 8707 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 8708 offsetof(struct sock_common, 8709 skc_v6_daddr.s6_addr32[0]) + 8710 off); 8711 #else 8712 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 8713 #endif 8714 break; 8715 case offsetof(struct __sk_buff, local_ip6[0]) ... 8716 offsetof(struct __sk_buff, local_ip6[3]): 8717 #if IS_ENABLED(CONFIG_IPV6) 8718 BUILD_BUG_ON(sizeof_field(struct sock_common, 8719 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 8720 8721 off = si->off; 8722 off -= offsetof(struct __sk_buff, local_ip6[0]); 8723 8724 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8725 si->dst_reg, si->src_reg, 8726 offsetof(struct sk_buff, sk)); 8727 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 8728 offsetof(struct sock_common, 8729 skc_v6_rcv_saddr.s6_addr32[0]) + 8730 off); 8731 #else 8732 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 8733 #endif 8734 break; 8735 8736 case offsetof(struct __sk_buff, remote_port): 8737 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 8738 8739 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8740 si->dst_reg, si->src_reg, 8741 offsetof(struct sk_buff, sk)); 8742 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 8743 bpf_target_off(struct sock_common, 8744 skc_dport, 8745 2, target_size)); 8746 #ifndef __BIG_ENDIAN_BITFIELD 8747 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 8748 #endif 8749 break; 8750 8751 case offsetof(struct __sk_buff, local_port): 8752 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 8753 8754 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8755 si->dst_reg, si->src_reg, 8756 offsetof(struct sk_buff, sk)); 8757 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 8758 bpf_target_off(struct sock_common, 8759 skc_num, 2, target_size)); 8760 break; 8761 8762 case offsetof(struct __sk_buff, tstamp): 8763 BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8); 8764 8765 if (type == BPF_WRITE) 8766 *insn++ = BPF_STX_MEM(BPF_DW, 8767 si->dst_reg, si->src_reg, 8768 bpf_target_off(struct sk_buff, 8769 tstamp, 8, 8770 target_size)); 8771 else 8772 *insn++ = BPF_LDX_MEM(BPF_DW, 8773 si->dst_reg, si->src_reg, 8774 bpf_target_off(struct sk_buff, 8775 tstamp, 8, 8776 target_size)); 8777 break; 8778 8779 case offsetof(struct __sk_buff, gso_segs): 8780 insn = bpf_convert_shinfo_access(si, insn); 8781 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, 
gso_segs), 8782 si->dst_reg, si->dst_reg, 8783 bpf_target_off(struct skb_shared_info, 8784 gso_segs, 2, 8785 target_size)); 8786 break; 8787 case offsetof(struct __sk_buff, gso_size): 8788 insn = bpf_convert_shinfo_access(si, insn); 8789 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size), 8790 si->dst_reg, si->dst_reg, 8791 bpf_target_off(struct skb_shared_info, 8792 gso_size, 2, 8793 target_size)); 8794 break; 8795 case offsetof(struct __sk_buff, wire_len): 8796 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4); 8797 8798 off = si->off; 8799 off -= offsetof(struct __sk_buff, wire_len); 8800 off += offsetof(struct sk_buff, cb); 8801 off += offsetof(struct qdisc_skb_cb, pkt_len); 8802 *target_size = 4; 8803 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); 8804 break; 8805 8806 case offsetof(struct __sk_buff, sk): 8807 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8808 si->dst_reg, si->src_reg, 8809 offsetof(struct sk_buff, sk)); 8810 break; 8811 } 8812 8813 return insn - insn_buf; 8814 } 8815 8816 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 8817 const struct bpf_insn *si, 8818 struct bpf_insn *insn_buf, 8819 struct bpf_prog *prog, u32 *target_size) 8820 { 8821 struct bpf_insn *insn = insn_buf; 8822 int off; 8823 8824 switch (si->off) { 8825 case offsetof(struct bpf_sock, bound_dev_if): 8826 BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4); 8827 8828 if (type == BPF_WRITE) 8829 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 8830 offsetof(struct sock, sk_bound_dev_if)); 8831 else 8832 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8833 offsetof(struct sock, sk_bound_dev_if)); 8834 break; 8835 8836 case offsetof(struct bpf_sock, mark): 8837 BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4); 8838 8839 if (type == BPF_WRITE) 8840 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 8841 offsetof(struct sock, sk_mark)); 8842 else 8843 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8844 offsetof(struct sock, sk_mark)); 8845 break; 8846 8847 case offsetof(struct bpf_sock, priority): 8848 BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4); 8849 8850 if (type == BPF_WRITE) 8851 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 8852 offsetof(struct sock, sk_priority)); 8853 else 8854 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 8855 offsetof(struct sock, sk_priority)); 8856 break; 8857 8858 case offsetof(struct bpf_sock, family): 8859 *insn++ = BPF_LDX_MEM( 8860 BPF_FIELD_SIZEOF(struct sock_common, skc_family), 8861 si->dst_reg, si->src_reg, 8862 bpf_target_off(struct sock_common, 8863 skc_family, 8864 sizeof_field(struct sock_common, 8865 skc_family), 8866 target_size)); 8867 break; 8868 8869 case offsetof(struct bpf_sock, type): 8870 *insn++ = BPF_LDX_MEM( 8871 BPF_FIELD_SIZEOF(struct sock, sk_type), 8872 si->dst_reg, si->src_reg, 8873 bpf_target_off(struct sock, sk_type, 8874 sizeof_field(struct sock, sk_type), 8875 target_size)); 8876 break; 8877 8878 case offsetof(struct bpf_sock, protocol): 8879 *insn++ = BPF_LDX_MEM( 8880 BPF_FIELD_SIZEOF(struct sock, sk_protocol), 8881 si->dst_reg, si->src_reg, 8882 bpf_target_off(struct sock, sk_protocol, 8883 sizeof_field(struct sock, sk_protocol), 8884 target_size)); 8885 break; 8886 8887 case offsetof(struct bpf_sock, src_ip4): 8888 *insn++ = BPF_LDX_MEM( 8889 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 8890 bpf_target_off(struct sock_common, skc_rcv_saddr, 8891 sizeof_field(struct sock_common, 8892 skc_rcv_saddr), 
8893 target_size)); 8894 break; 8895 8896 case offsetof(struct bpf_sock, dst_ip4): 8897 *insn++ = BPF_LDX_MEM( 8898 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 8899 bpf_target_off(struct sock_common, skc_daddr, 8900 sizeof_field(struct sock_common, 8901 skc_daddr), 8902 target_size)); 8903 break; 8904 8905 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 8906 #if IS_ENABLED(CONFIG_IPV6) 8907 off = si->off; 8908 off -= offsetof(struct bpf_sock, src_ip6[0]); 8909 *insn++ = BPF_LDX_MEM( 8910 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 8911 bpf_target_off( 8912 struct sock_common, 8913 skc_v6_rcv_saddr.s6_addr32[0], 8914 sizeof_field(struct sock_common, 8915 skc_v6_rcv_saddr.s6_addr32[0]), 8916 target_size) + off); 8917 #else 8918 (void)off; 8919 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 8920 #endif 8921 break; 8922 8923 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): 8924 #if IS_ENABLED(CONFIG_IPV6) 8925 off = si->off; 8926 off -= offsetof(struct bpf_sock, dst_ip6[0]); 8927 *insn++ = BPF_LDX_MEM( 8928 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 8929 bpf_target_off(struct sock_common, 8930 skc_v6_daddr.s6_addr32[0], 8931 sizeof_field(struct sock_common, 8932 skc_v6_daddr.s6_addr32[0]), 8933 target_size) + off); 8934 #else 8935 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 8936 *target_size = 4; 8937 #endif 8938 break; 8939 8940 case offsetof(struct bpf_sock, src_port): 8941 *insn++ = BPF_LDX_MEM( 8942 BPF_FIELD_SIZEOF(struct sock_common, skc_num), 8943 si->dst_reg, si->src_reg, 8944 bpf_target_off(struct sock_common, skc_num, 8945 sizeof_field(struct sock_common, 8946 skc_num), 8947 target_size)); 8948 break; 8949 8950 case offsetof(struct bpf_sock, dst_port): 8951 *insn++ = BPF_LDX_MEM( 8952 BPF_FIELD_SIZEOF(struct sock_common, skc_dport), 8953 si->dst_reg, si->src_reg, 8954 bpf_target_off(struct sock_common, skc_dport, 8955 sizeof_field(struct sock_common, 8956 skc_dport), 8957 target_size)); 8958 break; 8959 8960 case offsetof(struct bpf_sock, state): 8961 *insn++ = BPF_LDX_MEM( 8962 BPF_FIELD_SIZEOF(struct sock_common, skc_state), 8963 si->dst_reg, si->src_reg, 8964 bpf_target_off(struct sock_common, skc_state, 8965 sizeof_field(struct sock_common, 8966 skc_state), 8967 target_size)); 8968 break; 8969 case offsetof(struct bpf_sock, rx_queue_mapping): 8970 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING 8971 *insn++ = BPF_LDX_MEM( 8972 BPF_FIELD_SIZEOF(struct sock, sk_rx_queue_mapping), 8973 si->dst_reg, si->src_reg, 8974 bpf_target_off(struct sock, sk_rx_queue_mapping, 8975 sizeof_field(struct sock, 8976 sk_rx_queue_mapping), 8977 target_size)); 8978 *insn++ = BPF_JMP_IMM(BPF_JNE, si->dst_reg, NO_QUEUE_MAPPING, 8979 1); 8980 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1); 8981 #else 8982 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1); 8983 *target_size = 2; 8984 #endif 8985 break; 8986 } 8987 8988 return insn - insn_buf; 8989 } 8990 8991 static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, 8992 const struct bpf_insn *si, 8993 struct bpf_insn *insn_buf, 8994 struct bpf_prog *prog, u32 *target_size) 8995 { 8996 struct bpf_insn *insn = insn_buf; 8997 8998 switch (si->off) { 8999 case offsetof(struct __sk_buff, ifindex): 9000 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 9001 si->dst_reg, si->src_reg, 9002 offsetof(struct sk_buff, dev)); 9003 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9004 bpf_target_off(struct net_device, ifindex, 4, 9005 target_size)); 9006 break; 9007 default: 9008 return bpf_convert_ctx_access(type, si, insn_buf, prog, 
9009 target_size); 9010 } 9011 9012 return insn - insn_buf; 9013 } 9014 9015 static u32 xdp_convert_ctx_access(enum bpf_access_type type, 9016 const struct bpf_insn *si, 9017 struct bpf_insn *insn_buf, 9018 struct bpf_prog *prog, u32 *target_size) 9019 { 9020 struct bpf_insn *insn = insn_buf; 9021 9022 switch (si->off) { 9023 case offsetof(struct xdp_md, data): 9024 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data), 9025 si->dst_reg, si->src_reg, 9026 offsetof(struct xdp_buff, data)); 9027 break; 9028 case offsetof(struct xdp_md, data_meta): 9029 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta), 9030 si->dst_reg, si->src_reg, 9031 offsetof(struct xdp_buff, data_meta)); 9032 break; 9033 case offsetof(struct xdp_md, data_end): 9034 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), 9035 si->dst_reg, si->src_reg, 9036 offsetof(struct xdp_buff, data_end)); 9037 break; 9038 case offsetof(struct xdp_md, ingress_ifindex): 9039 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), 9040 si->dst_reg, si->src_reg, 9041 offsetof(struct xdp_buff, rxq)); 9042 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev), 9043 si->dst_reg, si->dst_reg, 9044 offsetof(struct xdp_rxq_info, dev)); 9045 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9046 offsetof(struct net_device, ifindex)); 9047 break; 9048 case offsetof(struct xdp_md, rx_queue_index): 9049 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), 9050 si->dst_reg, si->src_reg, 9051 offsetof(struct xdp_buff, rxq)); 9052 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9053 offsetof(struct xdp_rxq_info, 9054 queue_index)); 9055 break; 9056 case offsetof(struct xdp_md, egress_ifindex): 9057 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, txq), 9058 si->dst_reg, si->src_reg, 9059 offsetof(struct xdp_buff, txq)); 9060 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_txq_info, dev), 9061 si->dst_reg, si->dst_reg, 9062 offsetof(struct xdp_txq_info, dev)); 9063 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9064 offsetof(struct net_device, ifindex)); 9065 break; 9066 } 9067 9068 return insn - insn_buf; 9069 } 9070 9071 /* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of 9072 * context Structure, F is Field in context structure that contains a pointer 9073 * to Nested Structure of type NS that has the field NF. 9074 * 9075 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make 9076 * sure that SIZE is not greater than actual size of S.F.NF. 9077 * 9078 * If offset OFF is provided, the load happens from that offset relative to 9079 * offset of NF. 9080 */ 9081 #define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \ 9082 do { \ 9083 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \ 9084 si->src_reg, offsetof(S, F)); \ 9085 *insn++ = BPF_LDX_MEM( \ 9086 SIZE, si->dst_reg, si->dst_reg, \ 9087 bpf_target_off(NS, NF, sizeof_field(NS, NF), \ 9088 target_size) \ 9089 + OFF); \ 9090 } while (0) 9091 9092 #define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \ 9093 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \ 9094 BPF_FIELD_SIZEOF(NS, NF), 0) 9095 9096 /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantic similar to 9097 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operation. 
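 * (it emits a BPF_STX_MEM of SIZE bytes instead of a load).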
9098 * 9099 * In addition it uses Temporary Field TF (member of struct S) as the 3rd 9100 * "register" since two registers available in convert_ctx_access are not 9101 * enough: we can't override neither SRC, since it contains value to store, nor 9102 * DST since it contains pointer to context that may be used by later 9103 * instructions. But we need a temporary place to save pointer to nested 9104 * structure whose field we want to store to. 9105 */ 9106 #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \ 9107 do { \ 9108 int tmp_reg = BPF_REG_9; \ 9109 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ 9110 --tmp_reg; \ 9111 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ 9112 --tmp_reg; \ 9113 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \ 9114 offsetof(S, TF)); \ 9115 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ 9116 si->dst_reg, offsetof(S, F)); \ 9117 *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \ 9118 bpf_target_off(NS, NF, sizeof_field(NS, NF), \ 9119 target_size) \ 9120 + OFF); \ 9121 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ 9122 offsetof(S, TF)); \ 9123 } while (0) 9124 9125 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \ 9126 TF) \ 9127 do { \ 9128 if (type == BPF_WRITE) { \ 9129 SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \ 9130 OFF, TF); \ 9131 } else { \ 9132 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \ 9133 S, NS, F, NF, SIZE, OFF); \ 9134 } \ 9135 } while (0) 9136 9137 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \ 9138 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \ 9139 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF) 9140 9141 static u32 sock_addr_convert_ctx_access(enum bpf_access_type type, 9142 const struct bpf_insn *si, 9143 struct bpf_insn *insn_buf, 9144 struct bpf_prog *prog, u32 *target_size) 9145 { 9146 int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port); 9147 struct bpf_insn *insn = insn_buf; 9148 9149 switch (si->off) { 9150 case offsetof(struct bpf_sock_addr, user_family): 9151 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9152 struct sockaddr, uaddr, sa_family); 9153 break; 9154 9155 case offsetof(struct bpf_sock_addr, user_ip4): 9156 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9157 struct bpf_sock_addr_kern, struct sockaddr_in, uaddr, 9158 sin_addr, BPF_SIZE(si->code), 0, tmp_reg); 9159 break; 9160 9161 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 9162 off = si->off; 9163 off -= offsetof(struct bpf_sock_addr, user_ip6[0]); 9164 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9165 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, 9166 sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off, 9167 tmp_reg); 9168 break; 9169 9170 case offsetof(struct bpf_sock_addr, user_port): 9171 /* To get port we need to know sa_family first and then treat 9172 * sockaddr as either sockaddr_in or sockaddr_in6. 9173 * Though we can simplify since port field has same offset and 9174 * size in both structures. 9175 * Here we check this invariant and use just one of the 9176 * structures if it's true. 9177 */ 9178 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) != 9179 offsetof(struct sockaddr_in6, sin6_port)); 9180 BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) != 9181 sizeof_field(struct sockaddr_in6, sin6_port)); 9182 /* Account for sin6_port being smaller than user_port. 
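		 * The bpf_sock_addr ABI exposes user_port as a 4-byte field
		 * while sin_port/sin6_port is only 2 bytes, so the emitted
		 * load/store is clamped to the smaller of the requested access
		 * size and the real port field size.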
*/ 9183 port_size = min(port_size, BPF_LDST_BYTES(si)); 9184 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9185 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, 9186 sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg); 9187 break; 9188 9189 case offsetof(struct bpf_sock_addr, family): 9190 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9191 struct sock, sk, sk_family); 9192 break; 9193 9194 case offsetof(struct bpf_sock_addr, type): 9195 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9196 struct sock, sk, sk_type); 9197 break; 9198 9199 case offsetof(struct bpf_sock_addr, protocol): 9200 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9201 struct sock, sk, sk_protocol); 9202 break; 9203 9204 case offsetof(struct bpf_sock_addr, msg_src_ip4): 9205 /* Treat t_ctx as struct in_addr for msg_src_ip4. */ 9206 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9207 struct bpf_sock_addr_kern, struct in_addr, t_ctx, 9208 s_addr, BPF_SIZE(si->code), 0, tmp_reg); 9209 break; 9210 9211 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 9212 msg_src_ip6[3]): 9213 off = si->off; 9214 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]); 9215 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */ 9216 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9217 struct bpf_sock_addr_kern, struct in6_addr, t_ctx, 9218 s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); 9219 break; 9220 case offsetof(struct bpf_sock_addr, sk): 9221 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk), 9222 si->dst_reg, si->src_reg, 9223 offsetof(struct bpf_sock_addr_kern, sk)); 9224 break; 9225 } 9226 9227 return insn - insn_buf; 9228 } 9229 9230 static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, 9231 const struct bpf_insn *si, 9232 struct bpf_insn *insn_buf, 9233 struct bpf_prog *prog, 9234 u32 *target_size) 9235 { 9236 struct bpf_insn *insn = insn_buf; 9237 int off; 9238 9239 /* Helper macro for adding read access to tcp_sock or sock fields. 
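	 * The generated code checks bpf_sock_ops_kern->is_fullsock before
	 * dereferencing the socket, so tcp_sock/sock fields are only read on
	 * full sockets.  Roughly, ignoring the dst_reg == src_reg case that
	 * spills/restores a scratch register via the temp field, it expands to:
	 *
	 *   if (!ops_kern->is_fullsock)
	 *           goto out;
	 *   dst_reg = ops_kern->sk;
	 *   dst_reg = ((OBJ *)dst_reg)->OBJ_FIELD;
	 *  out: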
*/ 9240 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ 9241 do { \ 9242 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \ 9243 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ 9244 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ 9245 if (si->dst_reg == reg || si->src_reg == reg) \ 9246 reg--; \ 9247 if (si->dst_reg == reg || si->src_reg == reg) \ 9248 reg--; \ 9249 if (si->dst_reg == si->src_reg) { \ 9250 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ 9251 offsetof(struct bpf_sock_ops_kern, \ 9252 temp)); \ 9253 fullsock_reg = reg; \ 9254 jmp += 2; \ 9255 } \ 9256 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9257 struct bpf_sock_ops_kern, \ 9258 is_fullsock), \ 9259 fullsock_reg, si->src_reg, \ 9260 offsetof(struct bpf_sock_ops_kern, \ 9261 is_fullsock)); \ 9262 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ 9263 if (si->dst_reg == si->src_reg) \ 9264 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 9265 offsetof(struct bpf_sock_ops_kern, \ 9266 temp)); \ 9267 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9268 struct bpf_sock_ops_kern, sk),\ 9269 si->dst_reg, si->src_reg, \ 9270 offsetof(struct bpf_sock_ops_kern, sk));\ 9271 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \ 9272 OBJ_FIELD), \ 9273 si->dst_reg, si->dst_reg, \ 9274 offsetof(OBJ, OBJ_FIELD)); \ 9275 if (si->dst_reg == si->src_reg) { \ 9276 *insn++ = BPF_JMP_A(1); \ 9277 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 9278 offsetof(struct bpf_sock_ops_kern, \ 9279 temp)); \ 9280 } \ 9281 } while (0) 9282 9283 #define SOCK_OPS_GET_SK() \ 9284 do { \ 9285 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \ 9286 if (si->dst_reg == reg || si->src_reg == reg) \ 9287 reg--; \ 9288 if (si->dst_reg == reg || si->src_reg == reg) \ 9289 reg--; \ 9290 if (si->dst_reg == si->src_reg) { \ 9291 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ 9292 offsetof(struct bpf_sock_ops_kern, \ 9293 temp)); \ 9294 fullsock_reg = reg; \ 9295 jmp += 2; \ 9296 } \ 9297 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9298 struct bpf_sock_ops_kern, \ 9299 is_fullsock), \ 9300 fullsock_reg, si->src_reg, \ 9301 offsetof(struct bpf_sock_ops_kern, \ 9302 is_fullsock)); \ 9303 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ 9304 if (si->dst_reg == si->src_reg) \ 9305 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 9306 offsetof(struct bpf_sock_ops_kern, \ 9307 temp)); \ 9308 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9309 struct bpf_sock_ops_kern, sk),\ 9310 si->dst_reg, si->src_reg, \ 9311 offsetof(struct bpf_sock_ops_kern, sk));\ 9312 if (si->dst_reg == si->src_reg) { \ 9313 *insn++ = BPF_JMP_A(1); \ 9314 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 9315 offsetof(struct bpf_sock_ops_kern, \ 9316 temp)); \ 9317 } \ 9318 } while (0) 9319 9320 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \ 9321 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock) 9322 9323 /* Helper macro for adding write access to tcp_sock or sock fields. 9324 * The macro is called with two registers, dst_reg which contains a pointer 9325 * to ctx (context) and src_reg which contains the value that should be 9326 * stored. However, we need an additional register since we cannot overwrite 9327 * dst_reg because it may be used later in the program. 9328 * Instead we "borrow" one of the other register. We first save its value 9329 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore 9330 * it at the end of the macro. 
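	 * BPF_REG_9 is tried first as this scratch register and is stepped
	 * down to R8 or R7 if it happens to collide with dst_reg or src_reg.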
9331 */ 9332 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ 9333 do { \ 9334 int reg = BPF_REG_9; \ 9335 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ 9336 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ 9337 if (si->dst_reg == reg || si->src_reg == reg) \ 9338 reg--; \ 9339 if (si->dst_reg == reg || si->src_reg == reg) \ 9340 reg--; \ 9341 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \ 9342 offsetof(struct bpf_sock_ops_kern, \ 9343 temp)); \ 9344 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9345 struct bpf_sock_ops_kern, \ 9346 is_fullsock), \ 9347 reg, si->dst_reg, \ 9348 offsetof(struct bpf_sock_ops_kern, \ 9349 is_fullsock)); \ 9350 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \ 9351 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9352 struct bpf_sock_ops_kern, sk),\ 9353 reg, si->dst_reg, \ 9354 offsetof(struct bpf_sock_ops_kern, sk));\ 9355 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \ 9356 reg, si->src_reg, \ 9357 offsetof(OBJ, OBJ_FIELD)); \ 9358 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \ 9359 offsetof(struct bpf_sock_ops_kern, \ 9360 temp)); \ 9361 } while (0) 9362 9363 #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \ 9364 do { \ 9365 if (TYPE == BPF_WRITE) \ 9366 SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ 9367 else \ 9368 SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ 9369 } while (0) 9370 9371 if (insn > insn_buf) 9372 return insn - insn_buf; 9373 9374 switch (si->off) { 9375 case offsetof(struct bpf_sock_ops, op): 9376 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 9377 op), 9378 si->dst_reg, si->src_reg, 9379 offsetof(struct bpf_sock_ops_kern, op)); 9380 break; 9381 9382 case offsetof(struct bpf_sock_ops, replylong[0]) ... 9383 offsetof(struct bpf_sock_ops, replylong[3]): 9384 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) != 9385 sizeof_field(struct bpf_sock_ops_kern, reply)); 9386 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) != 9387 sizeof_field(struct bpf_sock_ops_kern, replylong)); 9388 off = si->off; 9389 off -= offsetof(struct bpf_sock_ops, replylong[0]); 9390 off += offsetof(struct bpf_sock_ops_kern, replylong[0]); 9391 if (type == BPF_WRITE) 9392 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9393 off); 9394 else 9395 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9396 off); 9397 break; 9398 9399 case offsetof(struct bpf_sock_ops, family): 9400 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 9401 9402 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9403 struct bpf_sock_ops_kern, sk), 9404 si->dst_reg, si->src_reg, 9405 offsetof(struct bpf_sock_ops_kern, sk)); 9406 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9407 offsetof(struct sock_common, skc_family)); 9408 break; 9409 9410 case offsetof(struct bpf_sock_ops, remote_ip4): 9411 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 9412 9413 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9414 struct bpf_sock_ops_kern, sk), 9415 si->dst_reg, si->src_reg, 9416 offsetof(struct bpf_sock_ops_kern, sk)); 9417 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9418 offsetof(struct sock_common, skc_daddr)); 9419 break; 9420 9421 case offsetof(struct bpf_sock_ops, local_ip4): 9422 BUILD_BUG_ON(sizeof_field(struct sock_common, 9423 skc_rcv_saddr) != 4); 9424 9425 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9426 struct bpf_sock_ops_kern, sk), 9427 si->dst_reg, si->src_reg, 9428 offsetof(struct bpf_sock_ops_kern, sk)); 9429 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9430 offsetof(struct 
sock_common, 9431 skc_rcv_saddr)); 9432 break; 9433 9434 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ... 9435 offsetof(struct bpf_sock_ops, remote_ip6[3]): 9436 #if IS_ENABLED(CONFIG_IPV6) 9437 BUILD_BUG_ON(sizeof_field(struct sock_common, 9438 skc_v6_daddr.s6_addr32[0]) != 4); 9439 9440 off = si->off; 9441 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]); 9442 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9443 struct bpf_sock_ops_kern, sk), 9444 si->dst_reg, si->src_reg, 9445 offsetof(struct bpf_sock_ops_kern, sk)); 9446 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9447 offsetof(struct sock_common, 9448 skc_v6_daddr.s6_addr32[0]) + 9449 off); 9450 #else 9451 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9452 #endif 9453 break; 9454 9455 case offsetof(struct bpf_sock_ops, local_ip6[0]) ... 9456 offsetof(struct bpf_sock_ops, local_ip6[3]): 9457 #if IS_ENABLED(CONFIG_IPV6) 9458 BUILD_BUG_ON(sizeof_field(struct sock_common, 9459 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 9460 9461 off = si->off; 9462 off -= offsetof(struct bpf_sock_ops, local_ip6[0]); 9463 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9464 struct bpf_sock_ops_kern, sk), 9465 si->dst_reg, si->src_reg, 9466 offsetof(struct bpf_sock_ops_kern, sk)); 9467 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9468 offsetof(struct sock_common, 9469 skc_v6_rcv_saddr.s6_addr32[0]) + 9470 off); 9471 #else 9472 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9473 #endif 9474 break; 9475 9476 case offsetof(struct bpf_sock_ops, remote_port): 9477 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 9478 9479 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9480 struct bpf_sock_ops_kern, sk), 9481 si->dst_reg, si->src_reg, 9482 offsetof(struct bpf_sock_ops_kern, sk)); 9483 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9484 offsetof(struct sock_common, skc_dport)); 9485 #ifndef __BIG_ENDIAN_BITFIELD 9486 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 9487 #endif 9488 break; 9489 9490 case offsetof(struct bpf_sock_ops, local_port): 9491 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 9492 9493 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9494 struct bpf_sock_ops_kern, sk), 9495 si->dst_reg, si->src_reg, 9496 offsetof(struct bpf_sock_ops_kern, sk)); 9497 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9498 offsetof(struct sock_common, skc_num)); 9499 break; 9500 9501 case offsetof(struct bpf_sock_ops, is_fullsock): 9502 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9503 struct bpf_sock_ops_kern, 9504 is_fullsock), 9505 si->dst_reg, si->src_reg, 9506 offsetof(struct bpf_sock_ops_kern, 9507 is_fullsock)); 9508 break; 9509 9510 case offsetof(struct bpf_sock_ops, state): 9511 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1); 9512 9513 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9514 struct bpf_sock_ops_kern, sk), 9515 si->dst_reg, si->src_reg, 9516 offsetof(struct bpf_sock_ops_kern, sk)); 9517 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg, 9518 offsetof(struct sock_common, skc_state)); 9519 break; 9520 9521 case offsetof(struct bpf_sock_ops, rtt_min): 9522 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != 9523 sizeof(struct minmax)); 9524 BUILD_BUG_ON(sizeof(struct minmax) < 9525 sizeof(struct minmax_sample)); 9526 9527 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9528 struct bpf_sock_ops_kern, sk), 9529 si->dst_reg, si->src_reg, 9530 offsetof(struct bpf_sock_ops_kern, sk)); 9531 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9532 offsetof(struct tcp_sock, rtt_min) + 9533 sizeof_field(struct 
minmax_sample, t)); 9534 break; 9535 9536 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags): 9537 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags, 9538 struct tcp_sock); 9539 break; 9540 9541 case offsetof(struct bpf_sock_ops, sk_txhash): 9542 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, 9543 struct sock, type); 9544 break; 9545 case offsetof(struct bpf_sock_ops, snd_cwnd): 9546 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd); 9547 break; 9548 case offsetof(struct bpf_sock_ops, srtt_us): 9549 SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us); 9550 break; 9551 case offsetof(struct bpf_sock_ops, snd_ssthresh): 9552 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh); 9553 break; 9554 case offsetof(struct bpf_sock_ops, rcv_nxt): 9555 SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt); 9556 break; 9557 case offsetof(struct bpf_sock_ops, snd_nxt): 9558 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt); 9559 break; 9560 case offsetof(struct bpf_sock_ops, snd_una): 9561 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una); 9562 break; 9563 case offsetof(struct bpf_sock_ops, mss_cache): 9564 SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache); 9565 break; 9566 case offsetof(struct bpf_sock_ops, ecn_flags): 9567 SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags); 9568 break; 9569 case offsetof(struct bpf_sock_ops, rate_delivered): 9570 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered); 9571 break; 9572 case offsetof(struct bpf_sock_ops, rate_interval_us): 9573 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us); 9574 break; 9575 case offsetof(struct bpf_sock_ops, packets_out): 9576 SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out); 9577 break; 9578 case offsetof(struct bpf_sock_ops, retrans_out): 9579 SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out); 9580 break; 9581 case offsetof(struct bpf_sock_ops, total_retrans): 9582 SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans); 9583 break; 9584 case offsetof(struct bpf_sock_ops, segs_in): 9585 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in); 9586 break; 9587 case offsetof(struct bpf_sock_ops, data_segs_in): 9588 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in); 9589 break; 9590 case offsetof(struct bpf_sock_ops, segs_out): 9591 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out); 9592 break; 9593 case offsetof(struct bpf_sock_ops, data_segs_out): 9594 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out); 9595 break; 9596 case offsetof(struct bpf_sock_ops, lost_out): 9597 SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out); 9598 break; 9599 case offsetof(struct bpf_sock_ops, sacked_out): 9600 SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out); 9601 break; 9602 case offsetof(struct bpf_sock_ops, bytes_received): 9603 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received); 9604 break; 9605 case offsetof(struct bpf_sock_ops, bytes_acked): 9606 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked); 9607 break; 9608 case offsetof(struct bpf_sock_ops, sk): 9609 SOCK_OPS_GET_SK(); 9610 break; 9611 case offsetof(struct bpf_sock_ops, skb_data_end): 9612 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 9613 skb_data_end), 9614 si->dst_reg, si->src_reg, 9615 offsetof(struct bpf_sock_ops_kern, 9616 skb_data_end)); 9617 break; 9618 case offsetof(struct bpf_sock_ops, skb_data): 9619 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 9620 skb), 9621 si->dst_reg, si->src_reg, 9622 offsetof(struct bpf_sock_ops_kern, 9623 skb)); 9624 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 9625 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), 9626 si->dst_reg, si->dst_reg, 9627 offsetof(struct sk_buff, data)); 9628 break; 9629 case offsetof(struct bpf_sock_ops, skb_len): 9630 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct 
bpf_sock_ops_kern, 9631 skb), 9632 si->dst_reg, si->src_reg, 9633 offsetof(struct bpf_sock_ops_kern, 9634 skb)); 9635 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 9636 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len), 9637 si->dst_reg, si->dst_reg, 9638 offsetof(struct sk_buff, len)); 9639 break; 9640 case offsetof(struct bpf_sock_ops, skb_tcp_flags): 9641 off = offsetof(struct sk_buff, cb); 9642 off += offsetof(struct tcp_skb_cb, tcp_flags); 9643 *target_size = sizeof_field(struct tcp_skb_cb, tcp_flags); 9644 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 9645 skb), 9646 si->dst_reg, si->src_reg, 9647 offsetof(struct bpf_sock_ops_kern, 9648 skb)); 9649 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 9650 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb, 9651 tcp_flags), 9652 si->dst_reg, si->dst_reg, off); 9653 break; 9654 } 9655 return insn - insn_buf; 9656 } 9657 9658 static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, 9659 const struct bpf_insn *si, 9660 struct bpf_insn *insn_buf, 9661 struct bpf_prog *prog, u32 *target_size) 9662 { 9663 struct bpf_insn *insn = insn_buf; 9664 int off; 9665 9666 switch (si->off) { 9667 case offsetof(struct __sk_buff, data_end): 9668 off = si->off; 9669 off -= offsetof(struct __sk_buff, data_end); 9670 off += offsetof(struct sk_buff, cb); 9671 off += offsetof(struct tcp_skb_cb, bpf.data_end); 9672 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, 9673 si->src_reg, off); 9674 break; 9675 default: 9676 return bpf_convert_ctx_access(type, si, insn_buf, prog, 9677 target_size); 9678 } 9679 9680 return insn - insn_buf; 9681 } 9682 9683 static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, 9684 const struct bpf_insn *si, 9685 struct bpf_insn *insn_buf, 9686 struct bpf_prog *prog, u32 *target_size) 9687 { 9688 struct bpf_insn *insn = insn_buf; 9689 #if IS_ENABLED(CONFIG_IPV6) 9690 int off; 9691 #endif 9692 9693 /* convert ctx uses the fact sg element is first in struct */ 9694 BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0); 9695 9696 switch (si->off) { 9697 case offsetof(struct sk_msg_md, data): 9698 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data), 9699 si->dst_reg, si->src_reg, 9700 offsetof(struct sk_msg, data)); 9701 break; 9702 case offsetof(struct sk_msg_md, data_end): 9703 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end), 9704 si->dst_reg, si->src_reg, 9705 offsetof(struct sk_msg, data_end)); 9706 break; 9707 case offsetof(struct sk_msg_md, family): 9708 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 9709 9710 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9711 struct sk_msg, sk), 9712 si->dst_reg, si->src_reg, 9713 offsetof(struct sk_msg, sk)); 9714 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9715 offsetof(struct sock_common, skc_family)); 9716 break; 9717 9718 case offsetof(struct sk_msg_md, remote_ip4): 9719 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 9720 9721 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9722 struct sk_msg, sk), 9723 si->dst_reg, si->src_reg, 9724 offsetof(struct sk_msg, sk)); 9725 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9726 offsetof(struct sock_common, skc_daddr)); 9727 break; 9728 9729 case offsetof(struct sk_msg_md, local_ip4): 9730 BUILD_BUG_ON(sizeof_field(struct sock_common, 9731 skc_rcv_saddr) != 4); 9732 9733 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9734 struct sk_msg, sk), 9735 si->dst_reg, si->src_reg, 9736 offsetof(struct sk_msg, sk)); 9737 *insn++ = BPF_LDX_MEM(BPF_W, 
si->dst_reg, si->dst_reg, 9738 offsetof(struct sock_common, 9739 skc_rcv_saddr)); 9740 break; 9741 9742 case offsetof(struct sk_msg_md, remote_ip6[0]) ... 9743 offsetof(struct sk_msg_md, remote_ip6[3]): 9744 #if IS_ENABLED(CONFIG_IPV6) 9745 BUILD_BUG_ON(sizeof_field(struct sock_common, 9746 skc_v6_daddr.s6_addr32[0]) != 4); 9747 9748 off = si->off; 9749 off -= offsetof(struct sk_msg_md, remote_ip6[0]); 9750 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9751 struct sk_msg, sk), 9752 si->dst_reg, si->src_reg, 9753 offsetof(struct sk_msg, sk)); 9754 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9755 offsetof(struct sock_common, 9756 skc_v6_daddr.s6_addr32[0]) + 9757 off); 9758 #else 9759 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9760 #endif 9761 break; 9762 9763 case offsetof(struct sk_msg_md, local_ip6[0]) ... 9764 offsetof(struct sk_msg_md, local_ip6[3]): 9765 #if IS_ENABLED(CONFIG_IPV6) 9766 BUILD_BUG_ON(sizeof_field(struct sock_common, 9767 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 9768 9769 off = si->off; 9770 off -= offsetof(struct sk_msg_md, local_ip6[0]); 9771 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9772 struct sk_msg, sk), 9773 si->dst_reg, si->src_reg, 9774 offsetof(struct sk_msg, sk)); 9775 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9776 offsetof(struct sock_common, 9777 skc_v6_rcv_saddr.s6_addr32[0]) + 9778 off); 9779 #else 9780 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9781 #endif 9782 break; 9783 9784 case offsetof(struct sk_msg_md, remote_port): 9785 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 9786 9787 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9788 struct sk_msg, sk), 9789 si->dst_reg, si->src_reg, 9790 offsetof(struct sk_msg, sk)); 9791 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9792 offsetof(struct sock_common, skc_dport)); 9793 #ifndef __BIG_ENDIAN_BITFIELD 9794 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 9795 #endif 9796 break; 9797 9798 case offsetof(struct sk_msg_md, local_port): 9799 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 9800 9801 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9802 struct sk_msg, sk), 9803 si->dst_reg, si->src_reg, 9804 offsetof(struct sk_msg, sk)); 9805 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9806 offsetof(struct sock_common, skc_num)); 9807 break; 9808 9809 case offsetof(struct sk_msg_md, size): 9810 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size), 9811 si->dst_reg, si->src_reg, 9812 offsetof(struct sk_msg_sg, size)); 9813 break; 9814 9815 case offsetof(struct sk_msg_md, sk): 9816 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, sk), 9817 si->dst_reg, si->src_reg, 9818 offsetof(struct sk_msg, sk)); 9819 break; 9820 } 9821 9822 return insn - insn_buf; 9823 } 9824 9825 const struct bpf_verifier_ops sk_filter_verifier_ops = { 9826 .get_func_proto = sk_filter_func_proto, 9827 .is_valid_access = sk_filter_is_valid_access, 9828 .convert_ctx_access = bpf_convert_ctx_access, 9829 .gen_ld_abs = bpf_gen_ld_abs, 9830 }; 9831 9832 const struct bpf_prog_ops sk_filter_prog_ops = { 9833 .test_run = bpf_prog_test_run_skb, 9834 }; 9835 9836 const struct bpf_verifier_ops tc_cls_act_verifier_ops = { 9837 .get_func_proto = tc_cls_act_func_proto, 9838 .is_valid_access = tc_cls_act_is_valid_access, 9839 .convert_ctx_access = tc_cls_act_convert_ctx_access, 9840 .gen_prologue = tc_cls_act_prologue, 9841 .gen_ld_abs = bpf_gen_ld_abs, 9842 }; 9843 9844 const struct bpf_prog_ops tc_cls_act_prog_ops = { 9845 .test_run = bpf_prog_test_run_skb, 9846 }; 9847 9848 const struct 
bpf_verifier_ops xdp_verifier_ops = { 9849 .get_func_proto = xdp_func_proto, 9850 .is_valid_access = xdp_is_valid_access, 9851 .convert_ctx_access = xdp_convert_ctx_access, 9852 .gen_prologue = bpf_noop_prologue, 9853 }; 9854 9855 const struct bpf_prog_ops xdp_prog_ops = { 9856 .test_run = bpf_prog_test_run_xdp, 9857 }; 9858 9859 const struct bpf_verifier_ops cg_skb_verifier_ops = { 9860 .get_func_proto = cg_skb_func_proto, 9861 .is_valid_access = cg_skb_is_valid_access, 9862 .convert_ctx_access = bpf_convert_ctx_access, 9863 }; 9864 9865 const struct bpf_prog_ops cg_skb_prog_ops = { 9866 .test_run = bpf_prog_test_run_skb, 9867 }; 9868 9869 const struct bpf_verifier_ops lwt_in_verifier_ops = { 9870 .get_func_proto = lwt_in_func_proto, 9871 .is_valid_access = lwt_is_valid_access, 9872 .convert_ctx_access = bpf_convert_ctx_access, 9873 }; 9874 9875 const struct bpf_prog_ops lwt_in_prog_ops = { 9876 .test_run = bpf_prog_test_run_skb, 9877 }; 9878 9879 const struct bpf_verifier_ops lwt_out_verifier_ops = { 9880 .get_func_proto = lwt_out_func_proto, 9881 .is_valid_access = lwt_is_valid_access, 9882 .convert_ctx_access = bpf_convert_ctx_access, 9883 }; 9884 9885 const struct bpf_prog_ops lwt_out_prog_ops = { 9886 .test_run = bpf_prog_test_run_skb, 9887 }; 9888 9889 const struct bpf_verifier_ops lwt_xmit_verifier_ops = { 9890 .get_func_proto = lwt_xmit_func_proto, 9891 .is_valid_access = lwt_is_valid_access, 9892 .convert_ctx_access = bpf_convert_ctx_access, 9893 .gen_prologue = tc_cls_act_prologue, 9894 }; 9895 9896 const struct bpf_prog_ops lwt_xmit_prog_ops = { 9897 .test_run = bpf_prog_test_run_skb, 9898 }; 9899 9900 const struct bpf_verifier_ops lwt_seg6local_verifier_ops = { 9901 .get_func_proto = lwt_seg6local_func_proto, 9902 .is_valid_access = lwt_is_valid_access, 9903 .convert_ctx_access = bpf_convert_ctx_access, 9904 }; 9905 9906 const struct bpf_prog_ops lwt_seg6local_prog_ops = { 9907 .test_run = bpf_prog_test_run_skb, 9908 }; 9909 9910 const struct bpf_verifier_ops cg_sock_verifier_ops = { 9911 .get_func_proto = sock_filter_func_proto, 9912 .is_valid_access = sock_filter_is_valid_access, 9913 .convert_ctx_access = bpf_sock_convert_ctx_access, 9914 }; 9915 9916 const struct bpf_prog_ops cg_sock_prog_ops = { 9917 }; 9918 9919 const struct bpf_verifier_ops cg_sock_addr_verifier_ops = { 9920 .get_func_proto = sock_addr_func_proto, 9921 .is_valid_access = sock_addr_is_valid_access, 9922 .convert_ctx_access = sock_addr_convert_ctx_access, 9923 }; 9924 9925 const struct bpf_prog_ops cg_sock_addr_prog_ops = { 9926 }; 9927 9928 const struct bpf_verifier_ops sock_ops_verifier_ops = { 9929 .get_func_proto = sock_ops_func_proto, 9930 .is_valid_access = sock_ops_is_valid_access, 9931 .convert_ctx_access = sock_ops_convert_ctx_access, 9932 }; 9933 9934 const struct bpf_prog_ops sock_ops_prog_ops = { 9935 }; 9936 9937 const struct bpf_verifier_ops sk_skb_verifier_ops = { 9938 .get_func_proto = sk_skb_func_proto, 9939 .is_valid_access = sk_skb_is_valid_access, 9940 .convert_ctx_access = sk_skb_convert_ctx_access, 9941 .gen_prologue = sk_skb_prologue, 9942 }; 9943 9944 const struct bpf_prog_ops sk_skb_prog_ops = { 9945 }; 9946 9947 const struct bpf_verifier_ops sk_msg_verifier_ops = { 9948 .get_func_proto = sk_msg_func_proto, 9949 .is_valid_access = sk_msg_is_valid_access, 9950 .convert_ctx_access = sk_msg_convert_ctx_access, 9951 .gen_prologue = bpf_noop_prologue, 9952 }; 9953 9954 const struct bpf_prog_ops sk_msg_prog_ops = { 9955 }; 9956 9957 const struct bpf_verifier_ops 
flow_dissector_verifier_ops = { 9958 .get_func_proto = flow_dissector_func_proto, 9959 .is_valid_access = flow_dissector_is_valid_access, 9960 .convert_ctx_access = flow_dissector_convert_ctx_access, 9961 }; 9962 9963 const struct bpf_prog_ops flow_dissector_prog_ops = { 9964 .test_run = bpf_prog_test_run_flow_dissector, 9965 }; 9966 9967 int sk_detach_filter(struct sock *sk) 9968 { 9969 int ret = -ENOENT; 9970 struct sk_filter *filter; 9971 9972 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 9973 return -EPERM; 9974 9975 filter = rcu_dereference_protected(sk->sk_filter, 9976 lockdep_sock_is_held(sk)); 9977 if (filter) { 9978 RCU_INIT_POINTER(sk->sk_filter, NULL); 9979 sk_filter_uncharge(sk, filter); 9980 ret = 0; 9981 } 9982 9983 return ret; 9984 } 9985 EXPORT_SYMBOL_GPL(sk_detach_filter); 9986 9987 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, 9988 unsigned int len) 9989 { 9990 struct sock_fprog_kern *fprog; 9991 struct sk_filter *filter; 9992 int ret = 0; 9993 9994 lock_sock(sk); 9995 filter = rcu_dereference_protected(sk->sk_filter, 9996 lockdep_sock_is_held(sk)); 9997 if (!filter) 9998 goto out; 9999 10000 /* We're copying the filter that has been originally attached, 10001 * so no conversion/decode needed anymore. eBPF programs that 10002 * have no original program cannot be dumped through this. 10003 */ 10004 ret = -EACCES; 10005 fprog = filter->prog->orig_prog; 10006 if (!fprog) 10007 goto out; 10008 10009 ret = fprog->len; 10010 if (!len) 10011 /* User space only enquires number of filter blocks. */ 10012 goto out; 10013 10014 ret = -EINVAL; 10015 if (len < fprog->len) 10016 goto out; 10017 10018 ret = -EFAULT; 10019 if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) 10020 goto out; 10021 10022 /* Instead of bytes, the API requests to return the number 10023 * of filter blocks. 10024 */ 10025 ret = fprog->len; 10026 out: 10027 release_sock(sk); 10028 return ret; 10029 } 10030 10031 #ifdef CONFIG_INET 10032 static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, 10033 struct sock_reuseport *reuse, 10034 struct sock *sk, struct sk_buff *skb, 10035 u32 hash) 10036 { 10037 reuse_kern->skb = skb; 10038 reuse_kern->sk = sk; 10039 reuse_kern->selected_sk = NULL; 10040 reuse_kern->data_end = skb->data + skb_headlen(skb); 10041 reuse_kern->hash = hash; 10042 reuse_kern->reuseport_id = reuse->reuseport_id; 10043 reuse_kern->bind_inany = reuse->bind_inany; 10044 } 10045 10046 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, 10047 struct bpf_prog *prog, struct sk_buff *skb, 10048 u32 hash) 10049 { 10050 struct sk_reuseport_kern reuse_kern; 10051 enum sk_action action; 10052 10053 bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash); 10054 action = BPF_PROG_RUN(prog, &reuse_kern); 10055 10056 if (action == SK_PASS) 10057 return reuse_kern.selected_sk; 10058 else 10059 return ERR_PTR(-ECONNREFUSED); 10060 } 10061 10062 BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, 10063 struct bpf_map *, map, void *, key, u32, flags) 10064 { 10065 bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; 10066 struct sock_reuseport *reuse; 10067 struct sock *selected_sk; 10068 10069 selected_sk = map->ops->map_lookup_elem(map, key); 10070 if (!selected_sk) 10071 return -ENOENT; 10072 10073 reuse = rcu_dereference(selected_sk->sk_reuseport_cb); 10074 if (!reuse) { 10075 /* Lookup in sock_map can return TCP ESTABLISHED sockets. 
*/ 10076 if (sk_is_refcounted(selected_sk)) 10077 sock_put(selected_sk); 10078 10079 /* reuseport_array has only sk with non NULL sk_reuseport_cb. 10080 * The only (!reuse) case here is - the sk has already been 10081 * unhashed (e.g. by close()), so treat it as -ENOENT. 10082 * 10083 * Other maps (e.g. sock_map) do not provide this guarantee and 10084 * the sk may never be in the reuseport group to begin with. 10085 */ 10086 return is_sockarray ? -ENOENT : -EINVAL; 10087 } 10088 10089 if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { 10090 struct sock *sk = reuse_kern->sk; 10091 10092 if (sk->sk_protocol != selected_sk->sk_protocol) 10093 return -EPROTOTYPE; 10094 else if (sk->sk_family != selected_sk->sk_family) 10095 return -EAFNOSUPPORT; 10096 10097 /* Catch all. Likely bound to a different sockaddr. */ 10098 return -EBADFD; 10099 } 10100 10101 reuse_kern->selected_sk = selected_sk; 10102 10103 return 0; 10104 } 10105 10106 static const struct bpf_func_proto sk_select_reuseport_proto = { 10107 .func = sk_select_reuseport, 10108 .gpl_only = false, 10109 .ret_type = RET_INTEGER, 10110 .arg1_type = ARG_PTR_TO_CTX, 10111 .arg2_type = ARG_CONST_MAP_PTR, 10112 .arg3_type = ARG_PTR_TO_MAP_KEY, 10113 .arg4_type = ARG_ANYTHING, 10114 }; 10115 10116 BPF_CALL_4(sk_reuseport_load_bytes, 10117 const struct sk_reuseport_kern *, reuse_kern, u32, offset, 10118 void *, to, u32, len) 10119 { 10120 return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len); 10121 } 10122 10123 static const struct bpf_func_proto sk_reuseport_load_bytes_proto = { 10124 .func = sk_reuseport_load_bytes, 10125 .gpl_only = false, 10126 .ret_type = RET_INTEGER, 10127 .arg1_type = ARG_PTR_TO_CTX, 10128 .arg2_type = ARG_ANYTHING, 10129 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 10130 .arg4_type = ARG_CONST_SIZE, 10131 }; 10132 10133 BPF_CALL_5(sk_reuseport_load_bytes_relative, 10134 const struct sk_reuseport_kern *, reuse_kern, u32, offset, 10135 void *, to, u32, len, u32, start_header) 10136 { 10137 return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to, 10138 len, start_header); 10139 } 10140 10141 static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = { 10142 .func = sk_reuseport_load_bytes_relative, 10143 .gpl_only = false, 10144 .ret_type = RET_INTEGER, 10145 .arg1_type = ARG_PTR_TO_CTX, 10146 .arg2_type = ARG_ANYTHING, 10147 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 10148 .arg4_type = ARG_CONST_SIZE, 10149 .arg5_type = ARG_ANYTHING, 10150 }; 10151 10152 static const struct bpf_func_proto * 10153 sk_reuseport_func_proto(enum bpf_func_id func_id, 10154 const struct bpf_prog *prog) 10155 { 10156 switch (func_id) { 10157 case BPF_FUNC_sk_select_reuseport: 10158 return &sk_select_reuseport_proto; 10159 case BPF_FUNC_skb_load_bytes: 10160 return &sk_reuseport_load_bytes_proto; 10161 case BPF_FUNC_skb_load_bytes_relative: 10162 return &sk_reuseport_load_bytes_relative_proto; 10163 default: 10164 return bpf_base_func_proto(func_id); 10165 } 10166 } 10167 10168 static bool 10169 sk_reuseport_is_valid_access(int off, int size, 10170 enum bpf_access_type type, 10171 const struct bpf_prog *prog, 10172 struct bpf_insn_access_aux *info) 10173 { 10174 const u32 size_default = sizeof(__u32); 10175 10176 if (off < 0 || off >= sizeof(struct sk_reuseport_md) || 10177 off % size || type != BPF_READ) 10178 return false; 10179 10180 switch (off) { 10181 case offsetof(struct sk_reuseport_md, data): 10182 info->reg_type = PTR_TO_PACKET; 10183 return size == sizeof(__u64); 10184 10185 case offsetof(struct 
sk_reuseport_md, data_end): 10186 info->reg_type = PTR_TO_PACKET_END; 10187 return size == sizeof(__u64); 10188 10189 case offsetof(struct sk_reuseport_md, hash): 10190 return size == size_default; 10191 10192 /* Fields that allow narrowing */ 10193 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): 10194 if (size < sizeof_field(struct sk_buff, protocol)) 10195 return false; 10196 fallthrough; 10197 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): 10198 case bpf_ctx_range(struct sk_reuseport_md, bind_inany): 10199 case bpf_ctx_range(struct sk_reuseport_md, len): 10200 bpf_ctx_record_field_size(info, size_default); 10201 return bpf_ctx_narrow_access_ok(off, size, size_default); 10202 10203 default: 10204 return false; 10205 } 10206 } 10207 10208 #define SK_REUSEPORT_LOAD_FIELD(F) ({ \ 10209 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ 10210 si->dst_reg, si->src_reg, \ 10211 bpf_target_off(struct sk_reuseport_kern, F, \ 10212 sizeof_field(struct sk_reuseport_kern, F), \ 10213 target_size)); \ 10214 }) 10215 10216 #define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \ 10217 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ 10218 struct sk_buff, \ 10219 skb, \ 10220 SKB_FIELD) 10221 10222 #define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD) \ 10223 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ 10224 struct sock, \ 10225 sk, \ 10226 SK_FIELD) 10227 10228 static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, 10229 const struct bpf_insn *si, 10230 struct bpf_insn *insn_buf, 10231 struct bpf_prog *prog, 10232 u32 *target_size) 10233 { 10234 struct bpf_insn *insn = insn_buf; 10235 10236 switch (si->off) { 10237 case offsetof(struct sk_reuseport_md, data): 10238 SK_REUSEPORT_LOAD_SKB_FIELD(data); 10239 break; 10240 10241 case offsetof(struct sk_reuseport_md, len): 10242 SK_REUSEPORT_LOAD_SKB_FIELD(len); 10243 break; 10244 10245 case offsetof(struct sk_reuseport_md, eth_protocol): 10246 SK_REUSEPORT_LOAD_SKB_FIELD(protocol); 10247 break; 10248 10249 case offsetof(struct sk_reuseport_md, ip_protocol): 10250 SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol); 10251 break; 10252 10253 case offsetof(struct sk_reuseport_md, data_end): 10254 SK_REUSEPORT_LOAD_FIELD(data_end); 10255 break; 10256 10257 case offsetof(struct sk_reuseport_md, hash): 10258 SK_REUSEPORT_LOAD_FIELD(hash); 10259 break; 10260 10261 case offsetof(struct sk_reuseport_md, bind_inany): 10262 SK_REUSEPORT_LOAD_FIELD(bind_inany); 10263 break; 10264 } 10265 10266 return insn - insn_buf; 10267 } 10268 10269 const struct bpf_verifier_ops sk_reuseport_verifier_ops = { 10270 .get_func_proto = sk_reuseport_func_proto, 10271 .is_valid_access = sk_reuseport_is_valid_access, 10272 .convert_ctx_access = sk_reuseport_convert_ctx_access, 10273 }; 10274 10275 const struct bpf_prog_ops sk_reuseport_prog_ops = { 10276 }; 10277 10278 DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled); 10279 EXPORT_SYMBOL(bpf_sk_lookup_enabled); 10280 10281 BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, 10282 struct sock *, sk, u64, flags) 10283 { 10284 if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE | 10285 BPF_SK_LOOKUP_F_NO_REUSEPORT))) 10286 return -EINVAL; 10287 if (unlikely(sk && sk_is_refcounted(sk))) 10288 return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */ 10289 if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED)) 10290 return -ESOCKTNOSUPPORT; /* reject connected sockets */ 10291 10292 /* Check if socket is suitable for packet L3/L4 protocol */ 10293 if (sk && sk->sk_protocol != 
ctx->protocol) 10294 return -EPROTOTYPE; 10295 if (sk && sk->sk_family != ctx->family && 10296 (sk->sk_family == AF_INET || ipv6_only_sock(sk))) 10297 return -EAFNOSUPPORT; 10298 10299 if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE)) 10300 return -EEXIST; 10301 10302 /* Select socket as lookup result */ 10303 ctx->selected_sk = sk; 10304 ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT; 10305 return 0; 10306 } 10307 10308 static const struct bpf_func_proto bpf_sk_lookup_assign_proto = { 10309 .func = bpf_sk_lookup_assign, 10310 .gpl_only = false, 10311 .ret_type = RET_INTEGER, 10312 .arg1_type = ARG_PTR_TO_CTX, 10313 .arg2_type = ARG_PTR_TO_SOCKET_OR_NULL, 10314 .arg3_type = ARG_ANYTHING, 10315 }; 10316 10317 static const struct bpf_func_proto * 10318 sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 10319 { 10320 switch (func_id) { 10321 case BPF_FUNC_perf_event_output: 10322 return &bpf_event_output_data_proto; 10323 case BPF_FUNC_sk_assign: 10324 return &bpf_sk_lookup_assign_proto; 10325 case BPF_FUNC_sk_release: 10326 return &bpf_sk_release_proto; 10327 default: 10328 return bpf_sk_base_func_proto(func_id); 10329 } 10330 } 10331 10332 static bool sk_lookup_is_valid_access(int off, int size, 10333 enum bpf_access_type type, 10334 const struct bpf_prog *prog, 10335 struct bpf_insn_access_aux *info) 10336 { 10337 if (off < 0 || off >= sizeof(struct bpf_sk_lookup)) 10338 return false; 10339 if (off % size != 0) 10340 return false; 10341 if (type != BPF_READ) 10342 return false; 10343 10344 switch (off) { 10345 case offsetof(struct bpf_sk_lookup, sk): 10346 info->reg_type = PTR_TO_SOCKET_OR_NULL; 10347 return size == sizeof(__u64); 10348 10349 case bpf_ctx_range(struct bpf_sk_lookup, family): 10350 case bpf_ctx_range(struct bpf_sk_lookup, protocol): 10351 case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4): 10352 case bpf_ctx_range(struct bpf_sk_lookup, local_ip4): 10353 case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]): 10354 case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): 10355 case bpf_ctx_range(struct bpf_sk_lookup, remote_port): 10356 case bpf_ctx_range(struct bpf_sk_lookup, local_port): 10357 bpf_ctx_record_field_size(info, sizeof(__u32)); 10358 return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); 10359 10360 default: 10361 return false; 10362 } 10363 } 10364 10365 static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, 10366 const struct bpf_insn *si, 10367 struct bpf_insn *insn_buf, 10368 struct bpf_prog *prog, 10369 u32 *target_size) 10370 { 10371 struct bpf_insn *insn = insn_buf; 10372 10373 switch (si->off) { 10374 case offsetof(struct bpf_sk_lookup, sk): 10375 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 10376 offsetof(struct bpf_sk_lookup_kern, selected_sk)); 10377 break; 10378 10379 case offsetof(struct bpf_sk_lookup, family): 10380 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 10381 bpf_target_off(struct bpf_sk_lookup_kern, 10382 family, 2, target_size)); 10383 break; 10384 10385 case offsetof(struct bpf_sk_lookup, protocol): 10386 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 10387 bpf_target_off(struct bpf_sk_lookup_kern, 10388 protocol, 2, target_size)); 10389 break; 10390 10391 case offsetof(struct bpf_sk_lookup, remote_ip4): 10392 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 10393 bpf_target_off(struct bpf_sk_lookup_kern, 10394 v4.saddr, 4, target_size)); 10395 break; 10396 10397 case offsetof(struct 
bpf_sk_lookup, local_ip4): 10398 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 10399 bpf_target_off(struct bpf_sk_lookup_kern, 10400 v4.daddr, 4, target_size)); 10401 break; 10402 10403 case bpf_ctx_range_till(struct bpf_sk_lookup, 10404 remote_ip6[0], remote_ip6[3]): { 10405 #if IS_ENABLED(CONFIG_IPV6) 10406 int off = si->off; 10407 10408 off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]); 10409 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); 10410 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 10411 offsetof(struct bpf_sk_lookup_kern, v6.saddr)); 10412 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 10413 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); 10414 #else 10415 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10416 #endif 10417 break; 10418 } 10419 case bpf_ctx_range_till(struct bpf_sk_lookup, 10420 local_ip6[0], local_ip6[3]): { 10421 #if IS_ENABLED(CONFIG_IPV6) 10422 int off = si->off; 10423 10424 off -= offsetof(struct bpf_sk_lookup, local_ip6[0]); 10425 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); 10426 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 10427 offsetof(struct bpf_sk_lookup_kern, v6.daddr)); 10428 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 10429 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); 10430 #else 10431 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10432 #endif 10433 break; 10434 } 10435 case offsetof(struct bpf_sk_lookup, remote_port): 10436 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 10437 bpf_target_off(struct bpf_sk_lookup_kern, 10438 sport, 2, target_size)); 10439 break; 10440 10441 case offsetof(struct bpf_sk_lookup, local_port): 10442 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 10443 bpf_target_off(struct bpf_sk_lookup_kern, 10444 dport, 2, target_size)); 10445 break; 10446 } 10447 10448 return insn - insn_buf; 10449 } 10450 10451 const struct bpf_prog_ops sk_lookup_prog_ops = { 10452 }; 10453 10454 const struct bpf_verifier_ops sk_lookup_verifier_ops = { 10455 .get_func_proto = sk_lookup_func_proto, 10456 .is_valid_access = sk_lookup_is_valid_access, 10457 .convert_ctx_access = sk_lookup_convert_ctx_access, 10458 }; 10459 10460 #endif /* CONFIG_INET */ 10461 10462 DEFINE_BPF_DISPATCHER(xdp) 10463 10464 void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) 10465 { 10466 bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); 10467 } 10468 10469 #ifdef CONFIG_DEBUG_INFO_BTF 10470 BTF_ID_LIST_GLOBAL(btf_sock_ids) 10471 #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) 10472 BTF_SOCK_TYPE_xxx 10473 #undef BTF_SOCK_TYPE 10474 #else 10475 u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; 10476 #endif 10477 10478 BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) 10479 { 10480 /* tcp6_sock type is not generated in dwarf and hence btf, 10481 * trigger an explicit type generation here. 
10482 */ 10483 BTF_TYPE_EMIT(struct tcp6_sock); 10484 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && 10485 sk->sk_family == AF_INET6) 10486 return (unsigned long)sk; 10487 10488 return (unsigned long)NULL; 10489 } 10490 10491 const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { 10492 .func = bpf_skc_to_tcp6_sock, 10493 .gpl_only = false, 10494 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 10495 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 10496 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], 10497 }; 10498 10499 BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk) 10500 { 10501 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) 10502 return (unsigned long)sk; 10503 10504 return (unsigned long)NULL; 10505 } 10506 10507 const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { 10508 .func = bpf_skc_to_tcp_sock, 10509 .gpl_only = false, 10510 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 10511 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 10512 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 10513 }; 10514 10515 BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) 10516 { 10517 /* BTF types for tcp_timewait_sock and inet_timewait_sock are not 10518 * generated if CONFIG_INET=n. Trigger an explicit generation here. 10519 */ 10520 BTF_TYPE_EMIT(struct inet_timewait_sock); 10521 BTF_TYPE_EMIT(struct tcp_timewait_sock); 10522 10523 #ifdef CONFIG_INET 10524 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) 10525 return (unsigned long)sk; 10526 #endif 10527 10528 #if IS_BUILTIN(CONFIG_IPV6) 10529 if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) 10530 return (unsigned long)sk; 10531 #endif 10532 10533 return (unsigned long)NULL; 10534 } 10535 10536 const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { 10537 .func = bpf_skc_to_tcp_timewait_sock, 10538 .gpl_only = false, 10539 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 10540 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 10541 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], 10542 }; 10543 10544 BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) 10545 { 10546 #ifdef CONFIG_INET 10547 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) 10548 return (unsigned long)sk; 10549 #endif 10550 10551 #if IS_BUILTIN(CONFIG_IPV6) 10552 if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) 10553 return (unsigned long)sk; 10554 #endif 10555 10556 return (unsigned long)NULL; 10557 } 10558 10559 const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { 10560 .func = bpf_skc_to_tcp_request_sock, 10561 .gpl_only = false, 10562 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 10563 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 10564 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], 10565 }; 10566 10567 BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk) 10568 { 10569 /* udp6_sock type is not generated in dwarf and hence btf, 10570 * trigger an explicit type generation here. 
10571 */ 10572 BTF_TYPE_EMIT(struct udp6_sock); 10573 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && 10574 sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6) 10575 return (unsigned long)sk; 10576 10577 return (unsigned long)NULL; 10578 } 10579 10580 const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { 10581 .func = bpf_skc_to_udp6_sock, 10582 .gpl_only = false, 10583 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 10584 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 10585 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], 10586 }; 10587 10588 BPF_CALL_1(bpf_sock_from_file, struct file *, file) 10589 { 10590 return (unsigned long)sock_from_file(file); 10591 } 10592 10593 BTF_ID_LIST(bpf_sock_from_file_btf_ids) 10594 BTF_ID(struct, socket) 10595 BTF_ID(struct, file) 10596 10597 const struct bpf_func_proto bpf_sock_from_file_proto = { 10598 .func = bpf_sock_from_file, 10599 .gpl_only = false, 10600 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 10601 .ret_btf_id = &bpf_sock_from_file_btf_ids[0], 10602 .arg1_type = ARG_PTR_TO_BTF_ID, 10603 .arg1_btf_id = &bpf_sock_from_file_btf_ids[1], 10604 }; 10605 10606 static const struct bpf_func_proto * 10607 bpf_sk_base_func_proto(enum bpf_func_id func_id) 10608 { 10609 const struct bpf_func_proto *func; 10610 10611 switch (func_id) { 10612 case BPF_FUNC_skc_to_tcp6_sock: 10613 func = &bpf_skc_to_tcp6_sock_proto; 10614 break; 10615 case BPF_FUNC_skc_to_tcp_sock: 10616 func = &bpf_skc_to_tcp_sock_proto; 10617 break; 10618 case BPF_FUNC_skc_to_tcp_timewait_sock: 10619 func = &bpf_skc_to_tcp_timewait_sock_proto; 10620 break; 10621 case BPF_FUNC_skc_to_tcp_request_sock: 10622 func = &bpf_skc_to_tcp_request_sock_proto; 10623 break; 10624 case BPF_FUNC_skc_to_udp6_sock: 10625 func = &bpf_skc_to_udp6_sock_proto; 10626 break; 10627 default: 10628 return bpf_base_func_proto(func_id); 10629 } 10630 10631 if (!perfmon_capable()) 10632 return NULL; 10633 10634 return func; 10635 } 10636
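/*
 * Illustrative sketch, not part of the original file: a minimal BPF-C
 * "sockops" program, built separately with clang -target bpf and attached
 * to a cgroup (BPF_CGROUP_SOCK_OPS). It reads bpf_sock_ops fields whose
 * accesses are rewritten by sock_ops_convert_ctx_access() above (srtt_us,
 * snd_cwnd) and enables RTT callbacks via bpf_sock_ops_cb_flags_set().
 * The program and section names are made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int sockops_observe(struct bpf_sock_ops *skops)
{
        switch (skops->op) {
        case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
        case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
                /* These loads go through SOCK_OPS_GET_TCP_SOCK_FIELD() above. */
                bpf_printk("established: srtt_us=%u snd_cwnd=%u",
                           skops->srtt_us, skops->snd_cwnd);
                /* Ask for BPF_SOCK_OPS_RTT_CB callbacks on this socket. */
                bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_RTT_CB_FLAG);
                break;
        case BPF_SOCK_OPS_RTT_CB:
                bpf_printk("rtt sample: srtt_us=%u", skops->srtt_us);
                break;
        }
        return 1;
}

char _license[] SEC("license") = "GPL";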
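/*
 * Illustrative sketch, not part of the original file: a standalone userspace
 * program exercising the classic-filter paths above. SO_ATTACH_FILTER ends
 * up in sk_attach_filter(), the zero-length SO_GET_FILTER query and the dump
 * are served by sk_get_filter(), and SO_DETACH_FILTER maps to
 * sk_detach_filter(). As the comment in sk_get_filter() notes, the
 * getsockopt() length is counted in filter blocks, not bytes.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/filter.h>

#ifndef SO_GET_FILTER
#define SO_GET_FILTER SO_ATTACH_FILTER  /* same value in the UAPI headers */
#endif

int main(void)
{
        /* Minimal classic BPF program: accept every packet in full. */
        struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
        };
        struct sock_fprog prog = {
                .len    = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };
        struct sock_filter dump[16];
        socklen_t nblocks = 0;
        int off = 0;
        int fd;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0) {
                perror("socket");
                return 1;
        }

        /* Attach: reaches sk_attach_filter() via sock_setsockopt(). */
        if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
                perror("SO_ATTACH_FILTER");

        /* Zero-length query: sk_get_filter() only reports the block count. */
        if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &nblocks) == 0)
                printf("attached filter has %u block(s)\n", nblocks);

        /* Dump the originally attached instructions; nblocks counts blocks. */
        nblocks = sizeof(dump) / sizeof(dump[0]);
        if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, dump, &nblocks) == 0)
                printf("dumped %u block(s), first code=0x%x\n",
                       nblocks, dump[0].code);

        /* Detach: mirrors sk_detach_filter(). The value is ignored, but
         * sock_setsockopt() insists on an int-sized optval. */
        if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &off, sizeof(off)))
                perror("SO_DETACH_FILTER");

        close(fd);
        return 0;
}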
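/*
 * Illustrative sketch, not part of the original file: a BPF-C "sk_reuseport"
 * program using bpf_sk_select_reuseport() (sk_select_reuseport() above) with
 * a REUSEPORT_SOCKARRAY. If the chosen slot is empty the helper fails with
 * -ENOENT, selected_sk stays NULL, and returning SK_PASS lets the kernel
 * fall back to its normal reuseport choice (see bpf_run_sk_reuseport()
 * above). Map and function names are made up; userspace is expected to
 * populate the array and attach the program with SO_ATTACH_REUSEPORT_EBPF.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
        __uint(max_entries, 4);
        __type(key, __u32);
        __type(value, __u64);
} reuseport_array SEC(".maps");

SEC("sk_reuseport")
int select_slot(struct sk_reuseport_md *reuse_md)
{
        /* reuse_md->hash is one of the readable fields checked in
         * sk_reuseport_is_valid_access() above. */
        __u32 index = reuse_md->hash % 4;

        bpf_sk_select_reuseport(reuse_md, &reuseport_array, &index, 0);
        return SK_PASS;
}

char _license[] SEC("license") = "GPL";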
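/*
 * Illustrative sketch, not part of the original file: a BPF-C "sk_lookup"
 * program that steers new connections on one local port to a socket stored
 * in a sockmap. bpf_sk_assign() is implemented by bpf_sk_lookup_assign()
 * above, which rejects refcounted or established sockets and protocol/family
 * mismatches. STEER_PORT, the map and the function name are made up for the
 * example; the program attaches to a network namespace via the BPF_SK_LOOKUP
 * attach type.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define STEER_PORT 7007 /* hypothetical service port */

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} target_sock SEC(".maps");

SEC("sk_lookup")
int steer_to_socket(struct bpf_sk_lookup *ctx)
{
        struct bpf_sock *sk;
        __u32 key = 0;
        long err;

        /* ctx->local_port (host byte order) is readable per
         * sk_lookup_is_valid_access() above. */
        if (ctx->local_port != STEER_PORT)
                return SK_PASS;

        sk = bpf_map_lookup_elem(&target_sock, &key);
        if (!sk)
                return SK_PASS;

        err = bpf_sk_assign(ctx, sk, 0);
        bpf_sk_release(sk);
        return err ? SK_DROP : SK_PASS;
}

char _license[] SEC("license") = "GPL";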