// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>
#include <net/xdp.h>

static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
	if (in_compat_syscall()) {
		struct compat_sock_fprog f32;

		if (len != sizeof(f32))
			return -EINVAL;
		if (copy_from_sockptr(&f32, src, sizeof(f32)))
			return -EFAULT;
		memset(dst, 0, sizeof(*dst));
		dst->len = f32.len;
		dst->filter = compat_ptr(f32.filter);
	} else {
		if (len != sizeof(*dst))
			return -EINVAL;
		if (copy_from_sockptr(dst, src, sizeof(*dst)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned by
 * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to bpf_prog_run. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET);
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET);
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		if (offset)
			*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	default:
		return false;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn   = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}
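
/*
 * Illustrative sketch (not part of this file): the two-pass calling
 * convention of bpf_convert_filter(), as documented below and used by
 * bpf_migrate_filter() further down. Variable names here are
 * hypothetical and error handling is trimmed.
 */
#if 0
	bool seen_ld_abs = false;
	int new_len, err;

	/* Pass 1: only compute the length of the converted program. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs);
	if (err)
		goto out;

	/* Make room for new_len eBPF insns, then do the real remap. */
	new_prog = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	new_prog->len = new_len;
	err = bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs);
#endif
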
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
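
/*
 * Illustrative sketch (not part of this file): a classic program that
 * check_load_and_stores() rejects because M[0] is read before it is
 * ever written, next to a variant it accepts.
 */
#if 0
	struct sock_filter rejected[] = {
		BPF_STMT(BPF_LD | BPF_MEM, 0),	/* A = M[0], never stored to */
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
	struct sock_filter accepted[] = {
		BPF_STMT(BPF_ST, 0),		/* M[0] = A */
		BPF_STMT(BPF_LD | BPF_MEM, 0),	/* A = M[0] */
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
#endif
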
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it won't be used at
	 * this point in time anymore internally after the migration to the eBPF
	 * instruction representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the eBPF translation
	 * for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
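
/*
 * Illustrative sketch (not part of this file): typical in-kernel use of
 * bpf_prog_create()/bpf_prog_destroy() with a trivial "accept all"
 * classic program.
 */
#if 0
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};
	struct bpf_prog *prog;

	if (!bpf_prog_create(&prog, &fprog)) {
		/* ... run it via bpf_prog_run(prog, skb) ... */
		bpf_prog_destroy(prog);
	}
#endif
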
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
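
/*
 * Illustrative sketch (not part of this file): how userspace reaches
 * sk_attach_filter() and sk_attach_bpf() via setsockopt(). The classic
 * program below accepts every packet; 'fd' and 'prog_fd' are hypothetical.
 */
#if 0
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};
	struct sock_fprog fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};

	/* Classic BPF: ends up in sk_attach_filter(). */
	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));

	/* eBPF fd obtained from bpf(BPF_PROG_LOAD, ...): sk_attach_bpf(). */
	setsockopt(fd, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd));
#endif
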
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf prog (e.g. sockmap). It depends on the
		 * limitation imposed by bpf_prog_load().
		 * Hence, sysctl_optmem_max is not checked.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER */
		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}

void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_flow_dissector_load_bytes,
	   const struct bpf_flow_dissector *, ctx, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	if (unlikely(!ctx->skb))
		goto err_clear;

	ptr = skb_header_pointer(ctx->skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
	.func		= bpf_flow_dissector_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *end = skb_tail_pointer(skb);
	u8 *start, *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		if (unlikely(!skb_mac_header_was_set(skb)))
			goto err_clear;
		start = skb_mac_header(skb);
		break;
	case BPF_HDR_START_NET:
		start = skb_network_header(skb);
		break;
	default:
		goto err_clear;
	}

	ptr = start + offset;

	if (likely(ptr + len <= end)) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
	return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_sk_fullsock_proto = {
	.func		= bpf_sk_fullsock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

static inline int sk_skb_try_make_writable(struct sk_buff *skb,
					   unsigned int write_len)
{
	return __bpf_try_make_writable(skb, write_len);
}

BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto sk_skb_pull_data_proto = {
	.func		= sk_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
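
/*
 * Illustrative sketch (not part of this file): BPF program side usage of
 * bpf_csum_diff() together with bpf_l3_csum_replace()/bpf_l4_csum_replace()
 * when rewriting the IPv4 destination address from a tc program. Offsets
 * assume a plain Ethernet/IPv4/TCP packet without IP options; 'skb',
 * 'old_ip' and 'new_ip' are hypothetical.
 */
#if 0
	const int ip_dst_off   = ETH_HLEN + offsetof(struct iphdr, daddr);
	const int ip_csum_off  = ETH_HLEN + offsetof(struct iphdr, check);
	const int tcp_csum_off = ETH_HLEN + sizeof(struct iphdr) +
				 offsetof(struct tcphdr, check);
	__s64 diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);

	bpf_skb_store_bytes(skb, ip_dst_off, &new_ip, 4, 0);
	bpf_l3_csum_replace(skb, ip_csum_off, 0, diff, 0);
	bpf_l4_csum_replace(skb, tcp_csum_off, 0, diff, BPF_F_PSEUDO_HDR);
#endif
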
2065 skb->csum_level : -EACCES; 2066 default: 2067 return -EINVAL; 2068 } 2069 2070 return 0; 2071 } 2072 2073 static const struct bpf_func_proto bpf_csum_level_proto = { 2074 .func = bpf_csum_level, 2075 .gpl_only = false, 2076 .ret_type = RET_INTEGER, 2077 .arg1_type = ARG_PTR_TO_CTX, 2078 .arg2_type = ARG_ANYTHING, 2079 }; 2080 2081 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) 2082 { 2083 return dev_forward_skb_nomtu(dev, skb); 2084 } 2085 2086 static inline int __bpf_rx_skb_no_mac(struct net_device *dev, 2087 struct sk_buff *skb) 2088 { 2089 int ret = ____dev_forward_skb(dev, skb, false); 2090 2091 if (likely(!ret)) { 2092 skb->dev = dev; 2093 ret = netif_rx(skb); 2094 } 2095 2096 return ret; 2097 } 2098 2099 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) 2100 { 2101 int ret; 2102 2103 if (dev_xmit_recursion()) { 2104 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2105 kfree_skb(skb); 2106 return -ENETDOWN; 2107 } 2108 2109 skb->dev = dev; 2110 skb_clear_tstamp(skb); 2111 2112 dev_xmit_recursion_inc(); 2113 ret = dev_queue_xmit(skb); 2114 dev_xmit_recursion_dec(); 2115 2116 return ret; 2117 } 2118 2119 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, 2120 u32 flags) 2121 { 2122 unsigned int mlen = skb_network_offset(skb); 2123 2124 if (mlen) { 2125 __skb_pull(skb, mlen); 2126 2127 /* At ingress, the mac header has already been pulled once. 2128 * At egress, skb_postpull_rcsum has to be done in case 2129 * the skb originated from ingress (i.e. a forwarded skb) 2130 * to ensure that rcsum starts at net header. 2131 */ 2132 if (!skb_at_tc_ingress(skb)) 2133 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 2134 } 2135 skb_pop_mac_header(skb); 2136 skb_reset_mac_len(skb); 2137 return flags & BPF_F_INGRESS ? 2138 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); 2139 } 2140 2141 static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, 2142 u32 flags) 2143 { 2144 /* Verify that a link layer header is carried */ 2145 if (unlikely(skb->mac_header >= skb->network_header)) { 2146 kfree_skb(skb); 2147 return -ERANGE; 2148 } 2149 2150 bpf_push_mac_rcsum(skb); 2151 return flags & BPF_F_INGRESS ?
2152 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); 2153 } 2154 2155 static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, 2156 u32 flags) 2157 { 2158 if (dev_is_mac_header_xmit(dev)) 2159 return __bpf_redirect_common(skb, dev, flags); 2160 else 2161 return __bpf_redirect_no_mac(skb, dev, flags); 2162 } 2163 2164 #if IS_ENABLED(CONFIG_IPV6) 2165 static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, 2166 struct net_device *dev, struct bpf_nh_params *nh) 2167 { 2168 u32 hh_len = LL_RESERVED_SPACE(dev); 2169 const struct in6_addr *nexthop; 2170 struct dst_entry *dst = NULL; 2171 struct neighbour *neigh; 2172 2173 if (dev_xmit_recursion()) { 2174 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2175 goto out_drop; 2176 } 2177 2178 skb->dev = dev; 2179 skb_clear_tstamp(skb); 2180 2181 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 2182 skb = skb_expand_head(skb, hh_len); 2183 if (!skb) 2184 return -ENOMEM; 2185 } 2186 2187 rcu_read_lock_bh(); 2188 if (!nh) { 2189 dst = skb_dst(skb); 2190 nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst), 2191 &ipv6_hdr(skb)->daddr); 2192 } else { 2193 nexthop = &nh->ipv6_nh; 2194 } 2195 neigh = ip_neigh_gw6(dev, nexthop); 2196 if (likely(!IS_ERR(neigh))) { 2197 int ret; 2198 2199 sock_confirm_neigh(skb, neigh); 2200 dev_xmit_recursion_inc(); 2201 ret = neigh_output(neigh, skb, false); 2202 dev_xmit_recursion_dec(); 2203 rcu_read_unlock_bh(); 2204 return ret; 2205 } 2206 rcu_read_unlock_bh(); 2207 if (dst) 2208 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 2209 out_drop: 2210 kfree_skb(skb); 2211 return -ENETDOWN; 2212 } 2213 2214 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, 2215 struct bpf_nh_params *nh) 2216 { 2217 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 2218 struct net *net = dev_net(dev); 2219 int err, ret = NET_XMIT_DROP; 2220 2221 if (!nh) { 2222 struct dst_entry *dst; 2223 struct flowi6 fl6 = { 2224 .flowi6_flags = FLOWI_FLAG_ANYSRC, 2225 .flowi6_mark = skb->mark, 2226 .flowlabel = ip6_flowinfo(ip6h), 2227 .flowi6_oif = dev->ifindex, 2228 .flowi6_proto = ip6h->nexthdr, 2229 .daddr = ip6h->daddr, 2230 .saddr = ip6h->saddr, 2231 }; 2232 2233 dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); 2234 if (IS_ERR(dst)) 2235 goto out_drop; 2236 2237 skb_dst_set(skb, dst); 2238 } else if (nh->nh_family != AF_INET6) { 2239 goto out_drop; 2240 } 2241 2242 err = bpf_out_neigh_v6(net, skb, dev, nh); 2243 if (unlikely(net_xmit_eval(err))) 2244 dev->stats.tx_errors++; 2245 else 2246 ret = NET_XMIT_SUCCESS; 2247 goto out_xmit; 2248 out_drop: 2249 dev->stats.tx_errors++; 2250 kfree_skb(skb); 2251 out_xmit: 2252 return ret; 2253 } 2254 #else 2255 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, 2256 struct bpf_nh_params *nh) 2257 { 2258 kfree_skb(skb); 2259 return NET_XMIT_DROP; 2260 } 2261 #endif /* CONFIG_IPV6 */ 2262 2263 #if IS_ENABLED(CONFIG_INET) 2264 static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, 2265 struct net_device *dev, struct bpf_nh_params *nh) 2266 { 2267 u32 hh_len = LL_RESERVED_SPACE(dev); 2268 struct neighbour *neigh; 2269 bool is_v6gw = false; 2270 2271 if (dev_xmit_recursion()) { 2272 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2273 goto out_drop; 2274 } 2275 2276 skb->dev = dev; 2277 skb_clear_tstamp(skb); 2278 2279 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 2280 skb = 
skb_expand_head(skb, hh_len); 2281 if (!skb) 2282 return -ENOMEM; 2283 } 2284 2285 rcu_read_lock_bh(); 2286 if (!nh) { 2287 struct dst_entry *dst = skb_dst(skb); 2288 struct rtable *rt = container_of(dst, struct rtable, dst); 2289 2290 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); 2291 } else if (nh->nh_family == AF_INET6) { 2292 neigh = ip_neigh_gw6(dev, &nh->ipv6_nh); 2293 is_v6gw = true; 2294 } else if (nh->nh_family == AF_INET) { 2295 neigh = ip_neigh_gw4(dev, nh->ipv4_nh); 2296 } else { 2297 rcu_read_unlock_bh(); 2298 goto out_drop; 2299 } 2300 2301 if (likely(!IS_ERR(neigh))) { 2302 int ret; 2303 2304 sock_confirm_neigh(skb, neigh); 2305 dev_xmit_recursion_inc(); 2306 ret = neigh_output(neigh, skb, is_v6gw); 2307 dev_xmit_recursion_dec(); 2308 rcu_read_unlock_bh(); 2309 return ret; 2310 } 2311 rcu_read_unlock_bh(); 2312 out_drop: 2313 kfree_skb(skb); 2314 return -ENETDOWN; 2315 } 2316 2317 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, 2318 struct bpf_nh_params *nh) 2319 { 2320 const struct iphdr *ip4h = ip_hdr(skb); 2321 struct net *net = dev_net(dev); 2322 int err, ret = NET_XMIT_DROP; 2323 2324 if (!nh) { 2325 struct flowi4 fl4 = { 2326 .flowi4_flags = FLOWI_FLAG_ANYSRC, 2327 .flowi4_mark = skb->mark, 2328 .flowi4_tos = RT_TOS(ip4h->tos), 2329 .flowi4_oif = dev->ifindex, 2330 .flowi4_proto = ip4h->protocol, 2331 .daddr = ip4h->daddr, 2332 .saddr = ip4h->saddr, 2333 }; 2334 struct rtable *rt; 2335 2336 rt = ip_route_output_flow(net, &fl4, NULL); 2337 if (IS_ERR(rt)) 2338 goto out_drop; 2339 if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { 2340 ip_rt_put(rt); 2341 goto out_drop; 2342 } 2343 2344 skb_dst_set(skb, &rt->dst); 2345 } 2346 2347 err = bpf_out_neigh_v4(net, skb, dev, nh); 2348 if (unlikely(net_xmit_eval(err))) 2349 dev->stats.tx_errors++; 2350 else 2351 ret = NET_XMIT_SUCCESS; 2352 goto out_xmit; 2353 out_drop: 2354 dev->stats.tx_errors++; 2355 kfree_skb(skb); 2356 out_xmit: 2357 return ret; 2358 } 2359 #else 2360 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, 2361 struct bpf_nh_params *nh) 2362 { 2363 kfree_skb(skb); 2364 return NET_XMIT_DROP; 2365 } 2366 #endif /* CONFIG_INET */ 2367 2368 static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev, 2369 struct bpf_nh_params *nh) 2370 { 2371 struct ethhdr *ethh = eth_hdr(skb); 2372 2373 if (unlikely(skb->mac_header >= skb->network_header)) 2374 goto out; 2375 bpf_push_mac_rcsum(skb); 2376 if (is_multicast_ether_addr(ethh->h_dest)) 2377 goto out; 2378 2379 skb_pull(skb, sizeof(*ethh)); 2380 skb_unset_mac_header(skb); 2381 skb_reset_network_header(skb); 2382 2383 if (skb->protocol == htons(ETH_P_IP)) 2384 return __bpf_redirect_neigh_v4(skb, dev, nh); 2385 else if (skb->protocol == htons(ETH_P_IPV6)) 2386 return __bpf_redirect_neigh_v6(skb, dev, nh); 2387 out: 2388 kfree_skb(skb); 2389 return -ENOTSUPP; 2390 } 2391 2392 /* Internal, non-exposed redirect flags. 
*/ 2393 enum { 2394 BPF_F_NEIGH = (1ULL << 1), 2395 BPF_F_PEER = (1ULL << 2), 2396 BPF_F_NEXTHOP = (1ULL << 3), 2397 #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP) 2398 }; 2399 2400 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) 2401 { 2402 struct net_device *dev; 2403 struct sk_buff *clone; 2404 int ret; 2405 2406 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2407 return -EINVAL; 2408 2409 dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); 2410 if (unlikely(!dev)) 2411 return -EINVAL; 2412 2413 clone = skb_clone(skb, GFP_ATOMIC); 2414 if (unlikely(!clone)) 2415 return -ENOMEM; 2416 2417 /* For direct write, we need to keep the invariant that the skbs 2418 * we're dealing with need to be uncloned. Should uncloning fail 2419 * here, we need to free the just generated clone to unclone once 2420 * again. 2421 */ 2422 ret = bpf_try_make_head_writable(skb); 2423 if (unlikely(ret)) { 2424 kfree_skb(clone); 2425 return -ENOMEM; 2426 } 2427 2428 return __bpf_redirect(clone, dev, flags); 2429 } 2430 2431 static const struct bpf_func_proto bpf_clone_redirect_proto = { 2432 .func = bpf_clone_redirect, 2433 .gpl_only = false, 2434 .ret_type = RET_INTEGER, 2435 .arg1_type = ARG_PTR_TO_CTX, 2436 .arg2_type = ARG_ANYTHING, 2437 .arg3_type = ARG_ANYTHING, 2438 }; 2439 2440 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); 2441 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); 2442 2443 int skb_do_redirect(struct sk_buff *skb) 2444 { 2445 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2446 struct net *net = dev_net(skb->dev); 2447 struct net_device *dev; 2448 u32 flags = ri->flags; 2449 2450 dev = dev_get_by_index_rcu(net, ri->tgt_index); 2451 ri->tgt_index = 0; 2452 ri->flags = 0; 2453 if (unlikely(!dev)) 2454 goto out_drop; 2455 if (flags & BPF_F_PEER) { 2456 const struct net_device_ops *ops = dev->netdev_ops; 2457 2458 if (unlikely(!ops->ndo_get_peer_dev || 2459 !skb_at_tc_ingress(skb))) 2460 goto out_drop; 2461 dev = ops->ndo_get_peer_dev(dev); 2462 if (unlikely(!dev || 2463 !(dev->flags & IFF_UP) || 2464 net_eq(net, dev_net(dev)))) 2465 goto out_drop; 2466 skb->dev = dev; 2467 return -EAGAIN; 2468 } 2469 return flags & BPF_F_NEIGH ? 2470 __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ? 
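/* BPF_F_NEXTHOP: bpf_redirect_neigh() supplied an explicit nexthop in ri->nh; otherwise the nexthop is resolved via a FIB lookup on the packet */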
2471 &ri->nh : NULL) : 2472 __bpf_redirect(skb, dev, flags); 2473 out_drop: 2474 kfree_skb(skb); 2475 return -EINVAL; 2476 } 2477 2478 BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) 2479 { 2480 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2481 2482 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2483 return TC_ACT_SHOT; 2484 2485 ri->flags = flags; 2486 ri->tgt_index = ifindex; 2487 2488 return TC_ACT_REDIRECT; 2489 } 2490 2491 static const struct bpf_func_proto bpf_redirect_proto = { 2492 .func = bpf_redirect, 2493 .gpl_only = false, 2494 .ret_type = RET_INTEGER, 2495 .arg1_type = ARG_ANYTHING, 2496 .arg2_type = ARG_ANYTHING, 2497 }; 2498 2499 BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags) 2500 { 2501 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2502 2503 if (unlikely(flags)) 2504 return TC_ACT_SHOT; 2505 2506 ri->flags = BPF_F_PEER; 2507 ri->tgt_index = ifindex; 2508 2509 return TC_ACT_REDIRECT; 2510 } 2511 2512 static const struct bpf_func_proto bpf_redirect_peer_proto = { 2513 .func = bpf_redirect_peer, 2514 .gpl_only = false, 2515 .ret_type = RET_INTEGER, 2516 .arg1_type = ARG_ANYTHING, 2517 .arg2_type = ARG_ANYTHING, 2518 }; 2519 2520 BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params, 2521 int, plen, u64, flags) 2522 { 2523 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2524 2525 if (unlikely((plen && plen < sizeof(*params)) || flags)) 2526 return TC_ACT_SHOT; 2527 2528 ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0); 2529 ri->tgt_index = ifindex; 2530 2531 BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params)); 2532 if (plen) 2533 memcpy(&ri->nh, params, sizeof(ri->nh)); 2534 2535 return TC_ACT_REDIRECT; 2536 } 2537 2538 static const struct bpf_func_proto bpf_redirect_neigh_proto = { 2539 .func = bpf_redirect_neigh, 2540 .gpl_only = false, 2541 .ret_type = RET_INTEGER, 2542 .arg1_type = ARG_ANYTHING, 2543 .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 2544 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 2545 .arg4_type = ARG_ANYTHING, 2546 }; 2547 2548 BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) 2549 { 2550 msg->apply_bytes = bytes; 2551 return 0; 2552 } 2553 2554 static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { 2555 .func = bpf_msg_apply_bytes, 2556 .gpl_only = false, 2557 .ret_type = RET_INTEGER, 2558 .arg1_type = ARG_PTR_TO_CTX, 2559 .arg2_type = ARG_ANYTHING, 2560 }; 2561 2562 BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) 2563 { 2564 msg->cork_bytes = bytes; 2565 return 0; 2566 } 2567 2568 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { 2569 .func = bpf_msg_cork_bytes, 2570 .gpl_only = false, 2571 .ret_type = RET_INTEGER, 2572 .arg1_type = ARG_PTR_TO_CTX, 2573 .arg2_type = ARG_ANYTHING, 2574 }; 2575 2576 BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, 2577 u32, end, u64, flags) 2578 { 2579 u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; 2580 u32 first_sge, last_sge, i, shift, bytes_sg_total; 2581 struct scatterlist *sge; 2582 u8 *raw, *to, *from; 2583 struct page *page; 2584 2585 if (unlikely(flags || end <= start)) 2586 return -EINVAL; 2587 2588 /* First find the starting scatterlist element */ 2589 i = msg->sg.start; 2590 do { 2591 offset += len; 2592 len = sk_msg_elem(msg, i)->length; 2593 if (start < offset + len) 2594 break; 2595 sk_msg_iter_var_next(i); 2596 } while (i != msg->sg.end); 2597 2598 if (unlikely(start >= 
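/* Illustrative sketch, not part of this file: typical use of the redirect
 * helpers defined above from a tc BPF program. UPLINK_IFINDEX is a made-up
 * constant (a real program would usually take the ifindex from a map), and
 * libbpf-style headers plus <linux/pkt_cls.h> are assumed:
 *
 *	#define UPLINK_IFINDEX 4
 *
 *	SEC("tc")
 *	int fwd_to_uplink(struct __sk_buff *skb)
 *	{
 *		return bpf_redirect_neigh(UPLINK_IFINDEX, NULL, 0, 0);
 *	}
 *
 * bpf_redirect_neigh() fills the Ethernet header from the kernel's FIB and
 * neighbour tables, bpf_redirect(ifindex, 0) transmits the skb as-is through
 * the target device, bpf_redirect(ifindex, BPF_F_INGRESS) injects it into the
 * target's Rx path, and bpf_redirect_peer() crosses a veth pair into the peer
 * namespace. All of them only record the target in bpf_redirect_info and
 * return TC_ACT_REDIRECT; the redirect itself happens in skb_do_redirect()
 * after the program has returned.
 */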
offset + len)) 2599 return -EINVAL; 2600 2601 first_sge = i; 2602 /* The start may point into the sg element so we need to also 2603 * account for the headroom. 2604 */ 2605 bytes_sg_total = start - offset + bytes; 2606 if (!test_bit(i, msg->sg.copy) && bytes_sg_total <= len) 2607 goto out; 2608 2609 /* At this point we need to linearize multiple scatterlist 2610 * elements or a single shared page. Either way we need to 2611 * copy into a linear buffer exclusively owned by BPF. Then 2612 * place the buffer in the scatterlist and fixup the original 2613 * entries by removing the entries now in the linear buffer 2614 * and shifting the remaining entries. For now we do not try 2615 * to copy partial entries to avoid complexity of running out 2616 * of sg_entry slots. The downside is reading a single byte 2617 * will copy the entire sg entry. 2618 */ 2619 do { 2620 copy += sk_msg_elem(msg, i)->length; 2621 sk_msg_iter_var_next(i); 2622 if (bytes_sg_total <= copy) 2623 break; 2624 } while (i != msg->sg.end); 2625 last_sge = i; 2626 2627 if (unlikely(bytes_sg_total > copy)) 2628 return -EINVAL; 2629 2630 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, 2631 get_order(copy)); 2632 if (unlikely(!page)) 2633 return -ENOMEM; 2634 2635 raw = page_address(page); 2636 i = first_sge; 2637 do { 2638 sge = sk_msg_elem(msg, i); 2639 from = sg_virt(sge); 2640 len = sge->length; 2641 to = raw + poffset; 2642 2643 memcpy(to, from, len); 2644 poffset += len; 2645 sge->length = 0; 2646 put_page(sg_page(sge)); 2647 2648 sk_msg_iter_var_next(i); 2649 } while (i != last_sge); 2650 2651 sg_set_page(&msg->sg.data[first_sge], page, copy, 0); 2652 2653 /* To repair sg ring we need to shift entries. If we only 2654 * had a single entry though we can just replace it and 2655 * be done. Otherwise walk the ring and shift the entries. 2656 */ 2657 WARN_ON_ONCE(last_sge == first_sge); 2658 shift = last_sge > first_sge ? 2659 last_sge - first_sge - 1 : 2660 NR_MSG_FRAG_IDS - first_sge + last_sge - 1; 2661 if (!shift) 2662 goto out; 2663 2664 i = first_sge; 2665 sk_msg_iter_var_next(i); 2666 do { 2667 u32 move_from; 2668 2669 if (i + shift >= NR_MSG_FRAG_IDS) 2670 move_from = i + shift - NR_MSG_FRAG_IDS; 2671 else 2672 move_from = i + shift; 2673 if (move_from == msg->sg.end) 2674 break; 2675 2676 msg->sg.data[i] = msg->sg.data[move_from]; 2677 msg->sg.data[move_from].length = 0; 2678 msg->sg.data[move_from].page_link = 0; 2679 msg->sg.data[move_from].offset = 0; 2680 sk_msg_iter_var_next(i); 2681 } while (1); 2682 2683 msg->sg.end = msg->sg.end - shift > msg->sg.end ? 
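/* a u32 underflow in msg->sg.end - shift means the ring index wrapped, so re-add NR_MSG_FRAG_IDS to bring it back into range */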
2684 msg->sg.end - shift + NR_MSG_FRAG_IDS : 2685 msg->sg.end - shift; 2686 out: 2687 msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; 2688 msg->data_end = msg->data + bytes; 2689 return 0; 2690 } 2691 2692 static const struct bpf_func_proto bpf_msg_pull_data_proto = { 2693 .func = bpf_msg_pull_data, 2694 .gpl_only = false, 2695 .ret_type = RET_INTEGER, 2696 .arg1_type = ARG_PTR_TO_CTX, 2697 .arg2_type = ARG_ANYTHING, 2698 .arg3_type = ARG_ANYTHING, 2699 .arg4_type = ARG_ANYTHING, 2700 }; 2701 2702 BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, 2703 u32, len, u64, flags) 2704 { 2705 struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge; 2706 u32 new, i = 0, l = 0, space, copy = 0, offset = 0; 2707 u8 *raw, *to, *from; 2708 struct page *page; 2709 2710 if (unlikely(flags)) 2711 return -EINVAL; 2712 2713 if (unlikely(len == 0)) 2714 return 0; 2715 2716 /* First find the starting scatterlist element */ 2717 i = msg->sg.start; 2718 do { 2719 offset += l; 2720 l = sk_msg_elem(msg, i)->length; 2721 2722 if (start < offset + l) 2723 break; 2724 sk_msg_iter_var_next(i); 2725 } while (i != msg->sg.end); 2726 2727 if (start >= offset + l) 2728 return -EINVAL; 2729 2730 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); 2731 2732 /* If no space available will fallback to copy, we need at 2733 * least one scatterlist elem available to push data into 2734 * when start aligns to the beginning of an element or two 2735 * when it falls inside an element. We handle the start equals 2736 * offset case because its the common case for inserting a 2737 * header. 2738 */ 2739 if (!space || (space == 1 && start != offset)) 2740 copy = msg->sg.data[i].length; 2741 2742 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, 2743 get_order(copy + len)); 2744 if (unlikely(!page)) 2745 return -ENOMEM; 2746 2747 if (copy) { 2748 int front, back; 2749 2750 raw = page_address(page); 2751 2752 psge = sk_msg_elem(msg, i); 2753 front = start - offset; 2754 back = psge->length - front; 2755 from = sg_virt(psge); 2756 2757 if (front) 2758 memcpy(raw, from, front); 2759 2760 if (back) { 2761 from += front; 2762 to = raw + front + len; 2763 2764 memcpy(to, from, back); 2765 } 2766 2767 put_page(sg_page(psge)); 2768 } else if (start - offset) { 2769 psge = sk_msg_elem(msg, i); 2770 rsge = sk_msg_elem_cpy(msg, i); 2771 2772 psge->length = start - offset; 2773 rsge.length -= psge->length; 2774 rsge.offset += start; 2775 2776 sk_msg_iter_var_next(i); 2777 sg_unmark_end(psge); 2778 sg_unmark_end(&rsge); 2779 sk_msg_iter_next(msg, end); 2780 } 2781 2782 /* Slot(s) to place newly allocated data */ 2783 new = i; 2784 2785 /* Shift one or two slots as needed */ 2786 if (!copy) { 2787 sge = sk_msg_elem_cpy(msg, i); 2788 2789 sk_msg_iter_var_next(i); 2790 sg_unmark_end(&sge); 2791 sk_msg_iter_next(msg, end); 2792 2793 nsge = sk_msg_elem_cpy(msg, i); 2794 if (rsge.length) { 2795 sk_msg_iter_var_next(i); 2796 nnsge = sk_msg_elem_cpy(msg, i); 2797 } 2798 2799 while (i != msg->sg.end) { 2800 msg->sg.data[i] = sge; 2801 sge = nsge; 2802 sk_msg_iter_var_next(i); 2803 if (rsge.length) { 2804 nsge = nnsge; 2805 nnsge = sk_msg_elem_cpy(msg, i); 2806 } else { 2807 nsge = sk_msg_elem_cpy(msg, i); 2808 } 2809 } 2810 } 2811 2812 /* Place newly allocated data buffer */ 2813 sk_mem_charge(msg->sk, len); 2814 msg->sg.size += len; 2815 __clear_bit(new, msg->sg.copy); 2816 sg_set_page(&msg->sg.data[new], page, len + copy, 0); 2817 if (rsge.length) { 2818 get_page(sg_page(&rsge)); 2819 sk_msg_iter_var_next(new); 2820 
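/* Illustrative sketch, not part of this file: how an SK_MSG verdict program
 * attached to a sockmap would use bpf_msg_pull_data() before parsing. The
 * message payload is not guaranteed to be linear, so [msg->data, msg->data_end)
 * may cover less than the range the program wants to read; pulling the range
 * first makes it directly accessible. Names and the magic value are made up,
 * libbpf-style headers are assumed:
 *
 *	SEC("sk_msg")
 *	int check_magic(struct sk_msg_md *msg)
 *	{
 *		__u32 *magic;
 *
 *		if (bpf_msg_pull_data(msg, 0, sizeof(*magic), 0))
 *			return SK_DROP;
 *		if (msg->data + sizeof(*magic) > msg->data_end)
 *			return SK_DROP;
 *		magic = msg->data;
 *		return *magic == 0xcafe ? SK_PASS : SK_DROP;
 *	}
 *
 * bpf_msg_apply_bytes() and bpf_msg_cork_bytes() above bound how many bytes
 * the verdict applies to and how many bytes are accumulated before the
 * program runs again, respectively.
 */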
msg->sg.data[new] = rsge; 2821 } 2822 2823 sk_msg_compute_data_pointers(msg); 2824 return 0; 2825 } 2826 2827 static const struct bpf_func_proto bpf_msg_push_data_proto = { 2828 .func = bpf_msg_push_data, 2829 .gpl_only = false, 2830 .ret_type = RET_INTEGER, 2831 .arg1_type = ARG_PTR_TO_CTX, 2832 .arg2_type = ARG_ANYTHING, 2833 .arg3_type = ARG_ANYTHING, 2834 .arg4_type = ARG_ANYTHING, 2835 }; 2836 2837 static void sk_msg_shift_left(struct sk_msg *msg, int i) 2838 { 2839 int prev; 2840 2841 do { 2842 prev = i; 2843 sk_msg_iter_var_next(i); 2844 msg->sg.data[prev] = msg->sg.data[i]; 2845 } while (i != msg->sg.end); 2846 2847 sk_msg_iter_prev(msg, end); 2848 } 2849 2850 static void sk_msg_shift_right(struct sk_msg *msg, int i) 2851 { 2852 struct scatterlist tmp, sge; 2853 2854 sk_msg_iter_next(msg, end); 2855 sge = sk_msg_elem_cpy(msg, i); 2856 sk_msg_iter_var_next(i); 2857 tmp = sk_msg_elem_cpy(msg, i); 2858 2859 while (i != msg->sg.end) { 2860 msg->sg.data[i] = sge; 2861 sk_msg_iter_var_next(i); 2862 sge = tmp; 2863 tmp = sk_msg_elem_cpy(msg, i); 2864 } 2865 } 2866 2867 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, 2868 u32, len, u64, flags) 2869 { 2870 u32 i = 0, l = 0, space, offset = 0; 2871 u64 last = start + len; 2872 int pop; 2873 2874 if (unlikely(flags)) 2875 return -EINVAL; 2876 2877 /* First find the starting scatterlist element */ 2878 i = msg->sg.start; 2879 do { 2880 offset += l; 2881 l = sk_msg_elem(msg, i)->length; 2882 2883 if (start < offset + l) 2884 break; 2885 sk_msg_iter_var_next(i); 2886 } while (i != msg->sg.end); 2887 2888 /* Bounds checks: start and pop must be inside message */ 2889 if (start >= offset + l || last >= msg->sg.size) 2890 return -EINVAL; 2891 2892 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); 2893 2894 pop = len; 2895 /* --------------| offset 2896 * -| start |-------- len -------| 2897 * 2898 * |----- a ----|-------- pop -------|----- b ----| 2899 * |______________________________________________| length 2900 * 2901 * 2902 * a: region at front of scatter element to save 2903 * b: region at back of scatter element to save when length > A + pop 2904 * pop: region to pop from element, same as input 'pop' here will be 2905 * decremented below per iteration. 2906 * 2907 * Two top-level cases to handle when start != offset, first B is non 2908 * zero and second B is zero corresponding to when a pop includes more 2909 * than one element. 2910 * 2911 * Then if B is non-zero AND there is no space allocate space and 2912 * compact A, B regions into page. If there is space shift ring to 2913 * the rigth free'ing the next element in ring to place B, leaving 2914 * A untouched except to reduce length. 
2915 */ 2916 if (start != offset) { 2917 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); 2918 int a = start; 2919 int b = sge->length - pop - a; 2920 2921 sk_msg_iter_var_next(i); 2922 2923 if (pop < sge->length - a) { 2924 if (space) { 2925 sge->length = a; 2926 sk_msg_shift_right(msg, i); 2927 nsge = sk_msg_elem(msg, i); 2928 get_page(sg_page(sge)); 2929 sg_set_page(nsge, 2930 sg_page(sge), 2931 b, sge->offset + pop + a); 2932 } else { 2933 struct page *page, *orig; 2934 u8 *to, *from; 2935 2936 page = alloc_pages(__GFP_NOWARN | 2937 __GFP_COMP | GFP_ATOMIC, 2938 get_order(a + b)); 2939 if (unlikely(!page)) 2940 return -ENOMEM; 2941 2942 sge->length = a; 2943 orig = sg_page(sge); 2944 from = sg_virt(sge); 2945 to = page_address(page); 2946 memcpy(to, from, a); 2947 memcpy(to + a, from + a + pop, b); 2948 sg_set_page(sge, page, a + b, 0); 2949 put_page(orig); 2950 } 2951 pop = 0; 2952 } else if (pop >= sge->length - a) { 2953 pop -= (sge->length - a); 2954 sge->length = a; 2955 } 2956 } 2957 2958 /* From above the current layout _must_ be as follows, 2959 * 2960 * -| offset 2961 * -| start 2962 * 2963 * |---- pop ---|---------------- b ------------| 2964 * |____________________________________________| length 2965 * 2966 * Offset and start of the current msg elem are equal because in the 2967 * previous case we handled offset != start and either consumed the 2968 * entire element and advanced to the next element OR pop == 0. 2969 * 2970 * Two cases to handle here are first pop is less than the length 2971 * leaving some remainder b above. Simply adjust the element's layout 2972 * in this case. Or pop >= length of the element so that b = 0. In this 2973 * case advance to next element decrementing pop. 2974 */ 2975 while (pop) { 2976 struct scatterlist *sge = sk_msg_elem(msg, i); 2977 2978 if (pop < sge->length) { 2979 sge->length -= pop; 2980 sge->offset += pop; 2981 pop = 0; 2982 } else { 2983 pop -= sge->length; 2984 sk_msg_shift_left(msg, i); 2985 } 2986 sk_msg_iter_var_next(i); 2987 } 2988 2989 sk_mem_uncharge(msg->sk, len - pop); 2990 msg->sg.size -= (len - pop); 2991 sk_msg_compute_data_pointers(msg); 2992 return 0; 2993 } 2994 2995 static const struct bpf_func_proto bpf_msg_pop_data_proto = { 2996 .func = bpf_msg_pop_data, 2997 .gpl_only = false, 2998 .ret_type = RET_INTEGER, 2999 .arg1_type = ARG_PTR_TO_CTX, 3000 .arg2_type = ARG_ANYTHING, 3001 .arg3_type = ARG_ANYTHING, 3002 .arg4_type = ARG_ANYTHING, 3003 }; 3004 3005 #ifdef CONFIG_CGROUP_NET_CLASSID 3006 BPF_CALL_0(bpf_get_cgroup_classid_curr) 3007 { 3008 return __task_get_classid(current); 3009 } 3010 3011 static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = { 3012 .func = bpf_get_cgroup_classid_curr, 3013 .gpl_only = false, 3014 .ret_type = RET_INTEGER, 3015 }; 3016 3017 BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb) 3018 { 3019 struct sock *sk = skb_to_full_sk(skb); 3020 3021 if (!sk || !sk_fullsock(sk)) 3022 return 0; 3023 3024 return sock_cgroup_classid(&sk->sk_cgrp_data); 3025 } 3026 3027 static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = { 3028 .func = bpf_skb_cgroup_classid, 3029 .gpl_only = false, 3030 .ret_type = RET_INTEGER, 3031 .arg1_type = ARG_PTR_TO_CTX, 3032 }; 3033 #endif 3034 3035 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) 3036 { 3037 return task_get_classid(skb); 3038 } 3039 3040 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { 3041 .func = bpf_get_cgroup_classid, 3042 .gpl_only = false, 3043 .ret_type = 
RET_INTEGER, 3044 .arg1_type = ARG_PTR_TO_CTX, 3045 }; 3046 3047 BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) 3048 { 3049 return dst_tclassid(skb); 3050 } 3051 3052 static const struct bpf_func_proto bpf_get_route_realm_proto = { 3053 .func = bpf_get_route_realm, 3054 .gpl_only = false, 3055 .ret_type = RET_INTEGER, 3056 .arg1_type = ARG_PTR_TO_CTX, 3057 }; 3058 3059 BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) 3060 { 3061 /* If skb_clear_hash() was called due to mangling, we can 3062 * trigger SW recalculation here. Later access to hash 3063 * can then use the inline skb->hash via context directly 3064 * instead of calling this helper again. 3065 */ 3066 return skb_get_hash(skb); 3067 } 3068 3069 static const struct bpf_func_proto bpf_get_hash_recalc_proto = { 3070 .func = bpf_get_hash_recalc, 3071 .gpl_only = false, 3072 .ret_type = RET_INTEGER, 3073 .arg1_type = ARG_PTR_TO_CTX, 3074 }; 3075 3076 BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) 3077 { 3078 /* After all direct packet write, this can be used once for 3079 * triggering a lazy recalc on next skb_get_hash() invocation. 3080 */ 3081 skb_clear_hash(skb); 3082 return 0; 3083 } 3084 3085 static const struct bpf_func_proto bpf_set_hash_invalid_proto = { 3086 .func = bpf_set_hash_invalid, 3087 .gpl_only = false, 3088 .ret_type = RET_INTEGER, 3089 .arg1_type = ARG_PTR_TO_CTX, 3090 }; 3091 3092 BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) 3093 { 3094 /* Set user specified hash as L4(+), so that it gets returned 3095 * on skb_get_hash() call unless BPF prog later on triggers a 3096 * skb_clear_hash(). 3097 */ 3098 __skb_set_sw_hash(skb, hash, true); 3099 return 0; 3100 } 3101 3102 static const struct bpf_func_proto bpf_set_hash_proto = { 3103 .func = bpf_set_hash, 3104 .gpl_only = false, 3105 .ret_type = RET_INTEGER, 3106 .arg1_type = ARG_PTR_TO_CTX, 3107 .arg2_type = ARG_ANYTHING, 3108 }; 3109 3110 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, 3111 u16, vlan_tci) 3112 { 3113 int ret; 3114 3115 if (unlikely(vlan_proto != htons(ETH_P_8021Q) && 3116 vlan_proto != htons(ETH_P_8021AD))) 3117 vlan_proto = htons(ETH_P_8021Q); 3118 3119 bpf_push_mac_rcsum(skb); 3120 ret = skb_vlan_push(skb, vlan_proto, vlan_tci); 3121 bpf_pull_mac_rcsum(skb); 3122 3123 bpf_compute_data_pointers(skb); 3124 return ret; 3125 } 3126 3127 static const struct bpf_func_proto bpf_skb_vlan_push_proto = { 3128 .func = bpf_skb_vlan_push, 3129 .gpl_only = false, 3130 .ret_type = RET_INTEGER, 3131 .arg1_type = ARG_PTR_TO_CTX, 3132 .arg2_type = ARG_ANYTHING, 3133 .arg3_type = ARG_ANYTHING, 3134 }; 3135 3136 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) 3137 { 3138 int ret; 3139 3140 bpf_push_mac_rcsum(skb); 3141 ret = skb_vlan_pop(skb); 3142 bpf_pull_mac_rcsum(skb); 3143 3144 bpf_compute_data_pointers(skb); 3145 return ret; 3146 } 3147 3148 static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { 3149 .func = bpf_skb_vlan_pop, 3150 .gpl_only = false, 3151 .ret_type = RET_INTEGER, 3152 .arg1_type = ARG_PTR_TO_CTX, 3153 }; 3154 3155 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) 3156 { 3157 /* Caller already did skb_cow() with len as headroom, 3158 * so no need to do it here. 
3159 */ 3160 skb_push(skb, len); 3161 memmove(skb->data, skb->data + len, off); 3162 memset(skb->data + off, 0, len); 3163 3164 /* No skb_postpush_rcsum(skb, skb->data + off, len) 3165 * needed here as it does not change the skb->csum 3166 * result for checksum complete when summing over 3167 * zeroed blocks. 3168 */ 3169 return 0; 3170 } 3171 3172 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) 3173 { 3174 /* skb_ensure_writable() is not needed here, as we're 3175 * already working on an uncloned skb. 3176 */ 3177 if (unlikely(!pskb_may_pull(skb, off + len))) 3178 return -ENOMEM; 3179 3180 skb_postpull_rcsum(skb, skb->data + off, len); 3181 memmove(skb->data + len, skb->data, off); 3182 __skb_pull(skb, len); 3183 3184 return 0; 3185 } 3186 3187 static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) 3188 { 3189 bool trans_same = skb->transport_header == skb->network_header; 3190 int ret; 3191 3192 /* There's no need for __skb_push()/__skb_pull() pair to 3193 * get to the start of the mac header as we're guaranteed 3194 * to always start from here under eBPF. 3195 */ 3196 ret = bpf_skb_generic_push(skb, off, len); 3197 if (likely(!ret)) { 3198 skb->mac_header -= len; 3199 skb->network_header -= len; 3200 if (trans_same) 3201 skb->transport_header = skb->network_header; 3202 } 3203 3204 return ret; 3205 } 3206 3207 static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) 3208 { 3209 bool trans_same = skb->transport_header == skb->network_header; 3210 int ret; 3211 3212 /* Same here, __skb_push()/__skb_pull() pair not needed. */ 3213 ret = bpf_skb_generic_pop(skb, off, len); 3214 if (likely(!ret)) { 3215 skb->mac_header += len; 3216 skb->network_header += len; 3217 if (trans_same) 3218 skb->transport_header = skb->network_header; 3219 } 3220 3221 return ret; 3222 } 3223 3224 static int bpf_skb_proto_4_to_6(struct sk_buff *skb) 3225 { 3226 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); 3227 u32 off = skb_mac_header_len(skb); 3228 int ret; 3229 3230 ret = skb_cow(skb, len_diff); 3231 if (unlikely(ret < 0)) 3232 return ret; 3233 3234 ret = bpf_skb_net_hdr_push(skb, off, len_diff); 3235 if (unlikely(ret < 0)) 3236 return ret; 3237 3238 if (skb_is_gso(skb)) { 3239 struct skb_shared_info *shinfo = skb_shinfo(skb); 3240 3241 /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */ 3242 if (shinfo->gso_type & SKB_GSO_TCPV4) { 3243 shinfo->gso_type &= ~SKB_GSO_TCPV4; 3244 shinfo->gso_type |= SKB_GSO_TCPV6; 3245 } 3246 } 3247 3248 skb->protocol = htons(ETH_P_IPV6); 3249 skb_clear_hash(skb); 3250 3251 return 0; 3252 } 3253 3254 static int bpf_skb_proto_6_to_4(struct sk_buff *skb) 3255 { 3256 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); 3257 u32 off = skb_mac_header_len(skb); 3258 int ret; 3259 3260 ret = skb_unclone(skb, GFP_ATOMIC); 3261 if (unlikely(ret < 0)) 3262 return ret; 3263 3264 ret = bpf_skb_net_hdr_pop(skb, off, len_diff); 3265 if (unlikely(ret < 0)) 3266 return ret; 3267 3268 if (skb_is_gso(skb)) { 3269 struct skb_shared_info *shinfo = skb_shinfo(skb); 3270 3271 /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. 
*/ 3272 if (shinfo->gso_type & SKB_GSO_TCPV6) { 3273 shinfo->gso_type &= ~SKB_GSO_TCPV6; 3274 shinfo->gso_type |= SKB_GSO_TCPV4; 3275 } 3276 } 3277 3278 skb->protocol = htons(ETH_P_IP); 3279 skb_clear_hash(skb); 3280 3281 return 0; 3282 } 3283 3284 static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) 3285 { 3286 __be16 from_proto = skb->protocol; 3287 3288 if (from_proto == htons(ETH_P_IP) && 3289 to_proto == htons(ETH_P_IPV6)) 3290 return bpf_skb_proto_4_to_6(skb); 3291 3292 if (from_proto == htons(ETH_P_IPV6) && 3293 to_proto == htons(ETH_P_IP)) 3294 return bpf_skb_proto_6_to_4(skb); 3295 3296 return -ENOTSUPP; 3297 } 3298 3299 BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, 3300 u64, flags) 3301 { 3302 int ret; 3303 3304 if (unlikely(flags)) 3305 return -EINVAL; 3306 3307 /* General idea is that this helper does the basic groundwork 3308 * needed for changing the protocol, and eBPF program fills the 3309 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() 3310 * and other helpers, rather than passing a raw buffer here. 3311 * 3312 * The rationale is to keep this minimal and without a need to 3313 * deal with raw packet data. F.e. even if we would pass buffers 3314 * here, the program still needs to call the bpf_lX_csum_replace() 3315 * helpers anyway. Plus, this way we keep also separation of 3316 * concerns, since f.e. bpf_skb_store_bytes() should only take 3317 * care of stores. 3318 * 3319 * Currently, additional options and extension header space are 3320 * not supported, but flags register is reserved so we can adapt 3321 * that. For offloads, we mark packet as dodgy, so that headers 3322 * need to be verified first. 3323 */ 3324 ret = bpf_skb_proto_xlat(skb, proto); 3325 bpf_compute_data_pointers(skb); 3326 return ret; 3327 } 3328 3329 static const struct bpf_func_proto bpf_skb_change_proto_proto = { 3330 .func = bpf_skb_change_proto, 3331 .gpl_only = false, 3332 .ret_type = RET_INTEGER, 3333 .arg1_type = ARG_PTR_TO_CTX, 3334 .arg2_type = ARG_ANYTHING, 3335 .arg3_type = ARG_ANYTHING, 3336 }; 3337 3338 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) 3339 { 3340 /* We only allow a restricted subset to be changed for now. 
*/ 3341 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || 3342 !skb_pkt_type_ok(pkt_type))) 3343 return -EINVAL; 3344 3345 skb->pkt_type = pkt_type; 3346 return 0; 3347 } 3348 3349 static const struct bpf_func_proto bpf_skb_change_type_proto = { 3350 .func = bpf_skb_change_type, 3351 .gpl_only = false, 3352 .ret_type = RET_INTEGER, 3353 .arg1_type = ARG_PTR_TO_CTX, 3354 .arg2_type = ARG_ANYTHING, 3355 }; 3356 3357 static u32 bpf_skb_net_base_len(const struct sk_buff *skb) 3358 { 3359 switch (skb->protocol) { 3360 case htons(ETH_P_IP): 3361 return sizeof(struct iphdr); 3362 case htons(ETH_P_IPV6): 3363 return sizeof(struct ipv6hdr); 3364 default: 3365 return ~0U; 3366 } 3367 } 3368 3369 #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \ 3370 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3371 3372 #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ 3373 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ 3374 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ 3375 BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ 3376 BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \ 3377 BPF_F_ADJ_ROOM_ENCAP_L2( \ 3378 BPF_ADJ_ROOM_ENCAP_L2_MASK)) 3379 3380 static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, 3381 u64 flags) 3382 { 3383 u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT; 3384 bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK; 3385 u16 mac_len = 0, inner_net = 0, inner_trans = 0; 3386 unsigned int gso_type = SKB_GSO_DODGY; 3387 int ret; 3388 3389 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { 3390 /* udp gso_size delineates datagrams, only allow if fixed */ 3391 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || 3392 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3393 return -ENOTSUPP; 3394 } 3395 3396 ret = skb_cow_head(skb, len_diff); 3397 if (unlikely(ret < 0)) 3398 return ret; 3399 3400 if (encap) { 3401 if (skb->protocol != htons(ETH_P_IP) && 3402 skb->protocol != htons(ETH_P_IPV6)) 3403 return -ENOTSUPP; 3404 3405 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 && 3406 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3407 return -EINVAL; 3408 3409 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE && 3410 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) 3411 return -EINVAL; 3412 3413 if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH && 3414 inner_mac_len < ETH_HLEN) 3415 return -EINVAL; 3416 3417 if (skb->encapsulation) 3418 return -EALREADY; 3419 3420 mac_len = skb->network_header - skb->mac_header; 3421 inner_net = skb->network_header; 3422 if (inner_mac_len > len_diff) 3423 return -EINVAL; 3424 inner_trans = skb->transport_header; 3425 } 3426 3427 ret = bpf_skb_net_hdr_push(skb, off, len_diff); 3428 if (unlikely(ret < 0)) 3429 return ret; 3430 3431 if (encap) { 3432 skb->inner_mac_header = inner_net - inner_mac_len; 3433 skb->inner_network_header = inner_net; 3434 skb->inner_transport_header = inner_trans; 3435 3436 if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH) 3437 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 3438 else 3439 skb_set_inner_protocol(skb, skb->protocol); 3440 3441 skb->encapsulation = 1; 3442 skb_set_network_header(skb, mac_len); 3443 3444 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) 3445 gso_type |= SKB_GSO_UDP_TUNNEL; 3446 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE) 3447 gso_type |= SKB_GSO_GRE; 3448 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3449 gso_type |= SKB_GSO_IPXIP6; 3450 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) 3451 gso_type |= SKB_GSO_IPXIP4; 3452 3453 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE || 3454 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) { 3455 int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ? 
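/* Illustrative sketch, not part of this file: requesting the encap handling
 * in bpf_skb_net_grow() from a tc BPF program via the bpf_skb_adjust_room()
 * helper defined below. The program grows room for an outer IPv4 + GRE header
 * at the MAC layer and then has to write the outer headers itself; names are
 * made up and libbpf-style headers plus <linux/ip.h> and <linux/pkt_cls.h>
 * are assumed:
 *
 *	SEC("tc")
 *	int gre_encap(struct __sk_buff *skb)
 *	{
 *		__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
 *			      BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
 *		__u32 room  = sizeof(struct iphdr) + 4;
 *
 *		if (bpf_skb_adjust_room(skb, room, BPF_ADJ_ROOM_MAC, flags))
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}
 *
 * The 4 bytes on top of the IPv4 header are the base GRE header; the new room
 * is zeroed, so the outer headers still need to be filled in with
 * bpf_skb_store_bytes() before the packet leaves.
 */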
3456 sizeof(struct ipv6hdr) : 3457 sizeof(struct iphdr); 3458 3459 skb_set_transport_header(skb, mac_len + nh_len); 3460 } 3461 3462 /* Match skb->protocol to new outer l3 protocol */ 3463 if (skb->protocol == htons(ETH_P_IP) && 3464 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3465 skb->protocol = htons(ETH_P_IPV6); 3466 else if (skb->protocol == htons(ETH_P_IPV6) && 3467 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) 3468 skb->protocol = htons(ETH_P_IP); 3469 } 3470 3471 if (skb_is_gso(skb)) { 3472 struct skb_shared_info *shinfo = skb_shinfo(skb); 3473 3474 /* Due to header grow, MSS needs to be downgraded. */ 3475 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3476 skb_decrease_gso_size(shinfo, len_diff); 3477 3478 /* Header must be checked, and gso_segs recomputed. */ 3479 shinfo->gso_type |= gso_type; 3480 shinfo->gso_segs = 0; 3481 } 3482 3483 return 0; 3484 } 3485 3486 static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, 3487 u64 flags) 3488 { 3489 int ret; 3490 3491 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO | 3492 BPF_F_ADJ_ROOM_NO_CSUM_RESET))) 3493 return -EINVAL; 3494 3495 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { 3496 /* udp gso_size delineates datagrams, only allow if fixed */ 3497 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || 3498 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3499 return -ENOTSUPP; 3500 } 3501 3502 ret = skb_unclone(skb, GFP_ATOMIC); 3503 if (unlikely(ret < 0)) 3504 return ret; 3505 3506 ret = bpf_skb_net_hdr_pop(skb, off, len_diff); 3507 if (unlikely(ret < 0)) 3508 return ret; 3509 3510 if (skb_is_gso(skb)) { 3511 struct skb_shared_info *shinfo = skb_shinfo(skb); 3512 3513 /* Due to header shrink, MSS can be upgraded. */ 3514 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3515 skb_increase_gso_size(shinfo, len_diff); 3516 3517 /* Header must be checked, and gso_segs recomputed. 
*/ 3518 shinfo->gso_type |= SKB_GSO_DODGY; 3519 shinfo->gso_segs = 0; 3520 } 3521 3522 return 0; 3523 } 3524 3525 #define BPF_SKB_MAX_LEN SKB_MAX_ALLOC 3526 3527 BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, 3528 u32, mode, u64, flags) 3529 { 3530 u32 len_diff_abs = abs(len_diff); 3531 bool shrink = len_diff < 0; 3532 int ret = 0; 3533 3534 if (unlikely(flags || mode)) 3535 return -EINVAL; 3536 if (unlikely(len_diff_abs > 0xfffU)) 3537 return -EFAULT; 3538 3539 if (!shrink) { 3540 ret = skb_cow(skb, len_diff); 3541 if (unlikely(ret < 0)) 3542 return ret; 3543 __skb_push(skb, len_diff_abs); 3544 memset(skb->data, 0, len_diff_abs); 3545 } else { 3546 if (unlikely(!pskb_may_pull(skb, len_diff_abs))) 3547 return -ENOMEM; 3548 __skb_pull(skb, len_diff_abs); 3549 } 3550 if (tls_sw_has_ctx_rx(skb->sk)) { 3551 struct strp_msg *rxm = strp_msg(skb); 3552 3553 rxm->full_len += len_diff; 3554 } 3555 return ret; 3556 } 3557 3558 static const struct bpf_func_proto sk_skb_adjust_room_proto = { 3559 .func = sk_skb_adjust_room, 3560 .gpl_only = false, 3561 .ret_type = RET_INTEGER, 3562 .arg1_type = ARG_PTR_TO_CTX, 3563 .arg2_type = ARG_ANYTHING, 3564 .arg3_type = ARG_ANYTHING, 3565 .arg4_type = ARG_ANYTHING, 3566 }; 3567 3568 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, 3569 u32, mode, u64, flags) 3570 { 3571 u32 len_cur, len_diff_abs = abs(len_diff); 3572 u32 len_min = bpf_skb_net_base_len(skb); 3573 u32 len_max = BPF_SKB_MAX_LEN; 3574 __be16 proto = skb->protocol; 3575 bool shrink = len_diff < 0; 3576 u32 off; 3577 int ret; 3578 3579 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK | 3580 BPF_F_ADJ_ROOM_NO_CSUM_RESET))) 3581 return -EINVAL; 3582 if (unlikely(len_diff_abs > 0xfffU)) 3583 return -EFAULT; 3584 if (unlikely(proto != htons(ETH_P_IP) && 3585 proto != htons(ETH_P_IPV6))) 3586 return -ENOTSUPP; 3587 3588 off = skb_mac_header_len(skb); 3589 switch (mode) { 3590 case BPF_ADJ_ROOM_NET: 3591 off += bpf_skb_net_base_len(skb); 3592 break; 3593 case BPF_ADJ_ROOM_MAC: 3594 break; 3595 default: 3596 return -ENOTSUPP; 3597 } 3598 3599 len_cur = skb->len - skb_network_offset(skb); 3600 if ((shrink && (len_diff_abs >= len_cur || 3601 len_cur - len_diff_abs < len_min)) || 3602 (!shrink && (skb->len + len_diff_abs > len_max && 3603 !skb_is_gso(skb)))) 3604 return -ENOTSUPP; 3605 3606 ret = shrink ? 
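/* Illustrative sketch, not part of this file: the reverse (decap) direction of
 * the same helper, shrinking at the MAC layer after the program has parsed the
 * outer headers. OUTER_HLEN stands for the parsed outer header length and is
 * made up for the example:
 *
 *	if (bpf_skb_adjust_room(skb, -(__s32)OUTER_HLEN, BPF_ADJ_ROOM_MAC,
 *				BPF_F_ADJ_ROOM_NO_CSUM_RESET))
 *		return TC_ACT_SHOT;
 *
 * BPF_F_ADJ_ROOM_NO_CSUM_RESET keeps skb->ip_summed/csum_level untouched so
 * the program can adjust them explicitly with bpf_csum_level() instead.
 */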
bpf_skb_net_shrink(skb, off, len_diff_abs, flags) : 3607 bpf_skb_net_grow(skb, off, len_diff_abs, flags); 3608 if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET)) 3609 __skb_reset_checksum_unnecessary(skb); 3610 3611 bpf_compute_data_pointers(skb); 3612 return ret; 3613 } 3614 3615 static const struct bpf_func_proto bpf_skb_adjust_room_proto = { 3616 .func = bpf_skb_adjust_room, 3617 .gpl_only = false, 3618 .ret_type = RET_INTEGER, 3619 .arg1_type = ARG_PTR_TO_CTX, 3620 .arg2_type = ARG_ANYTHING, 3621 .arg3_type = ARG_ANYTHING, 3622 .arg4_type = ARG_ANYTHING, 3623 }; 3624 3625 static u32 __bpf_skb_min_len(const struct sk_buff *skb) 3626 { 3627 u32 min_len = skb_network_offset(skb); 3628 3629 if (skb_transport_header_was_set(skb)) 3630 min_len = skb_transport_offset(skb); 3631 if (skb->ip_summed == CHECKSUM_PARTIAL) 3632 min_len = skb_checksum_start_offset(skb) + 3633 skb->csum_offset + sizeof(__sum16); 3634 return min_len; 3635 } 3636 3637 static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) 3638 { 3639 unsigned int old_len = skb->len; 3640 int ret; 3641 3642 ret = __skb_grow_rcsum(skb, new_len); 3643 if (!ret) 3644 memset(skb->data + old_len, 0, new_len - old_len); 3645 return ret; 3646 } 3647 3648 static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) 3649 { 3650 return __skb_trim_rcsum(skb, new_len); 3651 } 3652 3653 static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, 3654 u64 flags) 3655 { 3656 u32 max_len = BPF_SKB_MAX_LEN; 3657 u32 min_len = __bpf_skb_min_len(skb); 3658 int ret; 3659 3660 if (unlikely(flags || new_len > max_len || new_len < min_len)) 3661 return -EINVAL; 3662 if (skb->encapsulation) 3663 return -ENOTSUPP; 3664 3665 /* The basic idea of this helper is that it's performing the 3666 * needed work to either grow or trim an skb, and eBPF program 3667 * rewrites the rest via helpers like bpf_skb_store_bytes(), 3668 * bpf_lX_csum_replace() and others rather than passing a raw 3669 * buffer here. This one is a slow path helper and intended 3670 * for replies with control messages. 3671 * 3672 * Like in bpf_skb_change_proto(), we want to keep this rather 3673 * minimal and without protocol specifics so that we are able 3674 * to separate concerns as in bpf_skb_store_bytes() should only 3675 * be the one responsible for writing buffers. 3676 * 3677 * It's really expected to be a slow path operation here for 3678 * control message replies, so we're implicitly linearizing, 3679 * uncloning and drop offloads from the skb by this. 
3680 */ 3681 ret = __bpf_try_make_writable(skb, skb->len); 3682 if (!ret) { 3683 if (new_len > skb->len) 3684 ret = bpf_skb_grow_rcsum(skb, new_len); 3685 else if (new_len < skb->len) 3686 ret = bpf_skb_trim_rcsum(skb, new_len); 3687 if (!ret && skb_is_gso(skb)) 3688 skb_gso_reset(skb); 3689 } 3690 return ret; 3691 } 3692 3693 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, 3694 u64, flags) 3695 { 3696 int ret = __bpf_skb_change_tail(skb, new_len, flags); 3697 3698 bpf_compute_data_pointers(skb); 3699 return ret; 3700 } 3701 3702 static const struct bpf_func_proto bpf_skb_change_tail_proto = { 3703 .func = bpf_skb_change_tail, 3704 .gpl_only = false, 3705 .ret_type = RET_INTEGER, 3706 .arg1_type = ARG_PTR_TO_CTX, 3707 .arg2_type = ARG_ANYTHING, 3708 .arg3_type = ARG_ANYTHING, 3709 }; 3710 3711 BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, 3712 u64, flags) 3713 { 3714 return __bpf_skb_change_tail(skb, new_len, flags); 3715 } 3716 3717 static const struct bpf_func_proto sk_skb_change_tail_proto = { 3718 .func = sk_skb_change_tail, 3719 .gpl_only = false, 3720 .ret_type = RET_INTEGER, 3721 .arg1_type = ARG_PTR_TO_CTX, 3722 .arg2_type = ARG_ANYTHING, 3723 .arg3_type = ARG_ANYTHING, 3724 }; 3725 3726 static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, 3727 u64 flags) 3728 { 3729 u32 max_len = BPF_SKB_MAX_LEN; 3730 u32 new_len = skb->len + head_room; 3731 int ret; 3732 3733 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || 3734 new_len < skb->len)) 3735 return -EINVAL; 3736 3737 ret = skb_cow(skb, head_room); 3738 if (likely(!ret)) { 3739 /* Idea for this helper is that we currently only 3740 * allow to expand on mac header. This means that 3741 * skb->protocol network header, etc, stay as is. 3742 * Compared to bpf_skb_change_tail(), we're more 3743 * flexible due to not needing to linearize or 3744 * reset GSO. Intention for this helper is to be 3745 * used by an L3 skb that needs to push mac header 3746 * for redirection into L2 device. 
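 *
 * As an illustration (not taken from this file): an lwt_xmit program can
 * call bpf_skb_change_head(skb, ETH_HLEN, 0), write the new Ethernet header
 * into the zeroed headroom with bpf_skb_store_bytes(), and then return
 * bpf_redirect() towards the L2 device.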
3747 */ 3748 __skb_push(skb, head_room); 3749 memset(skb->data, 0, head_room); 3750 skb_reset_mac_header(skb); 3751 skb_reset_mac_len(skb); 3752 } 3753 3754 return ret; 3755 } 3756 3757 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, 3758 u64, flags) 3759 { 3760 int ret = __bpf_skb_change_head(skb, head_room, flags); 3761 3762 bpf_compute_data_pointers(skb); 3763 return ret; 3764 } 3765 3766 static const struct bpf_func_proto bpf_skb_change_head_proto = { 3767 .func = bpf_skb_change_head, 3768 .gpl_only = false, 3769 .ret_type = RET_INTEGER, 3770 .arg1_type = ARG_PTR_TO_CTX, 3771 .arg2_type = ARG_ANYTHING, 3772 .arg3_type = ARG_ANYTHING, 3773 }; 3774 3775 BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, 3776 u64, flags) 3777 { 3778 return __bpf_skb_change_head(skb, head_room, flags); 3779 } 3780 3781 static const struct bpf_func_proto sk_skb_change_head_proto = { 3782 .func = sk_skb_change_head, 3783 .gpl_only = false, 3784 .ret_type = RET_INTEGER, 3785 .arg1_type = ARG_PTR_TO_CTX, 3786 .arg2_type = ARG_ANYTHING, 3787 .arg3_type = ARG_ANYTHING, 3788 }; 3789 3790 BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp) 3791 { 3792 return xdp_get_buff_len(xdp); 3793 } 3794 3795 static const struct bpf_func_proto bpf_xdp_get_buff_len_proto = { 3796 .func = bpf_xdp_get_buff_len, 3797 .gpl_only = false, 3798 .ret_type = RET_INTEGER, 3799 .arg1_type = ARG_PTR_TO_CTX, 3800 }; 3801 3802 BTF_ID_LIST_SINGLE(bpf_xdp_get_buff_len_bpf_ids, struct, xdp_buff) 3803 3804 const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto = { 3805 .func = bpf_xdp_get_buff_len, 3806 .gpl_only = false, 3807 .arg1_type = ARG_PTR_TO_BTF_ID, 3808 .arg1_btf_id = &bpf_xdp_get_buff_len_bpf_ids[0], 3809 }; 3810 3811 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) 3812 { 3813 return xdp_data_meta_unsupported(xdp) ? 0 : 3814 xdp->data - xdp->data_meta; 3815 } 3816 3817 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset) 3818 { 3819 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); 3820 unsigned long metalen = xdp_get_metalen(xdp); 3821 void *data_start = xdp_frame_end + metalen; 3822 void *data = xdp->data + offset; 3823 3824 if (unlikely(data < data_start || 3825 data > xdp->data_end - ETH_HLEN)) 3826 return -EINVAL; 3827 3828 if (metalen) 3829 memmove(xdp->data_meta + offset, 3830 xdp->data_meta, metalen); 3831 xdp->data_meta += offset; 3832 xdp->data = data; 3833 3834 return 0; 3835 } 3836 3837 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { 3838 .func = bpf_xdp_adjust_head, 3839 .gpl_only = false, 3840 .ret_type = RET_INTEGER, 3841 .arg1_type = ARG_PTR_TO_CTX, 3842 .arg2_type = ARG_ANYTHING, 3843 }; 3844 3845 static void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, 3846 void *buf, unsigned long len, bool flush) 3847 { 3848 unsigned long ptr_len, ptr_off = 0; 3849 skb_frag_t *next_frag, *end_frag; 3850 struct skb_shared_info *sinfo; 3851 void *src, *dst; 3852 u8 *ptr_buf; 3853 3854 if (likely(xdp->data_end - xdp->data >= off + len)) { 3855 src = flush ? buf : xdp->data + off; 3856 dst = flush ? 
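/* Illustrative sketch, not part of this file: typical use of
 * bpf_xdp_adjust_head() above from an XDP program, here popping a fixed-size
 * outer encapsulation (outer Ethernet + IPv4 + UDP + VXLAN, 14 + 20 + 8 + 8
 * bytes) so that the inner Ethernet frame starts at xdp->data. The program
 * name is made up, a real program would validate the outer headers first, and
 * libbpf-style headers are assumed:
 *
 *	SEC("xdp")
 *	int vxlan_decap(struct xdp_md *ctx)
 *	{
 *		if (bpf_xdp_adjust_head(ctx, 14 + 20 + 8 + 8))
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 *
 * A negative offset grows headroom instead (e.g. to push an encapsulation
 * header); in both cases data/data_end move, so any previously derived packet
 * pointers must be re-checked before further loads or stores.
 */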
xdp->data + off : buf; 3857 memcpy(dst, src, len); 3858 return; 3859 } 3860 3861 sinfo = xdp_get_shared_info_from_buff(xdp); 3862 end_frag = &sinfo->frags[sinfo->nr_frags]; 3863 next_frag = &sinfo->frags[0]; 3864 3865 ptr_len = xdp->data_end - xdp->data; 3866 ptr_buf = xdp->data; 3867 3868 while (true) { 3869 if (off < ptr_off + ptr_len) { 3870 unsigned long copy_off = off - ptr_off; 3871 unsigned long copy_len = min(len, ptr_len - copy_off); 3872 3873 src = flush ? buf : ptr_buf + copy_off; 3874 dst = flush ? ptr_buf + copy_off : buf; 3875 memcpy(dst, src, copy_len); 3876 3877 off += copy_len; 3878 len -= copy_len; 3879 buf += copy_len; 3880 } 3881 3882 if (!len || next_frag == end_frag) 3883 break; 3884 3885 ptr_off += ptr_len; 3886 ptr_buf = skb_frag_address(next_frag); 3887 ptr_len = skb_frag_size(next_frag); 3888 next_frag++; 3889 } 3890 } 3891 3892 static void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) 3893 { 3894 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 3895 u32 size = xdp->data_end - xdp->data; 3896 void *addr = xdp->data; 3897 int i; 3898 3899 if (unlikely(offset > 0xffff || len > 0xffff)) 3900 return ERR_PTR(-EFAULT); 3901 3902 if (offset + len > xdp_get_buff_len(xdp)) 3903 return ERR_PTR(-EINVAL); 3904 3905 if (offset < size) /* linear area */ 3906 goto out; 3907 3908 offset -= size; 3909 for (i = 0; i < sinfo->nr_frags; i++) { /* paged area */ 3910 u32 frag_size = skb_frag_size(&sinfo->frags[i]); 3911 3912 if (offset < frag_size) { 3913 addr = skb_frag_address(&sinfo->frags[i]); 3914 size = frag_size; 3915 break; 3916 } 3917 offset -= frag_size; 3918 } 3919 out: 3920 return offset + len < size ? addr + offset : NULL; 3921 } 3922 3923 BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset, 3924 void *, buf, u32, len) 3925 { 3926 void *ptr; 3927 3928 ptr = bpf_xdp_pointer(xdp, offset, len); 3929 if (IS_ERR(ptr)) 3930 return PTR_ERR(ptr); 3931 3932 if (!ptr) 3933 bpf_xdp_copy_buf(xdp, offset, buf, len, false); 3934 else 3935 memcpy(buf, ptr, len); 3936 3937 return 0; 3938 } 3939 3940 static const struct bpf_func_proto bpf_xdp_load_bytes_proto = { 3941 .func = bpf_xdp_load_bytes, 3942 .gpl_only = false, 3943 .ret_type = RET_INTEGER, 3944 .arg1_type = ARG_PTR_TO_CTX, 3945 .arg2_type = ARG_ANYTHING, 3946 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 3947 .arg4_type = ARG_CONST_SIZE, 3948 }; 3949 3950 BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset, 3951 void *, buf, u32, len) 3952 { 3953 void *ptr; 3954 3955 ptr = bpf_xdp_pointer(xdp, offset, len); 3956 if (IS_ERR(ptr)) 3957 return PTR_ERR(ptr); 3958 3959 if (!ptr) 3960 bpf_xdp_copy_buf(xdp, offset, buf, len, true); 3961 else 3962 memcpy(ptr, buf, len); 3963 3964 return 0; 3965 } 3966 3967 static const struct bpf_func_proto bpf_xdp_store_bytes_proto = { 3968 .func = bpf_xdp_store_bytes, 3969 .gpl_only = false, 3970 .ret_type = RET_INTEGER, 3971 .arg1_type = ARG_PTR_TO_CTX, 3972 .arg2_type = ARG_ANYTHING, 3973 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 3974 .arg4_type = ARG_CONST_SIZE, 3975 }; 3976 3977 static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset) 3978 { 3979 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 3980 skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1]; 3981 struct xdp_rxq_info *rxq = xdp->rxq; 3982 unsigned int tailroom; 3983 3984 if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz) 3985 return -EOPNOTSUPP; 3986 3987 tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag); 3988 if (unlikely(offset > 
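/* Illustrative sketch, not part of this file: bpf_xdp_load_bytes() and
 * bpf_xdp_store_bytes() above work on multi-buffer (fragmented) XDP packets,
 * where direct data/data_end access only covers the linear area. The section
 * name follows the libbpf convention for frags-aware programs; the filter
 * itself is made up:
 *
 *	SEC("xdp.frags")
 *	int drop_l2_multicast(struct xdp_md *ctx)
 *	{
 *		__u8 eth[14];
 *
 *		if (bpf_xdp_load_bytes(ctx, 0, eth, sizeof(eth)))
 *			return XDP_DROP;
 *		return (eth[0] & 1) ? XDP_DROP : XDP_PASS;
 *	}
 */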
tailroom)) 3989 return -EINVAL; 3990 3991 memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset); 3992 skb_frag_size_add(frag, offset); 3993 sinfo->xdp_frags_size += offset; 3994 3995 return 0; 3996 } 3997 3998 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset) 3999 { 4000 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 4001 int i, n_frags_free = 0, len_free = 0; 4002 4003 if (unlikely(offset > (int)xdp_get_buff_len(xdp) - ETH_HLEN)) 4004 return -EINVAL; 4005 4006 for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) { 4007 skb_frag_t *frag = &sinfo->frags[i]; 4008 int shrink = min_t(int, offset, skb_frag_size(frag)); 4009 4010 len_free += shrink; 4011 offset -= shrink; 4012 4013 if (skb_frag_size(frag) == shrink) { 4014 struct page *page = skb_frag_page(frag); 4015 4016 __xdp_return(page_address(page), &xdp->rxq->mem, 4017 false, NULL); 4018 n_frags_free++; 4019 } else { 4020 skb_frag_size_sub(frag, shrink); 4021 break; 4022 } 4023 } 4024 sinfo->nr_frags -= n_frags_free; 4025 sinfo->xdp_frags_size -= len_free; 4026 4027 if (unlikely(!sinfo->nr_frags)) { 4028 xdp_buff_clear_frags_flag(xdp); 4029 xdp->data_end -= offset; 4030 } 4031 4032 return 0; 4033 } 4034 4035 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) 4036 { 4037 void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */ 4038 void *data_end = xdp->data_end + offset; 4039 4040 if (unlikely(xdp_buff_has_frags(xdp))) { /* non-linear xdp buff */ 4041 if (offset < 0) 4042 return bpf_xdp_frags_shrink_tail(xdp, -offset); 4043 4044 return bpf_xdp_frags_increase_tail(xdp, offset); 4045 } 4046 4047 /* Notice that xdp_data_hard_end have reserved some tailroom */ 4048 if (unlikely(data_end > data_hard_end)) 4049 return -EINVAL; 4050 4051 /* ALL drivers MUST init xdp->frame_sz, chicken check below */ 4052 if (unlikely(xdp->frame_sz > PAGE_SIZE)) { 4053 WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz); 4054 return -EINVAL; 4055 } 4056 4057 if (unlikely(data_end < xdp->data + ETH_HLEN)) 4058 return -EINVAL; 4059 4060 /* Clear memory area on grow, can contain uninit kernel memory */ 4061 if (offset > 0) 4062 memset(xdp->data_end, 0, offset); 4063 4064 xdp->data_end = data_end; 4065 4066 return 0; 4067 } 4068 4069 static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = { 4070 .func = bpf_xdp_adjust_tail, 4071 .gpl_only = false, 4072 .ret_type = RET_INTEGER, 4073 .arg1_type = ARG_PTR_TO_CTX, 4074 .arg2_type = ARG_ANYTHING, 4075 }; 4076 4077 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) 4078 { 4079 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); 4080 void *meta = xdp->data_meta + offset; 4081 unsigned long metalen = xdp->data - meta; 4082 4083 if (xdp_data_meta_unsupported(xdp)) 4084 return -ENOTSUPP; 4085 if (unlikely(meta < xdp_frame_end || 4086 meta > xdp->data)) 4087 return -EINVAL; 4088 if (unlikely(xdp_metalen_invalid(metalen))) 4089 return -EACCES; 4090 4091 xdp->data_meta = meta; 4092 4093 return 0; 4094 } 4095 4096 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { 4097 .func = bpf_xdp_adjust_meta, 4098 .gpl_only = false, 4099 .ret_type = RET_INTEGER, 4100 .arg1_type = ARG_PTR_TO_CTX, 4101 .arg2_type = ARG_ANYTHING, 4102 }; 4103 4104 /* XDP_REDIRECT works by a three-step process, implemented in the functions 4105 * below: 4106 * 4107 * 1. 
The bpf_redirect() and bpf_redirect_map() helpers will look up the target
4108  * of the redirect and store it (along with some other metadata) in a per-CPU
4109  * struct bpf_redirect_info.
4110  *
4111  * 2. When the program returns the XDP_REDIRECT return code, the driver will
4112  * call xdp_do_redirect() which will use the information in struct
4113  * bpf_redirect_info to actually enqueue the frame into a map type-specific
4114  * bulk queue structure.
4115  *
4116  * 3. Before exiting its NAPI poll loop, the driver will call xdp_do_flush(),
4117  * which will flush all the different bulk queues, thus completing the
4118  * redirect.
4119  *
4120  * Pointers to the map entries will be kept around for this whole sequence of
4121  * steps, protected by RCU. However, there is no top-level rcu_read_lock() in
4122  * the core code; instead, the RCU protection relies on everything happening
4123  * inside a single NAPI poll sequence, which means it's between a pair of calls
4124  * to local_bh_disable()/local_bh_enable().
4125  *
4126  * The map entries are marked as __rcu and the map code makes sure to
4127  * dereference those pointers with rcu_dereference_check() in a way that works
4128  * for both sections that hold an rcu_read_lock() and sections that are
4129  * called from NAPI without a separate rcu_read_lock(). The code below does not
4130  * use RCU annotations, but relies on those in the map code.
4131  */
4132 void xdp_do_flush(void)
4133 {
4134 	__dev_flush();
4135 	__cpu_map_flush();
4136 	__xsk_map_flush();
4137 }
4138 EXPORT_SYMBOL_GPL(xdp_do_flush);
4139
4140 void bpf_clear_redirect_map(struct bpf_map *map)
4141 {
4142 	struct bpf_redirect_info *ri;
4143 	int cpu;
4144
4145 	for_each_possible_cpu(cpu) {
4146 		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
4147 		/* Avoid polluting remote cacheline due to writes if
4148 		 * not needed. Once we pass this test, we need the
4149 		 * cmpxchg() to make sure it hasn't been changed in
4150 		 * the meantime by a remote CPU.
4151 		 */
4152 		if (unlikely(READ_ONCE(ri->map) == map))
4153 			cmpxchg(&ri->map, map, NULL);
4154 	}
4155 }
4156
4157 DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
4158 EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
4159
4160 u32 xdp_master_redirect(struct xdp_buff *xdp)
4161 {
4162 	struct net_device *master, *slave;
4163 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
4164
4165 	master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
4166 	slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
4167 	if (slave && slave != xdp->rxq->dev) {
4168 		/* The target device is different from the receiving device, so
4169 		 * redirect it to the new device.
4170 		 * Using XDP_REDIRECT gets the correct behaviour from XDP enabled
4171 		 * drivers to unmap the packet from their rx ring.
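		 *
		 * The assignments below follow the same convention as the
		 * bpf_redirect() helper further down: map_type == BPF_MAP_TYPE_UNSPEC
		 * together with map_id == INT_MAX (a value never handed out by
		 * map_idr) marks a plain ifindex-based redirect, which
		 * __xdp_do_redirect_frame() later resolves via dev_get_by_index_rcu().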
4172 */ 4173 ri->tgt_index = slave->ifindex; 4174 ri->map_id = INT_MAX; 4175 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4176 return XDP_REDIRECT; 4177 } 4178 return XDP_TX; 4179 } 4180 EXPORT_SYMBOL_GPL(xdp_master_redirect); 4181 4182 static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri, 4183 struct net_device *dev, 4184 struct xdp_buff *xdp, 4185 struct bpf_prog *xdp_prog) 4186 { 4187 enum bpf_map_type map_type = ri->map_type; 4188 void *fwd = ri->tgt_value; 4189 u32 map_id = ri->map_id; 4190 int err; 4191 4192 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4193 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4194 4195 err = __xsk_map_redirect(fwd, xdp); 4196 if (unlikely(err)) 4197 goto err; 4198 4199 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4200 return 0; 4201 err: 4202 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4203 return err; 4204 } 4205 4206 static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri, 4207 struct net_device *dev, 4208 struct xdp_frame *xdpf, 4209 struct bpf_prog *xdp_prog) 4210 { 4211 enum bpf_map_type map_type = ri->map_type; 4212 void *fwd = ri->tgt_value; 4213 u32 map_id = ri->map_id; 4214 struct bpf_map *map; 4215 int err; 4216 4217 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4218 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4219 4220 if (unlikely(!xdpf)) { 4221 err = -EOVERFLOW; 4222 goto err; 4223 } 4224 4225 switch (map_type) { 4226 case BPF_MAP_TYPE_DEVMAP: 4227 fallthrough; 4228 case BPF_MAP_TYPE_DEVMAP_HASH: 4229 map = READ_ONCE(ri->map); 4230 if (unlikely(map)) { 4231 WRITE_ONCE(ri->map, NULL); 4232 err = dev_map_enqueue_multi(xdpf, dev, map, 4233 ri->flags & BPF_F_EXCLUDE_INGRESS); 4234 } else { 4235 err = dev_map_enqueue(fwd, xdpf, dev); 4236 } 4237 break; 4238 case BPF_MAP_TYPE_CPUMAP: 4239 err = cpu_map_enqueue(fwd, xdpf, dev); 4240 break; 4241 case BPF_MAP_TYPE_UNSPEC: 4242 if (map_id == INT_MAX) { 4243 fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index); 4244 if (unlikely(!fwd)) { 4245 err = -EINVAL; 4246 break; 4247 } 4248 err = dev_xdp_enqueue(fwd, xdpf, dev); 4249 break; 4250 } 4251 fallthrough; 4252 default: 4253 err = -EBADRQC; 4254 } 4255 4256 if (unlikely(err)) 4257 goto err; 4258 4259 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4260 return 0; 4261 err: 4262 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4263 return err; 4264 } 4265 4266 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, 4267 struct bpf_prog *xdp_prog) 4268 { 4269 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4270 enum bpf_map_type map_type = ri->map_type; 4271 4272 /* XDP_REDIRECT is not fully supported yet for xdp frags since 4273 * not all XDP capable drivers can map non-linear xdp_frame in 4274 * ndo_xdp_xmit. 
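 * In practice this means a multi-frag xdp_buff can currently only be
 * redirected to a CPUMAP target; anything else fails the check below
 * with -EOPNOTSUPP.
 *
 * For orientation, a minimal, purely illustrative sketch of the
 * driver-side sequence this function participates in (names such as
 * xdp_prog and dev are placeholders, not taken from any particular
 * driver):
 *
 *	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *	switch (act) {
 *	case XDP_REDIRECT:
 *		if (xdp_do_redirect(dev, &xdp, xdp_prog) < 0)
 *			act = XDP_ABORTED;	// treat failure as a drop
 *		break;
 *	// ... other verdicts ...
 *	}
 *	// once, before leaving the NAPI poll loop:
 *	xdp_do_flush();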
4275 */ 4276 if (unlikely(xdp_buff_has_frags(xdp) && 4277 map_type != BPF_MAP_TYPE_CPUMAP)) 4278 return -EOPNOTSUPP; 4279 4280 if (map_type == BPF_MAP_TYPE_XSKMAP) 4281 return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); 4282 4283 return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp), 4284 xdp_prog); 4285 } 4286 EXPORT_SYMBOL_GPL(xdp_do_redirect); 4287 4288 int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp, 4289 struct xdp_frame *xdpf, struct bpf_prog *xdp_prog) 4290 { 4291 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4292 enum bpf_map_type map_type = ri->map_type; 4293 4294 if (map_type == BPF_MAP_TYPE_XSKMAP) 4295 return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); 4296 4297 return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog); 4298 } 4299 EXPORT_SYMBOL_GPL(xdp_do_redirect_frame); 4300 4301 static int xdp_do_generic_redirect_map(struct net_device *dev, 4302 struct sk_buff *skb, 4303 struct xdp_buff *xdp, 4304 struct bpf_prog *xdp_prog, 4305 void *fwd, 4306 enum bpf_map_type map_type, u32 map_id) 4307 { 4308 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4309 struct bpf_map *map; 4310 int err; 4311 4312 switch (map_type) { 4313 case BPF_MAP_TYPE_DEVMAP: 4314 fallthrough; 4315 case BPF_MAP_TYPE_DEVMAP_HASH: 4316 map = READ_ONCE(ri->map); 4317 if (unlikely(map)) { 4318 WRITE_ONCE(ri->map, NULL); 4319 err = dev_map_redirect_multi(dev, skb, xdp_prog, map, 4320 ri->flags & BPF_F_EXCLUDE_INGRESS); 4321 } else { 4322 err = dev_map_generic_redirect(fwd, skb, xdp_prog); 4323 } 4324 if (unlikely(err)) 4325 goto err; 4326 break; 4327 case BPF_MAP_TYPE_XSKMAP: 4328 err = xsk_generic_rcv(fwd, xdp); 4329 if (err) 4330 goto err; 4331 consume_skb(skb); 4332 break; 4333 case BPF_MAP_TYPE_CPUMAP: 4334 err = cpu_map_generic_redirect(fwd, skb); 4335 if (unlikely(err)) 4336 goto err; 4337 break; 4338 default: 4339 err = -EBADRQC; 4340 goto err; 4341 } 4342 4343 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4344 return 0; 4345 err: 4346 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4347 return err; 4348 } 4349 4350 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, 4351 struct xdp_buff *xdp, struct bpf_prog *xdp_prog) 4352 { 4353 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4354 enum bpf_map_type map_type = ri->map_type; 4355 void *fwd = ri->tgt_value; 4356 u32 map_id = ri->map_id; 4357 int err; 4358 4359 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4360 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4361 4362 if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) { 4363 fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index); 4364 if (unlikely(!fwd)) { 4365 err = -EINVAL; 4366 goto err; 4367 } 4368 4369 err = xdp_ok_fwd_dev(fwd, skb->len); 4370 if (unlikely(err)) 4371 goto err; 4372 4373 skb->dev = fwd; 4374 _trace_xdp_redirect(dev, xdp_prog, ri->tgt_index); 4375 generic_xdp_tx(skb, xdp_prog); 4376 return 0; 4377 } 4378 4379 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id); 4380 err: 4381 _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err); 4382 return err; 4383 } 4384 4385 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) 4386 { 4387 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4388 4389 if (unlikely(flags)) 4390 return XDP_ABORTED; 4391 4392 /* NB! 
Map type UNSPEC and map_id == INT_MAX (never generated 4393 * by map_idr) is used for ifindex based XDP redirect. 4394 */ 4395 ri->tgt_index = ifindex; 4396 ri->map_id = INT_MAX; 4397 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4398 4399 return XDP_REDIRECT; 4400 } 4401 4402 static const struct bpf_func_proto bpf_xdp_redirect_proto = { 4403 .func = bpf_xdp_redirect, 4404 .gpl_only = false, 4405 .ret_type = RET_INTEGER, 4406 .arg1_type = ARG_ANYTHING, 4407 .arg2_type = ARG_ANYTHING, 4408 }; 4409 4410 BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, 4411 u64, flags) 4412 { 4413 return map->ops->map_redirect(map, ifindex, flags); 4414 } 4415 4416 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = { 4417 .func = bpf_xdp_redirect_map, 4418 .gpl_only = false, 4419 .ret_type = RET_INTEGER, 4420 .arg1_type = ARG_CONST_MAP_PTR, 4421 .arg2_type = ARG_ANYTHING, 4422 .arg3_type = ARG_ANYTHING, 4423 }; 4424 4425 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, 4426 unsigned long off, unsigned long len) 4427 { 4428 void *ptr = skb_header_pointer(skb, off, len, dst_buff); 4429 4430 if (unlikely(!ptr)) 4431 return len; 4432 if (ptr != dst_buff) 4433 memcpy(dst_buff, ptr, len); 4434 4435 return 0; 4436 } 4437 4438 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, 4439 u64, flags, void *, meta, u64, meta_size) 4440 { 4441 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; 4442 4443 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) 4444 return -EINVAL; 4445 if (unlikely(!skb || skb_size > skb->len)) 4446 return -EFAULT; 4447 4448 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, 4449 bpf_skb_copy); 4450 } 4451 4452 static const struct bpf_func_proto bpf_skb_event_output_proto = { 4453 .func = bpf_skb_event_output, 4454 .gpl_only = true, 4455 .ret_type = RET_INTEGER, 4456 .arg1_type = ARG_PTR_TO_CTX, 4457 .arg2_type = ARG_CONST_MAP_PTR, 4458 .arg3_type = ARG_ANYTHING, 4459 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4460 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4461 }; 4462 4463 BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff) 4464 4465 const struct bpf_func_proto bpf_skb_output_proto = { 4466 .func = bpf_skb_event_output, 4467 .gpl_only = true, 4468 .ret_type = RET_INTEGER, 4469 .arg1_type = ARG_PTR_TO_BTF_ID, 4470 .arg1_btf_id = &bpf_skb_output_btf_ids[0], 4471 .arg2_type = ARG_CONST_MAP_PTR, 4472 .arg3_type = ARG_ANYTHING, 4473 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4474 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4475 }; 4476 4477 static unsigned short bpf_tunnel_key_af(u64 flags) 4478 { 4479 return flags & BPF_F_TUNINFO_IPV6 ? 
AF_INET6 : AF_INET; 4480 } 4481 4482 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to, 4483 u32, size, u64, flags) 4484 { 4485 const struct ip_tunnel_info *info = skb_tunnel_info(skb); 4486 u8 compat[sizeof(struct bpf_tunnel_key)]; 4487 void *to_orig = to; 4488 int err; 4489 4490 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { 4491 err = -EINVAL; 4492 goto err_clear; 4493 } 4494 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { 4495 err = -EPROTO; 4496 goto err_clear; 4497 } 4498 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 4499 err = -EINVAL; 4500 switch (size) { 4501 case offsetof(struct bpf_tunnel_key, tunnel_label): 4502 case offsetof(struct bpf_tunnel_key, tunnel_ext): 4503 goto set_compat; 4504 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 4505 /* Fixup deprecated structure layouts here, so we have 4506 * a common path later on. 4507 */ 4508 if (ip_tunnel_info_af(info) != AF_INET) 4509 goto err_clear; 4510 set_compat: 4511 to = (struct bpf_tunnel_key *)compat; 4512 break; 4513 default: 4514 goto err_clear; 4515 } 4516 } 4517 4518 to->tunnel_id = be64_to_cpu(info->key.tun_id); 4519 to->tunnel_tos = info->key.tos; 4520 to->tunnel_ttl = info->key.ttl; 4521 to->tunnel_ext = 0; 4522 4523 if (flags & BPF_F_TUNINFO_IPV6) { 4524 memcpy(to->remote_ipv6, &info->key.u.ipv6.src, 4525 sizeof(to->remote_ipv6)); 4526 to->tunnel_label = be32_to_cpu(info->key.label); 4527 } else { 4528 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); 4529 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); 4530 to->tunnel_label = 0; 4531 } 4532 4533 if (unlikely(size != sizeof(struct bpf_tunnel_key))) 4534 memcpy(to_orig, to, size); 4535 4536 return 0; 4537 err_clear: 4538 memset(to_orig, 0, size); 4539 return err; 4540 } 4541 4542 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { 4543 .func = bpf_skb_get_tunnel_key, 4544 .gpl_only = false, 4545 .ret_type = RET_INTEGER, 4546 .arg1_type = ARG_PTR_TO_CTX, 4547 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 4548 .arg3_type = ARG_CONST_SIZE, 4549 .arg4_type = ARG_ANYTHING, 4550 }; 4551 4552 BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size) 4553 { 4554 const struct ip_tunnel_info *info = skb_tunnel_info(skb); 4555 int err; 4556 4557 if (unlikely(!info || 4558 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { 4559 err = -ENOENT; 4560 goto err_clear; 4561 } 4562 if (unlikely(size < info->options_len)) { 4563 err = -ENOMEM; 4564 goto err_clear; 4565 } 4566 4567 ip_tunnel_info_opts_get(to, info); 4568 if (size > info->options_len) 4569 memset(to + info->options_len, 0, size - info->options_len); 4570 4571 return info->options_len; 4572 err_clear: 4573 memset(to, 0, size); 4574 return err; 4575 } 4576 4577 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { 4578 .func = bpf_skb_get_tunnel_opt, 4579 .gpl_only = false, 4580 .ret_type = RET_INTEGER, 4581 .arg1_type = ARG_PTR_TO_CTX, 4582 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 4583 .arg3_type = ARG_CONST_SIZE, 4584 }; 4585 4586 static struct metadata_dst __percpu *md_dst; 4587 4588 BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, 4589 const struct bpf_tunnel_key *, from, u32, size, u64, flags) 4590 { 4591 struct metadata_dst *md = this_cpu_ptr(md_dst); 4592 u8 compat[sizeof(struct bpf_tunnel_key)]; 4593 struct ip_tunnel_info *info; 4594 4595 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | 4596 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER))) 4597 return -EINVAL; 4598 if (unlikely(size 
!= sizeof(struct bpf_tunnel_key))) { 4599 switch (size) { 4600 case offsetof(struct bpf_tunnel_key, tunnel_label): 4601 case offsetof(struct bpf_tunnel_key, tunnel_ext): 4602 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 4603 /* Fixup deprecated structure layouts here, so we have 4604 * a common path later on. 4605 */ 4606 memcpy(compat, from, size); 4607 memset(compat + size, 0, sizeof(compat) - size); 4608 from = (const struct bpf_tunnel_key *) compat; 4609 break; 4610 default: 4611 return -EINVAL; 4612 } 4613 } 4614 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || 4615 from->tunnel_ext)) 4616 return -EINVAL; 4617 4618 skb_dst_drop(skb); 4619 dst_hold((struct dst_entry *) md); 4620 skb_dst_set(skb, (struct dst_entry *) md); 4621 4622 info = &md->u.tun_info; 4623 memset(info, 0, sizeof(*info)); 4624 info->mode = IP_TUNNEL_INFO_TX; 4625 4626 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; 4627 if (flags & BPF_F_DONT_FRAGMENT) 4628 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; 4629 if (flags & BPF_F_ZERO_CSUM_TX) 4630 info->key.tun_flags &= ~TUNNEL_CSUM; 4631 if (flags & BPF_F_SEQ_NUMBER) 4632 info->key.tun_flags |= TUNNEL_SEQ; 4633 4634 info->key.tun_id = cpu_to_be64(from->tunnel_id); 4635 info->key.tos = from->tunnel_tos; 4636 info->key.ttl = from->tunnel_ttl; 4637 4638 if (flags & BPF_F_TUNINFO_IPV6) { 4639 info->mode |= IP_TUNNEL_INFO_IPV6; 4640 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, 4641 sizeof(from->remote_ipv6)); 4642 info->key.label = cpu_to_be32(from->tunnel_label) & 4643 IPV6_FLOWLABEL_MASK; 4644 } else { 4645 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); 4646 } 4647 4648 return 0; 4649 } 4650 4651 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { 4652 .func = bpf_skb_set_tunnel_key, 4653 .gpl_only = false, 4654 .ret_type = RET_INTEGER, 4655 .arg1_type = ARG_PTR_TO_CTX, 4656 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4657 .arg3_type = ARG_CONST_SIZE, 4658 .arg4_type = ARG_ANYTHING, 4659 }; 4660 4661 BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb, 4662 const u8 *, from, u32, size) 4663 { 4664 struct ip_tunnel_info *info = skb_tunnel_info(skb); 4665 const struct metadata_dst *md = this_cpu_ptr(md_dst); 4666 4667 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) 4668 return -EINVAL; 4669 if (unlikely(size > IP_TUNNEL_OPTS_MAX)) 4670 return -ENOMEM; 4671 4672 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); 4673 4674 return 0; 4675 } 4676 4677 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { 4678 .func = bpf_skb_set_tunnel_opt, 4679 .gpl_only = false, 4680 .ret_type = RET_INTEGER, 4681 .arg1_type = ARG_PTR_TO_CTX, 4682 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4683 .arg3_type = ARG_CONST_SIZE, 4684 }; 4685 4686 static const struct bpf_func_proto * 4687 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) 4688 { 4689 if (!md_dst) { 4690 struct metadata_dst __percpu *tmp; 4691 4692 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX, 4693 METADATA_IP_TUNNEL, 4694 GFP_KERNEL); 4695 if (!tmp) 4696 return NULL; 4697 if (cmpxchg(&md_dst, NULL, tmp)) 4698 metadata_dst_free_percpu(tmp); 4699 } 4700 4701 switch (which) { 4702 case BPF_FUNC_skb_set_tunnel_key: 4703 return &bpf_skb_set_tunnel_key_proto; 4704 case BPF_FUNC_skb_set_tunnel_opt: 4705 return &bpf_skb_set_tunnel_opt_proto; 4706 default: 4707 return NULL; 4708 } 4709 } 4710 4711 BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map, 4712 u32, idx) 4713 { 4714 struct 
bpf_array *array = container_of(map, struct bpf_array, map); 4715 struct cgroup *cgrp; 4716 struct sock *sk; 4717 4718 sk = skb_to_full_sk(skb); 4719 if (!sk || !sk_fullsock(sk)) 4720 return -ENOENT; 4721 if (unlikely(idx >= array->map.max_entries)) 4722 return -E2BIG; 4723 4724 cgrp = READ_ONCE(array->ptrs[idx]); 4725 if (unlikely(!cgrp)) 4726 return -EAGAIN; 4727 4728 return sk_under_cgroup_hierarchy(sk, cgrp); 4729 } 4730 4731 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { 4732 .func = bpf_skb_under_cgroup, 4733 .gpl_only = false, 4734 .ret_type = RET_INTEGER, 4735 .arg1_type = ARG_PTR_TO_CTX, 4736 .arg2_type = ARG_CONST_MAP_PTR, 4737 .arg3_type = ARG_ANYTHING, 4738 }; 4739 4740 #ifdef CONFIG_SOCK_CGROUP_DATA 4741 static inline u64 __bpf_sk_cgroup_id(struct sock *sk) 4742 { 4743 struct cgroup *cgrp; 4744 4745 sk = sk_to_full_sk(sk); 4746 if (!sk || !sk_fullsock(sk)) 4747 return 0; 4748 4749 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 4750 return cgroup_id(cgrp); 4751 } 4752 4753 BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb) 4754 { 4755 return __bpf_sk_cgroup_id(skb->sk); 4756 } 4757 4758 static const struct bpf_func_proto bpf_skb_cgroup_id_proto = { 4759 .func = bpf_skb_cgroup_id, 4760 .gpl_only = false, 4761 .ret_type = RET_INTEGER, 4762 .arg1_type = ARG_PTR_TO_CTX, 4763 }; 4764 4765 static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk, 4766 int ancestor_level) 4767 { 4768 struct cgroup *ancestor; 4769 struct cgroup *cgrp; 4770 4771 sk = sk_to_full_sk(sk); 4772 if (!sk || !sk_fullsock(sk)) 4773 return 0; 4774 4775 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 4776 ancestor = cgroup_ancestor(cgrp, ancestor_level); 4777 if (!ancestor) 4778 return 0; 4779 4780 return cgroup_id(ancestor); 4781 } 4782 4783 BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int, 4784 ancestor_level) 4785 { 4786 return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level); 4787 } 4788 4789 static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = { 4790 .func = bpf_skb_ancestor_cgroup_id, 4791 .gpl_only = false, 4792 .ret_type = RET_INTEGER, 4793 .arg1_type = ARG_PTR_TO_CTX, 4794 .arg2_type = ARG_ANYTHING, 4795 }; 4796 4797 BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk) 4798 { 4799 return __bpf_sk_cgroup_id(sk); 4800 } 4801 4802 static const struct bpf_func_proto bpf_sk_cgroup_id_proto = { 4803 .func = bpf_sk_cgroup_id, 4804 .gpl_only = false, 4805 .ret_type = RET_INTEGER, 4806 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4807 }; 4808 4809 BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level) 4810 { 4811 return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level); 4812 } 4813 4814 static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = { 4815 .func = bpf_sk_ancestor_cgroup_id, 4816 .gpl_only = false, 4817 .ret_type = RET_INTEGER, 4818 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4819 .arg2_type = ARG_ANYTHING, 4820 }; 4821 #endif 4822 4823 static unsigned long bpf_xdp_copy(void *dst, const void *ctx, 4824 unsigned long off, unsigned long len) 4825 { 4826 struct xdp_buff *xdp = (struct xdp_buff *)ctx; 4827 4828 bpf_xdp_copy_buf(xdp, off, dst, len, false); 4829 return 0; 4830 } 4831 4832 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, 4833 u64, flags, void *, meta, u64, meta_size) 4834 { 4835 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; 4836 4837 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) 4838 return -EINVAL; 4839 4840 if (unlikely(!xdp || xdp_size > 
xdp_get_buff_len(xdp))) 4841 return -EFAULT; 4842 4843 return bpf_event_output(map, flags, meta, meta_size, xdp, 4844 xdp_size, bpf_xdp_copy); 4845 } 4846 4847 static const struct bpf_func_proto bpf_xdp_event_output_proto = { 4848 .func = bpf_xdp_event_output, 4849 .gpl_only = true, 4850 .ret_type = RET_INTEGER, 4851 .arg1_type = ARG_PTR_TO_CTX, 4852 .arg2_type = ARG_CONST_MAP_PTR, 4853 .arg3_type = ARG_ANYTHING, 4854 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4855 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4856 }; 4857 4858 BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff) 4859 4860 const struct bpf_func_proto bpf_xdp_output_proto = { 4861 .func = bpf_xdp_event_output, 4862 .gpl_only = true, 4863 .ret_type = RET_INTEGER, 4864 .arg1_type = ARG_PTR_TO_BTF_ID, 4865 .arg1_btf_id = &bpf_xdp_output_btf_ids[0], 4866 .arg2_type = ARG_CONST_MAP_PTR, 4867 .arg3_type = ARG_ANYTHING, 4868 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4869 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4870 }; 4871 4872 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) 4873 { 4874 return skb->sk ? __sock_gen_cookie(skb->sk) : 0; 4875 } 4876 4877 static const struct bpf_func_proto bpf_get_socket_cookie_proto = { 4878 .func = bpf_get_socket_cookie, 4879 .gpl_only = false, 4880 .ret_type = RET_INTEGER, 4881 .arg1_type = ARG_PTR_TO_CTX, 4882 }; 4883 4884 BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) 4885 { 4886 return __sock_gen_cookie(ctx->sk); 4887 } 4888 4889 static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { 4890 .func = bpf_get_socket_cookie_sock_addr, 4891 .gpl_only = false, 4892 .ret_type = RET_INTEGER, 4893 .arg1_type = ARG_PTR_TO_CTX, 4894 }; 4895 4896 BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx) 4897 { 4898 return __sock_gen_cookie(ctx); 4899 } 4900 4901 static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = { 4902 .func = bpf_get_socket_cookie_sock, 4903 .gpl_only = false, 4904 .ret_type = RET_INTEGER, 4905 .arg1_type = ARG_PTR_TO_CTX, 4906 }; 4907 4908 BPF_CALL_1(bpf_get_socket_ptr_cookie, struct sock *, sk) 4909 { 4910 return sk ? sock_gen_cookie(sk) : 0; 4911 } 4912 4913 const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = { 4914 .func = bpf_get_socket_ptr_cookie, 4915 .gpl_only = false, 4916 .ret_type = RET_INTEGER, 4917 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4918 }; 4919 4920 BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) 4921 { 4922 return __sock_gen_cookie(ctx->sk); 4923 } 4924 4925 static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { 4926 .func = bpf_get_socket_cookie_sock_ops, 4927 .gpl_only = false, 4928 .ret_type = RET_INTEGER, 4929 .arg1_type = ARG_PTR_TO_CTX, 4930 }; 4931 4932 static u64 __bpf_get_netns_cookie(struct sock *sk) 4933 { 4934 const struct net *net = sk ? sock_net(sk) : &init_net; 4935 4936 return net->net_cookie; 4937 } 4938 4939 BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx) 4940 { 4941 return __bpf_get_netns_cookie(ctx); 4942 } 4943 4944 static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = { 4945 .func = bpf_get_netns_cookie_sock, 4946 .gpl_only = false, 4947 .ret_type = RET_INTEGER, 4948 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4949 }; 4950 4951 BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) 4952 { 4953 return __bpf_get_netns_cookie(ctx ? 
ctx->sk : NULL); 4954 } 4955 4956 static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = { 4957 .func = bpf_get_netns_cookie_sock_addr, 4958 .gpl_only = false, 4959 .ret_type = RET_INTEGER, 4960 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4961 }; 4962 4963 BPF_CALL_1(bpf_get_netns_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) 4964 { 4965 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); 4966 } 4967 4968 static const struct bpf_func_proto bpf_get_netns_cookie_sock_ops_proto = { 4969 .func = bpf_get_netns_cookie_sock_ops, 4970 .gpl_only = false, 4971 .ret_type = RET_INTEGER, 4972 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4973 }; 4974 4975 BPF_CALL_1(bpf_get_netns_cookie_sk_msg, struct sk_msg *, ctx) 4976 { 4977 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); 4978 } 4979 4980 static const struct bpf_func_proto bpf_get_netns_cookie_sk_msg_proto = { 4981 .func = bpf_get_netns_cookie_sk_msg, 4982 .gpl_only = false, 4983 .ret_type = RET_INTEGER, 4984 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4985 }; 4986 4987 BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) 4988 { 4989 struct sock *sk = sk_to_full_sk(skb->sk); 4990 kuid_t kuid; 4991 4992 if (!sk || !sk_fullsock(sk)) 4993 return overflowuid; 4994 kuid = sock_net_uid(sock_net(sk), sk); 4995 return from_kuid_munged(sock_net(sk)->user_ns, kuid); 4996 } 4997 4998 static const struct bpf_func_proto bpf_get_socket_uid_proto = { 4999 .func = bpf_get_socket_uid, 5000 .gpl_only = false, 5001 .ret_type = RET_INTEGER, 5002 .arg1_type = ARG_PTR_TO_CTX, 5003 }; 5004 5005 static int _bpf_setsockopt(struct sock *sk, int level, int optname, 5006 char *optval, int optlen) 5007 { 5008 char devname[IFNAMSIZ]; 5009 int val, valbool; 5010 struct net *net; 5011 int ifindex; 5012 int ret = 0; 5013 5014 if (!sk_fullsock(sk)) 5015 return -EINVAL; 5016 5017 sock_owned_by_me(sk); 5018 5019 if (level == SOL_SOCKET) { 5020 if (optlen != sizeof(int) && optname != SO_BINDTODEVICE) 5021 return -EINVAL; 5022 val = *((int *)optval); 5023 valbool = val ? 1 : 0; 5024 5025 /* Only some socketops are supported */ 5026 switch (optname) { 5027 case SO_RCVBUF: 5028 val = min_t(u32, val, sysctl_rmem_max); 5029 val = min_t(int, val, INT_MAX / 2); 5030 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 5031 WRITE_ONCE(sk->sk_rcvbuf, 5032 max_t(int, val * 2, SOCK_MIN_RCVBUF)); 5033 break; 5034 case SO_SNDBUF: 5035 val = min_t(u32, val, sysctl_wmem_max); 5036 val = min_t(int, val, INT_MAX / 2); 5037 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 5038 WRITE_ONCE(sk->sk_sndbuf, 5039 max_t(int, val * 2, SOCK_MIN_SNDBUF)); 5040 break; 5041 case SO_MAX_PACING_RATE: /* 32bit version */ 5042 if (val != ~0U) 5043 cmpxchg(&sk->sk_pacing_status, 5044 SK_PACING_NONE, 5045 SK_PACING_NEEDED); 5046 sk->sk_max_pacing_rate = (val == ~0U) ? 5047 ~0UL : (unsigned int)val; 5048 sk->sk_pacing_rate = min(sk->sk_pacing_rate, 5049 sk->sk_max_pacing_rate); 5050 break; 5051 case SO_PRIORITY: 5052 sk->sk_priority = val; 5053 break; 5054 case SO_RCVLOWAT: 5055 if (val < 0) 5056 val = INT_MAX; 5057 WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 5058 break; 5059 case SO_MARK: 5060 if (sk->sk_mark != val) { 5061 sk->sk_mark = val; 5062 sk_dst_reset(sk); 5063 } 5064 break; 5065 case SO_BINDTODEVICE: 5066 optlen = min_t(long, optlen, IFNAMSIZ - 1); 5067 strncpy(devname, optval, optlen); 5068 devname[optlen] = 0; 5069 5070 ifindex = 0; 5071 if (devname[0] != '\0') { 5072 struct net_device *dev; 5073 5074 ret = -ENODEV; 5075 5076 net = sock_net(sk); 5077 dev = dev_get_by_name(net, devname); 5078 if (!dev) 5079 break; 5080 ifindex = dev->ifindex; 5081 dev_put(dev); 5082 } 5083 fallthrough; 5084 case SO_BINDTOIFINDEX: 5085 if (optname == SO_BINDTOIFINDEX) 5086 ifindex = val; 5087 ret = sock_bindtoindex(sk, ifindex, false); 5088 break; 5089 case SO_KEEPALIVE: 5090 if (sk->sk_prot->keepalive) 5091 sk->sk_prot->keepalive(sk, valbool); 5092 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 5093 break; 5094 case SO_REUSEPORT: 5095 sk->sk_reuseport = valbool; 5096 break; 5097 case SO_TXREHASH: 5098 if (val < -1 || val > 1) { 5099 ret = -EINVAL; 5100 break; 5101 } 5102 sk->sk_txrehash = (u8)val; 5103 break; 5104 default: 5105 ret = -EINVAL; 5106 } 5107 #ifdef CONFIG_INET 5108 } else if (level == SOL_IP) { 5109 if (optlen != sizeof(int) || sk->sk_family != AF_INET) 5110 return -EINVAL; 5111 5112 val = *((int *)optval); 5113 /* Only some options are supported */ 5114 switch (optname) { 5115 case IP_TOS: 5116 if (val < -1 || val > 0xff) { 5117 ret = -EINVAL; 5118 } else { 5119 struct inet_sock *inet = inet_sk(sk); 5120 5121 if (val == -1) 5122 val = 0; 5123 inet->tos = val; 5124 } 5125 break; 5126 default: 5127 ret = -EINVAL; 5128 } 5129 #if IS_ENABLED(CONFIG_IPV6) 5130 } else if (level == SOL_IPV6) { 5131 if (optlen != sizeof(int) || sk->sk_family != AF_INET6) 5132 return -EINVAL; 5133 5134 val = *((int *)optval); 5135 /* Only some options are supported */ 5136 switch (optname) { 5137 case IPV6_TCLASS: 5138 if (val < -1 || val > 0xff) { 5139 ret = -EINVAL; 5140 } else { 5141 struct ipv6_pinfo *np = inet6_sk(sk); 5142 5143 if (val == -1) 5144 val = 0; 5145 np->tclass = val; 5146 } 5147 break; 5148 default: 5149 ret = -EINVAL; 5150 } 5151 #endif 5152 } else if (level == SOL_TCP && 5153 sk->sk_prot->setsockopt == tcp_setsockopt) { 5154 if (optname == TCP_CONGESTION) { 5155 char name[TCP_CA_NAME_MAX]; 5156 5157 strncpy(name, optval, min_t(long, optlen, 5158 TCP_CA_NAME_MAX-1)); 5159 name[TCP_CA_NAME_MAX-1] = 0; 5160 ret = tcp_set_congestion_control(sk, name, false, true); 5161 } else { 5162 struct inet_connection_sock *icsk = inet_csk(sk); 5163 struct tcp_sock *tp = tcp_sk(sk); 5164 unsigned long timeout; 5165 5166 if (optlen != sizeof(int)) 5167 return -EINVAL; 5168 5169 val = *((int *)optval); 5170 /* Only some options are supported */ 5171 switch (optname) { 5172 case TCP_BPF_IW: 5173 if (val <= 0 || tp->data_segs_out > tp->syn_data) 5174 ret = -EINVAL; 5175 else 5176 tp->snd_cwnd = val; 5177 break; 5178 case TCP_BPF_SNDCWND_CLAMP: 5179 if (val <= 0) { 5180 ret = -EINVAL; 5181 } else { 5182 tp->snd_cwnd_clamp = val; 5183 tp->snd_ssthresh = val; 5184 } 5185 break; 5186 case TCP_BPF_DELACK_MAX: 5187 timeout = usecs_to_jiffies(val); 5188 if (timeout > TCP_DELACK_MAX || 5189 timeout < TCP_TIMEOUT_MIN) 5190 return -EINVAL; 5191 inet_csk(sk)->icsk_delack_max = timeout; 5192 break; 5193 case TCP_BPF_RTO_MIN: 5194 timeout = usecs_to_jiffies(val); 5195 if (timeout > TCP_RTO_MIN || 5196 timeout < TCP_TIMEOUT_MIN) 5197 return -EINVAL; 5198 inet_csk(sk)->icsk_rto_min = timeout; 5199 break; 5200 case TCP_SAVE_SYN: 5201 if (val < 0 || val > 1) 5202 ret = 
-EINVAL; 5203 else 5204 tp->save_syn = val; 5205 break; 5206 case TCP_KEEPIDLE: 5207 ret = tcp_sock_set_keepidle_locked(sk, val); 5208 break; 5209 case TCP_KEEPINTVL: 5210 if (val < 1 || val > MAX_TCP_KEEPINTVL) 5211 ret = -EINVAL; 5212 else 5213 tp->keepalive_intvl = val * HZ; 5214 break; 5215 case TCP_KEEPCNT: 5216 if (val < 1 || val > MAX_TCP_KEEPCNT) 5217 ret = -EINVAL; 5218 else 5219 tp->keepalive_probes = val; 5220 break; 5221 case TCP_SYNCNT: 5222 if (val < 1 || val > MAX_TCP_SYNCNT) 5223 ret = -EINVAL; 5224 else 5225 icsk->icsk_syn_retries = val; 5226 break; 5227 case TCP_USER_TIMEOUT: 5228 if (val < 0) 5229 ret = -EINVAL; 5230 else 5231 icsk->icsk_user_timeout = val; 5232 break; 5233 case TCP_NOTSENT_LOWAT: 5234 tp->notsent_lowat = val; 5235 sk->sk_write_space(sk); 5236 break; 5237 case TCP_WINDOW_CLAMP: 5238 ret = tcp_set_window_clamp(sk, val); 5239 break; 5240 default: 5241 ret = -EINVAL; 5242 } 5243 } 5244 #endif 5245 } else { 5246 ret = -EINVAL; 5247 } 5248 return ret; 5249 } 5250 5251 static int _bpf_getsockopt(struct sock *sk, int level, int optname, 5252 char *optval, int optlen) 5253 { 5254 if (!sk_fullsock(sk)) 5255 goto err_clear; 5256 5257 sock_owned_by_me(sk); 5258 5259 if (level == SOL_SOCKET) { 5260 if (optlen != sizeof(int)) 5261 goto err_clear; 5262 5263 switch (optname) { 5264 case SO_RCVBUF: 5265 *((int *)optval) = sk->sk_rcvbuf; 5266 break; 5267 case SO_SNDBUF: 5268 *((int *)optval) = sk->sk_sndbuf; 5269 break; 5270 case SO_MARK: 5271 *((int *)optval) = sk->sk_mark; 5272 break; 5273 case SO_PRIORITY: 5274 *((int *)optval) = sk->sk_priority; 5275 break; 5276 case SO_BINDTOIFINDEX: 5277 *((int *)optval) = sk->sk_bound_dev_if; 5278 break; 5279 case SO_REUSEPORT: 5280 *((int *)optval) = sk->sk_reuseport; 5281 break; 5282 case SO_TXREHASH: 5283 *((int *)optval) = sk->sk_txrehash; 5284 break; 5285 default: 5286 goto err_clear; 5287 } 5288 #ifdef CONFIG_INET 5289 } else if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) { 5290 struct inet_connection_sock *icsk; 5291 struct tcp_sock *tp; 5292 5293 switch (optname) { 5294 case TCP_CONGESTION: 5295 icsk = inet_csk(sk); 5296 5297 if (!icsk->icsk_ca_ops || optlen <= 1) 5298 goto err_clear; 5299 strncpy(optval, icsk->icsk_ca_ops->name, optlen); 5300 optval[optlen - 1] = 0; 5301 break; 5302 case TCP_SAVED_SYN: 5303 tp = tcp_sk(sk); 5304 5305 if (optlen <= 0 || !tp->saved_syn || 5306 optlen > tcp_saved_syn_len(tp->saved_syn)) 5307 goto err_clear; 5308 memcpy(optval, tp->saved_syn->data, optlen); 5309 break; 5310 default: 5311 goto err_clear; 5312 } 5313 } else if (level == SOL_IP) { 5314 struct inet_sock *inet = inet_sk(sk); 5315 5316 if (optlen != sizeof(int) || sk->sk_family != AF_INET) 5317 goto err_clear; 5318 5319 /* Only some options are supported */ 5320 switch (optname) { 5321 case IP_TOS: 5322 *((int *)optval) = (int)inet->tos; 5323 break; 5324 default: 5325 goto err_clear; 5326 } 5327 #if IS_ENABLED(CONFIG_IPV6) 5328 } else if (level == SOL_IPV6) { 5329 struct ipv6_pinfo *np = inet6_sk(sk); 5330 5331 if (optlen != sizeof(int) || sk->sk_family != AF_INET6) 5332 goto err_clear; 5333 5334 /* Only some options are supported */ 5335 switch (optname) { 5336 case IPV6_TCLASS: 5337 *((int *)optval) = (int)np->tclass; 5338 break; 5339 default: 5340 goto err_clear; 5341 } 5342 #endif 5343 #endif 5344 } else { 5345 goto err_clear; 5346 } 5347 return 0; 5348 err_clear: 5349 memset(optval, 0, optlen); 5350 return -EINVAL; 5351 } 5352 5353 BPF_CALL_5(bpf_sk_setsockopt, struct sock *, sk, int, level, 5354 int, 
optname, char *, optval, int, optlen) 5355 { 5356 if (level == SOL_TCP && optname == TCP_CONGESTION) { 5357 if (optlen >= sizeof("cdg") - 1 && 5358 !strncmp("cdg", optval, optlen)) 5359 return -ENOTSUPP; 5360 } 5361 5362 return _bpf_setsockopt(sk, level, optname, optval, optlen); 5363 } 5364 5365 const struct bpf_func_proto bpf_sk_setsockopt_proto = { 5366 .func = bpf_sk_setsockopt, 5367 .gpl_only = false, 5368 .ret_type = RET_INTEGER, 5369 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5370 .arg2_type = ARG_ANYTHING, 5371 .arg3_type = ARG_ANYTHING, 5372 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5373 .arg5_type = ARG_CONST_SIZE, 5374 }; 5375 5376 BPF_CALL_5(bpf_sk_getsockopt, struct sock *, sk, int, level, 5377 int, optname, char *, optval, int, optlen) 5378 { 5379 return _bpf_getsockopt(sk, level, optname, optval, optlen); 5380 } 5381 5382 const struct bpf_func_proto bpf_sk_getsockopt_proto = { 5383 .func = bpf_sk_getsockopt, 5384 .gpl_only = false, 5385 .ret_type = RET_INTEGER, 5386 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5387 .arg2_type = ARG_ANYTHING, 5388 .arg3_type = ARG_ANYTHING, 5389 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5390 .arg5_type = ARG_CONST_SIZE, 5391 }; 5392 5393 BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx, 5394 int, level, int, optname, char *, optval, int, optlen) 5395 { 5396 return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen); 5397 } 5398 5399 static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = { 5400 .func = bpf_sock_addr_setsockopt, 5401 .gpl_only = false, 5402 .ret_type = RET_INTEGER, 5403 .arg1_type = ARG_PTR_TO_CTX, 5404 .arg2_type = ARG_ANYTHING, 5405 .arg3_type = ARG_ANYTHING, 5406 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5407 .arg5_type = ARG_CONST_SIZE, 5408 }; 5409 5410 BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx, 5411 int, level, int, optname, char *, optval, int, optlen) 5412 { 5413 return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen); 5414 } 5415 5416 static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = { 5417 .func = bpf_sock_addr_getsockopt, 5418 .gpl_only = false, 5419 .ret_type = RET_INTEGER, 5420 .arg1_type = ARG_PTR_TO_CTX, 5421 .arg2_type = ARG_ANYTHING, 5422 .arg3_type = ARG_ANYTHING, 5423 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5424 .arg5_type = ARG_CONST_SIZE, 5425 }; 5426 5427 BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, 5428 int, level, int, optname, char *, optval, int, optlen) 5429 { 5430 return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen); 5431 } 5432 5433 static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = { 5434 .func = bpf_sock_ops_setsockopt, 5435 .gpl_only = false, 5436 .ret_type = RET_INTEGER, 5437 .arg1_type = ARG_PTR_TO_CTX, 5438 .arg2_type = ARG_ANYTHING, 5439 .arg3_type = ARG_ANYTHING, 5440 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5441 .arg5_type = ARG_CONST_SIZE, 5442 }; 5443 5444 static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock, 5445 int optname, const u8 **start) 5446 { 5447 struct sk_buff *syn_skb = bpf_sock->syn_skb; 5448 const u8 *hdr_start; 5449 int ret; 5450 5451 if (syn_skb) { 5452 /* sk is a request_sock here */ 5453 5454 if (optname == TCP_BPF_SYN) { 5455 hdr_start = syn_skb->data; 5456 ret = tcp_hdrlen(syn_skb); 5457 } else if (optname == TCP_BPF_SYN_IP) { 5458 hdr_start = skb_network_header(syn_skb); 5459 ret = skb_network_header_len(syn_skb) + 5460 tcp_hdrlen(syn_skb); 5461 } else { 5462 /* optname == TCP_BPF_SYN_MAC */ 5463 hdr_start = 
skb_mac_header(syn_skb); 5464 ret = skb_mac_header_len(syn_skb) + 5465 skb_network_header_len(syn_skb) + 5466 tcp_hdrlen(syn_skb); 5467 } 5468 } else { 5469 struct sock *sk = bpf_sock->sk; 5470 struct saved_syn *saved_syn; 5471 5472 if (sk->sk_state == TCP_NEW_SYN_RECV) 5473 /* synack retransmit. bpf_sock->syn_skb will 5474 * not be available. It has to resort to 5475 * saved_syn (if it is saved). 5476 */ 5477 saved_syn = inet_reqsk(sk)->saved_syn; 5478 else 5479 saved_syn = tcp_sk(sk)->saved_syn; 5480 5481 if (!saved_syn) 5482 return -ENOENT; 5483 5484 if (optname == TCP_BPF_SYN) { 5485 hdr_start = saved_syn->data + 5486 saved_syn->mac_hdrlen + 5487 saved_syn->network_hdrlen; 5488 ret = saved_syn->tcp_hdrlen; 5489 } else if (optname == TCP_BPF_SYN_IP) { 5490 hdr_start = saved_syn->data + 5491 saved_syn->mac_hdrlen; 5492 ret = saved_syn->network_hdrlen + 5493 saved_syn->tcp_hdrlen; 5494 } else { 5495 /* optname == TCP_BPF_SYN_MAC */ 5496 5497 /* TCP_SAVE_SYN may not have saved the mac hdr */ 5498 if (!saved_syn->mac_hdrlen) 5499 return -ENOENT; 5500 5501 hdr_start = saved_syn->data; 5502 ret = saved_syn->mac_hdrlen + 5503 saved_syn->network_hdrlen + 5504 saved_syn->tcp_hdrlen; 5505 } 5506 } 5507 5508 *start = hdr_start; 5509 return ret; 5510 } 5511 5512 BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, 5513 int, level, int, optname, char *, optval, int, optlen) 5514 { 5515 if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP && 5516 optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) { 5517 int ret, copy_len = 0; 5518 const u8 *start; 5519 5520 ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start); 5521 if (ret > 0) { 5522 copy_len = ret; 5523 if (optlen < copy_len) { 5524 copy_len = optlen; 5525 ret = -ENOSPC; 5526 } 5527 5528 memcpy(optval, start, copy_len); 5529 } 5530 5531 /* Zero out unused buffer at the end */ 5532 memset(optval + copy_len, 0, optlen - copy_len); 5533 5534 return ret; 5535 } 5536 5537 return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen); 5538 } 5539 5540 static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = { 5541 .func = bpf_sock_ops_getsockopt, 5542 .gpl_only = false, 5543 .ret_type = RET_INTEGER, 5544 .arg1_type = ARG_PTR_TO_CTX, 5545 .arg2_type = ARG_ANYTHING, 5546 .arg3_type = ARG_ANYTHING, 5547 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5548 .arg5_type = ARG_CONST_SIZE, 5549 }; 5550 5551 BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, 5552 int, argval) 5553 { 5554 struct sock *sk = bpf_sock->sk; 5555 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; 5556 5557 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) 5558 return -EINVAL; 5559 5560 tcp_sk(sk)->bpf_sock_ops_cb_flags = val; 5561 5562 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); 5563 } 5564 5565 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { 5566 .func = bpf_sock_ops_cb_flags_set, 5567 .gpl_only = false, 5568 .ret_type = RET_INTEGER, 5569 .arg1_type = ARG_PTR_TO_CTX, 5570 .arg2_type = ARG_ANYTHING, 5571 }; 5572 5573 const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; 5574 EXPORT_SYMBOL_GPL(ipv6_bpf_stub); 5575 5576 BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, 5577 int, addr_len) 5578 { 5579 #ifdef CONFIG_INET 5580 struct sock *sk = ctx->sk; 5581 u32 flags = BIND_FROM_BPF; 5582 int err; 5583 5584 err = -EINVAL; 5585 if (addr_len < offsetofend(struct sockaddr, sa_family)) 5586 return err; 5587 if (addr->sa_family == AF_INET) { 5588 if (addr_len < sizeof(struct sockaddr_in)) 5589 
return err; 5590 if (((struct sockaddr_in *)addr)->sin_port == htons(0)) 5591 flags |= BIND_FORCE_ADDRESS_NO_PORT; 5592 return __inet_bind(sk, addr, addr_len, flags); 5593 #if IS_ENABLED(CONFIG_IPV6) 5594 } else if (addr->sa_family == AF_INET6) { 5595 if (addr_len < SIN6_LEN_RFC2133) 5596 return err; 5597 if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0)) 5598 flags |= BIND_FORCE_ADDRESS_NO_PORT; 5599 /* ipv6_bpf_stub cannot be NULL, since it's called from 5600 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded 5601 */ 5602 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags); 5603 #endif /* CONFIG_IPV6 */ 5604 } 5605 #endif /* CONFIG_INET */ 5606 5607 return -EAFNOSUPPORT; 5608 } 5609 5610 static const struct bpf_func_proto bpf_bind_proto = { 5611 .func = bpf_bind, 5612 .gpl_only = false, 5613 .ret_type = RET_INTEGER, 5614 .arg1_type = ARG_PTR_TO_CTX, 5615 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5616 .arg3_type = ARG_CONST_SIZE, 5617 }; 5618 5619 #ifdef CONFIG_XFRM 5620 BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index, 5621 struct bpf_xfrm_state *, to, u32, size, u64, flags) 5622 { 5623 const struct sec_path *sp = skb_sec_path(skb); 5624 const struct xfrm_state *x; 5625 5626 if (!sp || unlikely(index >= sp->len || flags)) 5627 goto err_clear; 5628 5629 x = sp->xvec[index]; 5630 5631 if (unlikely(size != sizeof(struct bpf_xfrm_state))) 5632 goto err_clear; 5633 5634 to->reqid = x->props.reqid; 5635 to->spi = x->id.spi; 5636 to->family = x->props.family; 5637 to->ext = 0; 5638 5639 if (to->family == AF_INET6) { 5640 memcpy(to->remote_ipv6, x->props.saddr.a6, 5641 sizeof(to->remote_ipv6)); 5642 } else { 5643 to->remote_ipv4 = x->props.saddr.a4; 5644 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); 5645 } 5646 5647 return 0; 5648 err_clear: 5649 memset(to, 0, size); 5650 return -EINVAL; 5651 } 5652 5653 static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { 5654 .func = bpf_skb_get_xfrm_state, 5655 .gpl_only = false, 5656 .ret_type = RET_INTEGER, 5657 .arg1_type = ARG_PTR_TO_CTX, 5658 .arg2_type = ARG_ANYTHING, 5659 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 5660 .arg4_type = ARG_CONST_SIZE, 5661 .arg5_type = ARG_ANYTHING, 5662 }; 5663 #endif 5664 5665 #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) 5666 static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, 5667 const struct neighbour *neigh, 5668 const struct net_device *dev, u32 mtu) 5669 { 5670 memcpy(params->dmac, neigh->ha, ETH_ALEN); 5671 memcpy(params->smac, dev->dev_addr, ETH_ALEN); 5672 params->h_vlan_TCI = 0; 5673 params->h_vlan_proto = 0; 5674 if (mtu) 5675 params->mtu_result = mtu; /* union with tot_len */ 5676 5677 return 0; 5678 } 5679 #endif 5680 5681 #if IS_ENABLED(CONFIG_INET) 5682 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, 5683 u32 flags, bool check_mtu) 5684 { 5685 struct fib_nh_common *nhc; 5686 struct in_device *in_dev; 5687 struct neighbour *neigh; 5688 struct net_device *dev; 5689 struct fib_result res; 5690 struct flowi4 fl4; 5691 u32 mtu = 0; 5692 int err; 5693 5694 dev = dev_get_by_index_rcu(net, params->ifindex); 5695 if (unlikely(!dev)) 5696 return -ENODEV; 5697 5698 /* verify forwarding is enabled on this interface */ 5699 in_dev = __in_dev_get_rcu(dev); 5700 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) 5701 return BPF_FIB_LKUP_RET_FWD_DISABLED; 5702 5703 if (flags & BPF_FIB_LOOKUP_OUTPUT) { 5704 fl4.flowi4_iif = 1; 5705 fl4.flowi4_oif = params->ifindex; 5706 } else { 5707 fl4.flowi4_iif = 
params->ifindex; 5708 fl4.flowi4_oif = 0; 5709 } 5710 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK; 5711 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 5712 fl4.flowi4_flags = 0; 5713 5714 fl4.flowi4_proto = params->l4_protocol; 5715 fl4.daddr = params->ipv4_dst; 5716 fl4.saddr = params->ipv4_src; 5717 fl4.fl4_sport = params->sport; 5718 fl4.fl4_dport = params->dport; 5719 fl4.flowi4_multipath_hash = 0; 5720 5721 if (flags & BPF_FIB_LOOKUP_DIRECT) { 5722 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 5723 struct fib_table *tb; 5724 5725 tb = fib_get_table(net, tbid); 5726 if (unlikely(!tb)) 5727 return BPF_FIB_LKUP_RET_NOT_FWDED; 5728 5729 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); 5730 } else { 5731 fl4.flowi4_mark = 0; 5732 fl4.flowi4_secid = 0; 5733 fl4.flowi4_tun_key.tun_id = 0; 5734 fl4.flowi4_uid = sock_net_uid(net, NULL); 5735 5736 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); 5737 } 5738 5739 if (err) { 5740 /* map fib lookup errors to RTN_ type */ 5741 if (err == -EINVAL) 5742 return BPF_FIB_LKUP_RET_BLACKHOLE; 5743 if (err == -EHOSTUNREACH) 5744 return BPF_FIB_LKUP_RET_UNREACHABLE; 5745 if (err == -EACCES) 5746 return BPF_FIB_LKUP_RET_PROHIBIT; 5747 5748 return BPF_FIB_LKUP_RET_NOT_FWDED; 5749 } 5750 5751 if (res.type != RTN_UNICAST) 5752 return BPF_FIB_LKUP_RET_NOT_FWDED; 5753 5754 if (fib_info_num_path(res.fi) > 1) 5755 fib_select_path(net, &res, &fl4, NULL); 5756 5757 if (check_mtu) { 5758 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); 5759 if (params->tot_len > mtu) { 5760 params->mtu_result = mtu; /* union with tot_len */ 5761 return BPF_FIB_LKUP_RET_FRAG_NEEDED; 5762 } 5763 } 5764 5765 nhc = res.nhc; 5766 5767 /* do not handle lwt encaps right now */ 5768 if (nhc->nhc_lwtstate) 5769 return BPF_FIB_LKUP_RET_UNSUPP_LWT; 5770 5771 dev = nhc->nhc_dev; 5772 5773 params->rt_metric = res.fi->fib_priority; 5774 params->ifindex = dev->ifindex; 5775 5776 /* xdp and cls_bpf programs are run in RCU-bh so 5777 * rcu_read_lock_bh is not needed here 5778 */ 5779 if (likely(nhc->nhc_gw_family != AF_INET6)) { 5780 if (nhc->nhc_gw_family) 5781 params->ipv4_dst = nhc->nhc_gw.ipv4; 5782 5783 neigh = __ipv4_neigh_lookup_noref(dev, 5784 (__force u32)params->ipv4_dst); 5785 } else { 5786 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; 5787 5788 params->family = AF_INET6; 5789 *dst = nhc->nhc_gw.ipv6; 5790 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); 5791 } 5792 5793 if (!neigh) 5794 return BPF_FIB_LKUP_RET_NO_NEIGH; 5795 5796 return bpf_fib_set_fwd_params(params, neigh, dev, mtu); 5797 } 5798 #endif 5799 5800 #if IS_ENABLED(CONFIG_IPV6) 5801 static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, 5802 u32 flags, bool check_mtu) 5803 { 5804 struct in6_addr *src = (struct in6_addr *) params->ipv6_src; 5805 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst; 5806 struct fib6_result res = {}; 5807 struct neighbour *neigh; 5808 struct net_device *dev; 5809 struct inet6_dev *idev; 5810 struct flowi6 fl6; 5811 int strict = 0; 5812 int oif, err; 5813 u32 mtu = 0; 5814 5815 /* link local addresses are never forwarded */ 5816 if (rt6_need_strict(dst) || rt6_need_strict(src)) 5817 return BPF_FIB_LKUP_RET_NOT_FWDED; 5818 5819 dev = dev_get_by_index_rcu(net, params->ifindex); 5820 if (unlikely(!dev)) 5821 return -ENODEV; 5822 5823 idev = __in6_dev_get_safely(dev); 5824 if (unlikely(!idev || !idev->cnf.forwarding)) 5825 return BPF_FIB_LKUP_RET_FWD_DISABLED; 5826 5827 if (flags & BPF_FIB_LOOKUP_OUTPUT) { 5828 fl6.flowi6_iif = 1; 5829 
oif = fl6.flowi6_oif = params->ifindex; 5830 } else { 5831 oif = fl6.flowi6_iif = params->ifindex; 5832 fl6.flowi6_oif = 0; 5833 strict = RT6_LOOKUP_F_HAS_SADDR; 5834 } 5835 fl6.flowlabel = params->flowinfo; 5836 fl6.flowi6_scope = 0; 5837 fl6.flowi6_flags = 0; 5838 fl6.mp_hash = 0; 5839 5840 fl6.flowi6_proto = params->l4_protocol; 5841 fl6.daddr = *dst; 5842 fl6.saddr = *src; 5843 fl6.fl6_sport = params->sport; 5844 fl6.fl6_dport = params->dport; 5845 5846 if (flags & BPF_FIB_LOOKUP_DIRECT) { 5847 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 5848 struct fib6_table *tb; 5849 5850 tb = ipv6_stub->fib6_get_table(net, tbid); 5851 if (unlikely(!tb)) 5852 return BPF_FIB_LKUP_RET_NOT_FWDED; 5853 5854 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res, 5855 strict); 5856 } else { 5857 fl6.flowi6_mark = 0; 5858 fl6.flowi6_secid = 0; 5859 fl6.flowi6_tun_key.tun_id = 0; 5860 fl6.flowi6_uid = sock_net_uid(net, NULL); 5861 5862 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict); 5863 } 5864 5865 if (unlikely(err || IS_ERR_OR_NULL(res.f6i) || 5866 res.f6i == net->ipv6.fib6_null_entry)) 5867 return BPF_FIB_LKUP_RET_NOT_FWDED; 5868 5869 switch (res.fib6_type) { 5870 /* only unicast is forwarded */ 5871 case RTN_UNICAST: 5872 break; 5873 case RTN_BLACKHOLE: 5874 return BPF_FIB_LKUP_RET_BLACKHOLE; 5875 case RTN_UNREACHABLE: 5876 return BPF_FIB_LKUP_RET_UNREACHABLE; 5877 case RTN_PROHIBIT: 5878 return BPF_FIB_LKUP_RET_PROHIBIT; 5879 default: 5880 return BPF_FIB_LKUP_RET_NOT_FWDED; 5881 } 5882 5883 ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif, 5884 fl6.flowi6_oif != 0, NULL, strict); 5885 5886 if (check_mtu) { 5887 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src); 5888 if (params->tot_len > mtu) { 5889 params->mtu_result = mtu; /* union with tot_len */ 5890 return BPF_FIB_LKUP_RET_FRAG_NEEDED; 5891 } 5892 } 5893 5894 if (res.nh->fib_nh_lws) 5895 return BPF_FIB_LKUP_RET_UNSUPP_LWT; 5896 5897 if (res.nh->fib_nh_gw_family) 5898 *dst = res.nh->fib_nh_gw6; 5899 5900 dev = res.nh->fib_nh_dev; 5901 params->rt_metric = res.f6i->fib6_metric; 5902 params->ifindex = dev->ifindex; 5903 5904 /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is 5905 * not needed here. 
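	 * (Both contexts sit between local_bh_disable()/local_bh_enable(),
	 * which already forms an RCU-bh read-side section, so the _noref
	 * neighbour lookup below is safe without taking the lock explicitly.)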
5906 */ 5907 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); 5908 if (!neigh) 5909 return BPF_FIB_LKUP_RET_NO_NEIGH; 5910 5911 return bpf_fib_set_fwd_params(params, neigh, dev, mtu); 5912 } 5913 #endif 5914 5915 BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, 5916 struct bpf_fib_lookup *, params, int, plen, u32, flags) 5917 { 5918 if (plen < sizeof(*params)) 5919 return -EINVAL; 5920 5921 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) 5922 return -EINVAL; 5923 5924 switch (params->family) { 5925 #if IS_ENABLED(CONFIG_INET) 5926 case AF_INET: 5927 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params, 5928 flags, true); 5929 #endif 5930 #if IS_ENABLED(CONFIG_IPV6) 5931 case AF_INET6: 5932 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params, 5933 flags, true); 5934 #endif 5935 } 5936 return -EAFNOSUPPORT; 5937 } 5938 5939 static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = { 5940 .func = bpf_xdp_fib_lookup, 5941 .gpl_only = true, 5942 .ret_type = RET_INTEGER, 5943 .arg1_type = ARG_PTR_TO_CTX, 5944 .arg2_type = ARG_PTR_TO_MEM, 5945 .arg3_type = ARG_CONST_SIZE, 5946 .arg4_type = ARG_ANYTHING, 5947 }; 5948 5949 BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, 5950 struct bpf_fib_lookup *, params, int, plen, u32, flags) 5951 { 5952 struct net *net = dev_net(skb->dev); 5953 int rc = -EAFNOSUPPORT; 5954 bool check_mtu = false; 5955 5956 if (plen < sizeof(*params)) 5957 return -EINVAL; 5958 5959 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) 5960 return -EINVAL; 5961 5962 if (params->tot_len) 5963 check_mtu = true; 5964 5965 switch (params->family) { 5966 #if IS_ENABLED(CONFIG_INET) 5967 case AF_INET: 5968 rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu); 5969 break; 5970 #endif 5971 #if IS_ENABLED(CONFIG_IPV6) 5972 case AF_INET6: 5973 rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu); 5974 break; 5975 #endif 5976 } 5977 5978 if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) { 5979 struct net_device *dev; 5980 5981 /* When tot_len isn't provided by user, check skb 5982 * against MTU of FIB lookup resulting net_device 5983 */ 5984 dev = dev_get_by_index_rcu(net, params->ifindex); 5985 if (!is_skb_forwardable(dev, skb)) 5986 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; 5987 5988 params->mtu_result = dev->mtu; /* union with tot_len */ 5989 } 5990 5991 return rc; 5992 } 5993 5994 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { 5995 .func = bpf_skb_fib_lookup, 5996 .gpl_only = true, 5997 .ret_type = RET_INTEGER, 5998 .arg1_type = ARG_PTR_TO_CTX, 5999 .arg2_type = ARG_PTR_TO_MEM, 6000 .arg3_type = ARG_CONST_SIZE, 6001 .arg4_type = ARG_ANYTHING, 6002 }; 6003 6004 static struct net_device *__dev_via_ifindex(struct net_device *dev_curr, 6005 u32 ifindex) 6006 { 6007 struct net *netns = dev_net(dev_curr); 6008 6009 /* Non-redirect use-cases can use ifindex=0 and save ifindex lookup */ 6010 if (ifindex == 0) 6011 return dev_curr; 6012 6013 return dev_get_by_index_rcu(netns, ifindex); 6014 } 6015 6016 BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, 6017 u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags) 6018 { 6019 int ret = BPF_MTU_CHK_RET_FRAG_NEEDED; 6020 struct net_device *dev = skb->dev; 6021 int skb_len, dev_len; 6022 int mtu; 6023 6024 if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) 6025 return -EINVAL; 6026 6027 if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) 6028 return -EINVAL; 6029 6030 dev = __dev_via_ifindex(dev, ifindex); 6031 if (unlikely(!dev)) 6032 return -ENODEV; 6033 6034 mtu = 
READ_ONCE(dev->mtu); 6035 6036 dev_len = mtu + dev->hard_header_len; 6037 6038 /* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ 6039 skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len; 6040 6041 skb_len += len_diff; /* minus result pass check */ 6042 if (skb_len <= dev_len) { 6043 ret = BPF_MTU_CHK_RET_SUCCESS; 6044 goto out; 6045 } 6046 /* At this point, skb->len exceed MTU, but as it include length of all 6047 * segments, it can still be below MTU. The SKB can possibly get 6048 * re-segmented in transmit path (see validate_xmit_skb). Thus, user 6049 * must choose if segs are to be MTU checked. 6050 */ 6051 if (skb_is_gso(skb)) { 6052 ret = BPF_MTU_CHK_RET_SUCCESS; 6053 6054 if (flags & BPF_MTU_CHK_SEGS && 6055 !skb_gso_validate_network_len(skb, mtu)) 6056 ret = BPF_MTU_CHK_RET_SEGS_TOOBIG; 6057 } 6058 out: 6059 /* BPF verifier guarantees valid pointer */ 6060 *mtu_len = mtu; 6061 6062 return ret; 6063 } 6064 6065 BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, 6066 u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags) 6067 { 6068 struct net_device *dev = xdp->rxq->dev; 6069 int xdp_len = xdp->data_end - xdp->data; 6070 int ret = BPF_MTU_CHK_RET_SUCCESS; 6071 int mtu, dev_len; 6072 6073 /* XDP variant doesn't support multi-buffer segment check (yet) */ 6074 if (unlikely(flags)) 6075 return -EINVAL; 6076 6077 dev = __dev_via_ifindex(dev, ifindex); 6078 if (unlikely(!dev)) 6079 return -ENODEV; 6080 6081 mtu = READ_ONCE(dev->mtu); 6082 6083 /* Add L2-header as dev MTU is L3 size */ 6084 dev_len = mtu + dev->hard_header_len; 6085 6086 /* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ 6087 if (*mtu_len) 6088 xdp_len = *mtu_len + dev->hard_header_len; 6089 6090 xdp_len += len_diff; /* minus result pass check */ 6091 if (xdp_len > dev_len) 6092 ret = BPF_MTU_CHK_RET_FRAG_NEEDED; 6093 6094 /* BPF verifier guarantees valid pointer */ 6095 *mtu_len = mtu; 6096 6097 return ret; 6098 } 6099 6100 static const struct bpf_func_proto bpf_skb_check_mtu_proto = { 6101 .func = bpf_skb_check_mtu, 6102 .gpl_only = true, 6103 .ret_type = RET_INTEGER, 6104 .arg1_type = ARG_PTR_TO_CTX, 6105 .arg2_type = ARG_ANYTHING, 6106 .arg3_type = ARG_PTR_TO_INT, 6107 .arg4_type = ARG_ANYTHING, 6108 .arg5_type = ARG_ANYTHING, 6109 }; 6110 6111 static const struct bpf_func_proto bpf_xdp_check_mtu_proto = { 6112 .func = bpf_xdp_check_mtu, 6113 .gpl_only = true, 6114 .ret_type = RET_INTEGER, 6115 .arg1_type = ARG_PTR_TO_CTX, 6116 .arg2_type = ARG_ANYTHING, 6117 .arg3_type = ARG_PTR_TO_INT, 6118 .arg4_type = ARG_ANYTHING, 6119 .arg5_type = ARG_ANYTHING, 6120 }; 6121 6122 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 6123 static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) 6124 { 6125 int err; 6126 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr; 6127 6128 if (!seg6_validate_srh(srh, len, false)) 6129 return -EINVAL; 6130 6131 switch (type) { 6132 case BPF_LWT_ENCAP_SEG6_INLINE: 6133 if (skb->protocol != htons(ETH_P_IPV6)) 6134 return -EBADMSG; 6135 6136 err = seg6_do_srh_inline(skb, srh); 6137 break; 6138 case BPF_LWT_ENCAP_SEG6: 6139 skb_reset_inner_headers(skb); 6140 skb->encapsulation = 1; 6141 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6); 6142 break; 6143 default: 6144 return -EINVAL; 6145 } 6146 6147 bpf_compute_data_pointers(skb); 6148 if (err) 6149 return err; 6150 6151 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); 6152 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 6153 6154 return 
seg6_lookup_nexthop(skb, NULL, 0); 6155 } 6156 #endif /* CONFIG_IPV6_SEG6_BPF */ 6157 6158 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) 6159 static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, 6160 bool ingress) 6161 { 6162 return bpf_lwt_push_ip_encap(skb, hdr, len, ingress); 6163 } 6164 #endif 6165 6166 BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr, 6167 u32, len) 6168 { 6169 switch (type) { 6170 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 6171 case BPF_LWT_ENCAP_SEG6: 6172 case BPF_LWT_ENCAP_SEG6_INLINE: 6173 return bpf_push_seg6_encap(skb, type, hdr, len); 6174 #endif 6175 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) 6176 case BPF_LWT_ENCAP_IP: 6177 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */); 6178 #endif 6179 default: 6180 return -EINVAL; 6181 } 6182 } 6183 6184 BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type, 6185 void *, hdr, u32, len) 6186 { 6187 switch (type) { 6188 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) 6189 case BPF_LWT_ENCAP_IP: 6190 return bpf_push_ip_encap(skb, hdr, len, false /* egress */); 6191 #endif 6192 default: 6193 return -EINVAL; 6194 } 6195 } 6196 6197 static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = { 6198 .func = bpf_lwt_in_push_encap, 6199 .gpl_only = false, 6200 .ret_type = RET_INTEGER, 6201 .arg1_type = ARG_PTR_TO_CTX, 6202 .arg2_type = ARG_ANYTHING, 6203 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6204 .arg4_type = ARG_CONST_SIZE 6205 }; 6206 6207 static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = { 6208 .func = bpf_lwt_xmit_push_encap, 6209 .gpl_only = false, 6210 .ret_type = RET_INTEGER, 6211 .arg1_type = ARG_PTR_TO_CTX, 6212 .arg2_type = ARG_ANYTHING, 6213 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6214 .arg4_type = ARG_CONST_SIZE 6215 }; 6216 6217 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 6218 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, 6219 const void *, from, u32, len) 6220 { 6221 struct seg6_bpf_srh_state *srh_state = 6222 this_cpu_ptr(&seg6_bpf_srh_states); 6223 struct ipv6_sr_hdr *srh = srh_state->srh; 6224 void *srh_tlvs, *srh_end, *ptr; 6225 int srhoff = 0; 6226 6227 if (srh == NULL) 6228 return -EINVAL; 6229 6230 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4)); 6231 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen); 6232 6233 ptr = skb->data + offset; 6234 if (ptr >= srh_tlvs && ptr + len <= srh_end) 6235 srh_state->valid = false; 6236 else if (ptr < (void *)&srh->flags || 6237 ptr + len > (void *)&srh->segments) 6238 return -EFAULT; 6239 6240 if (unlikely(bpf_try_make_writable(skb, offset + len))) 6241 return -EFAULT; 6242 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) 6243 return -EINVAL; 6244 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); 6245 6246 memcpy(skb->data + offset, from, len); 6247 return 0; 6248 } 6249 6250 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { 6251 .func = bpf_lwt_seg6_store_bytes, 6252 .gpl_only = false, 6253 .ret_type = RET_INTEGER, 6254 .arg1_type = ARG_PTR_TO_CTX, 6255 .arg2_type = ARG_ANYTHING, 6256 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6257 .arg4_type = ARG_CONST_SIZE 6258 }; 6259 6260 static void bpf_update_srh_state(struct sk_buff *skb) 6261 { 6262 struct seg6_bpf_srh_state *srh_state = 6263 this_cpu_ptr(&seg6_bpf_srh_states); 6264 int srhoff = 0; 6265 6266 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) { 6267 srh_state->srh = NULL; 6268 } else { 6269 srh_state->srh = (struct ipv6_sr_hdr 
*)(skb->data + srhoff); 6270 srh_state->hdrlen = srh_state->srh->hdrlen << 3; 6271 srh_state->valid = true; 6272 } 6273 } 6274 6275 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, 6276 u32, action, void *, param, u32, param_len) 6277 { 6278 struct seg6_bpf_srh_state *srh_state = 6279 this_cpu_ptr(&seg6_bpf_srh_states); 6280 int hdroff = 0; 6281 int err; 6282 6283 switch (action) { 6284 case SEG6_LOCAL_ACTION_END_X: 6285 if (!seg6_bpf_has_valid_srh(skb)) 6286 return -EBADMSG; 6287 if (param_len != sizeof(struct in6_addr)) 6288 return -EINVAL; 6289 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0); 6290 case SEG6_LOCAL_ACTION_END_T: 6291 if (!seg6_bpf_has_valid_srh(skb)) 6292 return -EBADMSG; 6293 if (param_len != sizeof(int)) 6294 return -EINVAL; 6295 return seg6_lookup_nexthop(skb, NULL, *(int *)param); 6296 case SEG6_LOCAL_ACTION_END_DT6: 6297 if (!seg6_bpf_has_valid_srh(skb)) 6298 return -EBADMSG; 6299 if (param_len != sizeof(int)) 6300 return -EINVAL; 6301 6302 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0) 6303 return -EBADMSG; 6304 if (!pskb_pull(skb, hdroff)) 6305 return -EBADMSG; 6306 6307 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff); 6308 skb_reset_network_header(skb); 6309 skb_reset_transport_header(skb); 6310 skb->encapsulation = 0; 6311 6312 bpf_compute_data_pointers(skb); 6313 bpf_update_srh_state(skb); 6314 return seg6_lookup_nexthop(skb, NULL, *(int *)param); 6315 case SEG6_LOCAL_ACTION_END_B6: 6316 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) 6317 return -EBADMSG; 6318 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE, 6319 param, param_len); 6320 if (!err) 6321 bpf_update_srh_state(skb); 6322 6323 return err; 6324 case SEG6_LOCAL_ACTION_END_B6_ENCAP: 6325 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) 6326 return -EBADMSG; 6327 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6, 6328 param, param_len); 6329 if (!err) 6330 bpf_update_srh_state(skb); 6331 6332 return err; 6333 default: 6334 return -EINVAL; 6335 } 6336 } 6337 6338 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { 6339 .func = bpf_lwt_seg6_action, 6340 .gpl_only = false, 6341 .ret_type = RET_INTEGER, 6342 .arg1_type = ARG_PTR_TO_CTX, 6343 .arg2_type = ARG_ANYTHING, 6344 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6345 .arg4_type = ARG_CONST_SIZE 6346 }; 6347 6348 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, 6349 s32, len) 6350 { 6351 struct seg6_bpf_srh_state *srh_state = 6352 this_cpu_ptr(&seg6_bpf_srh_states); 6353 struct ipv6_sr_hdr *srh = srh_state->srh; 6354 void *srh_end, *srh_tlvs, *ptr; 6355 struct ipv6hdr *hdr; 6356 int srhoff = 0; 6357 int ret; 6358 6359 if (unlikely(srh == NULL)) 6360 return -EINVAL; 6361 6362 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) + 6363 ((srh->first_segment + 1) << 4)); 6364 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) + 6365 srh_state->hdrlen); 6366 ptr = skb->data + offset; 6367 6368 if (unlikely(ptr < srh_tlvs || ptr > srh_end)) 6369 return -EFAULT; 6370 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end)) 6371 return -EFAULT; 6372 6373 if (len > 0) { 6374 ret = skb_cow_head(skb, len); 6375 if (unlikely(ret < 0)) 6376 return ret; 6377 6378 ret = bpf_skb_net_hdr_push(skb, offset, len); 6379 } else { 6380 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len); 6381 } 6382 6383 bpf_compute_data_pointers(skb); 6384 if (unlikely(ret < 0)) 6385 return ret; 6386 6387 hdr = (struct ipv6hdr *)skb->data; 6388 hdr->payload_len = htons(skb->len - 
sizeof(struct ipv6hdr)); 6389 6390 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) 6391 return -EINVAL; 6392 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); 6393 srh_state->hdrlen += len; 6394 srh_state->valid = false; 6395 return 0; 6396 } 6397 6398 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { 6399 .func = bpf_lwt_seg6_adjust_srh, 6400 .gpl_only = false, 6401 .ret_type = RET_INTEGER, 6402 .arg1_type = ARG_PTR_TO_CTX, 6403 .arg2_type = ARG_ANYTHING, 6404 .arg3_type = ARG_ANYTHING, 6405 }; 6406 #endif /* CONFIG_IPV6_SEG6_BPF */ 6407 6408 #ifdef CONFIG_INET 6409 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, 6410 int dif, int sdif, u8 family, u8 proto) 6411 { 6412 bool refcounted = false; 6413 struct sock *sk = NULL; 6414 6415 if (family == AF_INET) { 6416 __be32 src4 = tuple->ipv4.saddr; 6417 __be32 dst4 = tuple->ipv4.daddr; 6418 6419 if (proto == IPPROTO_TCP) 6420 sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0, 6421 src4, tuple->ipv4.sport, 6422 dst4, tuple->ipv4.dport, 6423 dif, sdif, &refcounted); 6424 else 6425 sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport, 6426 dst4, tuple->ipv4.dport, 6427 dif, sdif, &udp_table, NULL); 6428 #if IS_ENABLED(CONFIG_IPV6) 6429 } else { 6430 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; 6431 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; 6432 6433 if (proto == IPPROTO_TCP) 6434 sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0, 6435 src6, tuple->ipv6.sport, 6436 dst6, ntohs(tuple->ipv6.dport), 6437 dif, sdif, &refcounted); 6438 else if (likely(ipv6_bpf_stub)) 6439 sk = ipv6_bpf_stub->udp6_lib_lookup(net, 6440 src6, tuple->ipv6.sport, 6441 dst6, tuple->ipv6.dport, 6442 dif, sdif, 6443 &udp_table, NULL); 6444 #endif 6445 } 6446 6447 if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) { 6448 WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); 6449 sk = NULL; 6450 } 6451 return sk; 6452 } 6453 6454 /* bpf_skc_lookup performs the core lookup for different types of sockets, 6455 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE. 6456 * Returns the socket as an 'unsigned long' to simplify the casting in the 6457 * callers to satisfy BPF_CALL declarations. 
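 * The address family is inferred from the tuple length: sizeof(tuple->ipv4)
 * selects AF_INET and sizeof(tuple->ipv6) selects AF_INET6; any other length
 * fails the lookup. @flags must be zero. A negative (s32) @netns_id means the
 * lookup runs in the caller's netns, otherwise @netns_id must be a valid
 * netns id (<= S32_MAX) and is resolved via get_net_ns_by_id().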
6458 */ 6459 static struct sock * 6460 __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6461 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, 6462 u64 flags) 6463 { 6464 struct sock *sk = NULL; 6465 u8 family = AF_UNSPEC; 6466 struct net *net; 6467 int sdif; 6468 6469 if (len == sizeof(tuple->ipv4)) 6470 family = AF_INET; 6471 else if (len == sizeof(tuple->ipv6)) 6472 family = AF_INET6; 6473 else 6474 return NULL; 6475 6476 if (unlikely(family == AF_UNSPEC || flags || 6477 !((s32)netns_id < 0 || netns_id <= S32_MAX))) 6478 goto out; 6479 6480 if (family == AF_INET) 6481 sdif = inet_sdif(skb); 6482 else 6483 sdif = inet6_sdif(skb); 6484 6485 if ((s32)netns_id < 0) { 6486 net = caller_net; 6487 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); 6488 } else { 6489 net = get_net_ns_by_id(caller_net, netns_id); 6490 if (unlikely(!net)) 6491 goto out; 6492 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); 6493 put_net(net); 6494 } 6495 6496 out: 6497 return sk; 6498 } 6499 6500 static struct sock * 6501 __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6502 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, 6503 u64 flags) 6504 { 6505 struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net, 6506 ifindex, proto, netns_id, flags); 6507 6508 if (sk) { 6509 sk = sk_to_full_sk(sk); 6510 if (!sk_fullsock(sk)) { 6511 sock_gen_put(sk); 6512 return NULL; 6513 } 6514 } 6515 6516 return sk; 6517 } 6518 6519 static struct sock * 6520 bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6521 u8 proto, u64 netns_id, u64 flags) 6522 { 6523 struct net *caller_net; 6524 int ifindex; 6525 6526 if (skb->dev) { 6527 caller_net = dev_net(skb->dev); 6528 ifindex = skb->dev->ifindex; 6529 } else { 6530 caller_net = sock_net(skb->sk); 6531 ifindex = 0; 6532 } 6533 6534 return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, 6535 netns_id, flags); 6536 } 6537 6538 static struct sock * 6539 bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6540 u8 proto, u64 netns_id, u64 flags) 6541 { 6542 struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id, 6543 flags); 6544 6545 if (sk) { 6546 sk = sk_to_full_sk(sk); 6547 if (!sk_fullsock(sk)) { 6548 sock_gen_put(sk); 6549 return NULL; 6550 } 6551 } 6552 6553 return sk; 6554 } 6555 6556 BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb, 6557 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6558 { 6559 return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP, 6560 netns_id, flags); 6561 } 6562 6563 static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = { 6564 .func = bpf_skc_lookup_tcp, 6565 .gpl_only = false, 6566 .pkt_access = true, 6567 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6568 .arg1_type = ARG_PTR_TO_CTX, 6569 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6570 .arg3_type = ARG_CONST_SIZE, 6571 .arg4_type = ARG_ANYTHING, 6572 .arg5_type = ARG_ANYTHING, 6573 }; 6574 6575 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb, 6576 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6577 { 6578 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, 6579 netns_id, flags); 6580 } 6581 6582 static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { 6583 .func = bpf_sk_lookup_tcp, 6584 .gpl_only = false, 6585 .pkt_access = true, 6586 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6587 .arg1_type = ARG_PTR_TO_CTX, 6588 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 
6589 .arg3_type = ARG_CONST_SIZE, 6590 .arg4_type = ARG_ANYTHING, 6591 .arg5_type = ARG_ANYTHING, 6592 }; 6593 6594 BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb, 6595 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6596 { 6597 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, 6598 netns_id, flags); 6599 } 6600 6601 static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { 6602 .func = bpf_sk_lookup_udp, 6603 .gpl_only = false, 6604 .pkt_access = true, 6605 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6606 .arg1_type = ARG_PTR_TO_CTX, 6607 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6608 .arg3_type = ARG_CONST_SIZE, 6609 .arg4_type = ARG_ANYTHING, 6610 .arg5_type = ARG_ANYTHING, 6611 }; 6612 6613 BPF_CALL_1(bpf_sk_release, struct sock *, sk) 6614 { 6615 if (sk && sk_is_refcounted(sk)) 6616 sock_gen_put(sk); 6617 return 0; 6618 } 6619 6620 static const struct bpf_func_proto bpf_sk_release_proto = { 6621 .func = bpf_sk_release, 6622 .gpl_only = false, 6623 .ret_type = RET_INTEGER, 6624 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 6625 }; 6626 6627 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, 6628 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6629 { 6630 struct net *caller_net = dev_net(ctx->rxq->dev); 6631 int ifindex = ctx->rxq->dev->ifindex; 6632 6633 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, 6634 ifindex, IPPROTO_UDP, netns_id, 6635 flags); 6636 } 6637 6638 static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { 6639 .func = bpf_xdp_sk_lookup_udp, 6640 .gpl_only = false, 6641 .pkt_access = true, 6642 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6643 .arg1_type = ARG_PTR_TO_CTX, 6644 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6645 .arg3_type = ARG_CONST_SIZE, 6646 .arg4_type = ARG_ANYTHING, 6647 .arg5_type = ARG_ANYTHING, 6648 }; 6649 6650 BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx, 6651 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6652 { 6653 struct net *caller_net = dev_net(ctx->rxq->dev); 6654 int ifindex = ctx->rxq->dev->ifindex; 6655 6656 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net, 6657 ifindex, IPPROTO_TCP, netns_id, 6658 flags); 6659 } 6660 6661 static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = { 6662 .func = bpf_xdp_skc_lookup_tcp, 6663 .gpl_only = false, 6664 .pkt_access = true, 6665 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6666 .arg1_type = ARG_PTR_TO_CTX, 6667 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6668 .arg3_type = ARG_CONST_SIZE, 6669 .arg4_type = ARG_ANYTHING, 6670 .arg5_type = ARG_ANYTHING, 6671 }; 6672 6673 BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx, 6674 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6675 { 6676 struct net *caller_net = dev_net(ctx->rxq->dev); 6677 int ifindex = ctx->rxq->dev->ifindex; 6678 6679 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, 6680 ifindex, IPPROTO_TCP, netns_id, 6681 flags); 6682 } 6683 6684 static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = { 6685 .func = bpf_xdp_sk_lookup_tcp, 6686 .gpl_only = false, 6687 .pkt_access = true, 6688 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6689 .arg1_type = ARG_PTR_TO_CTX, 6690 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6691 .arg3_type = ARG_CONST_SIZE, 6692 .arg4_type = ARG_ANYTHING, 6693 .arg5_type = ARG_ANYTHING, 6694 }; 6695 6696 BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx, 6697 struct bpf_sock_tuple *, tuple, u32, len, 
u64, netns_id, u64, flags) 6698 { 6699 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, 6700 sock_net(ctx->sk), 0, 6701 IPPROTO_TCP, netns_id, flags); 6702 } 6703 6704 static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = { 6705 .func = bpf_sock_addr_skc_lookup_tcp, 6706 .gpl_only = false, 6707 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6708 .arg1_type = ARG_PTR_TO_CTX, 6709 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6710 .arg3_type = ARG_CONST_SIZE, 6711 .arg4_type = ARG_ANYTHING, 6712 .arg5_type = ARG_ANYTHING, 6713 }; 6714 6715 BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx, 6716 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6717 { 6718 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, 6719 sock_net(ctx->sk), 0, IPPROTO_TCP, 6720 netns_id, flags); 6721 } 6722 6723 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = { 6724 .func = bpf_sock_addr_sk_lookup_tcp, 6725 .gpl_only = false, 6726 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6727 .arg1_type = ARG_PTR_TO_CTX, 6728 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6729 .arg3_type = ARG_CONST_SIZE, 6730 .arg4_type = ARG_ANYTHING, 6731 .arg5_type = ARG_ANYTHING, 6732 }; 6733 6734 BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx, 6735 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6736 { 6737 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, 6738 sock_net(ctx->sk), 0, IPPROTO_UDP, 6739 netns_id, flags); 6740 } 6741 6742 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { 6743 .func = bpf_sock_addr_sk_lookup_udp, 6744 .gpl_only = false, 6745 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6746 .arg1_type = ARG_PTR_TO_CTX, 6747 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6748 .arg3_type = ARG_CONST_SIZE, 6749 .arg4_type = ARG_ANYTHING, 6750 .arg5_type = ARG_ANYTHING, 6751 }; 6752 6753 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 6754 struct bpf_insn_access_aux *info) 6755 { 6756 if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, 6757 icsk_retransmits)) 6758 return false; 6759 6760 if (off % size != 0) 6761 return false; 6762 6763 switch (off) { 6764 case offsetof(struct bpf_tcp_sock, bytes_received): 6765 case offsetof(struct bpf_tcp_sock, bytes_acked): 6766 return size == sizeof(__u64); 6767 default: 6768 return size == sizeof(__u32); 6769 } 6770 } 6771 6772 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 6773 const struct bpf_insn *si, 6774 struct bpf_insn *insn_buf, 6775 struct bpf_prog *prog, u32 *target_size) 6776 { 6777 struct bpf_insn *insn = insn_buf; 6778 6779 #define BPF_TCP_SOCK_GET_COMMON(FIELD) \ 6780 do { \ 6781 BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \ 6782 sizeof_field(struct bpf_tcp_sock, FIELD)); \ 6783 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\ 6784 si->dst_reg, si->src_reg, \ 6785 offsetof(struct tcp_sock, FIELD)); \ 6786 } while (0) 6787 6788 #define BPF_INET_SOCK_GET_COMMON(FIELD) \ 6789 do { \ 6790 BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \ 6791 FIELD) > \ 6792 sizeof_field(struct bpf_tcp_sock, FIELD)); \ 6793 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 6794 struct inet_connection_sock, \ 6795 FIELD), \ 6796 si->dst_reg, si->src_reg, \ 6797 offsetof( \ 6798 struct inet_connection_sock, \ 6799 FIELD)); \ 6800 } while (0) 6801 6802 if (insn > insn_buf) 6803 return insn - insn_buf; 6804 6805 switch (si->off) { 6806 case offsetof(struct bpf_tcp_sock, rtt_min): 6807 
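		/* rtt_min is a struct minmax inside tcp_sock, not a scalar, so
		 * it cannot use BPF_TCP_SOCK_GET_COMMON(); read the 32-bit
		 * value of the first minmax_sample instead.
		 */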
BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != 6808 sizeof(struct minmax)); 6809 BUILD_BUG_ON(sizeof(struct minmax) < 6810 sizeof(struct minmax_sample)); 6811 6812 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 6813 offsetof(struct tcp_sock, rtt_min) + 6814 offsetof(struct minmax_sample, v)); 6815 break; 6816 case offsetof(struct bpf_tcp_sock, snd_cwnd): 6817 BPF_TCP_SOCK_GET_COMMON(snd_cwnd); 6818 break; 6819 case offsetof(struct bpf_tcp_sock, srtt_us): 6820 BPF_TCP_SOCK_GET_COMMON(srtt_us); 6821 break; 6822 case offsetof(struct bpf_tcp_sock, snd_ssthresh): 6823 BPF_TCP_SOCK_GET_COMMON(snd_ssthresh); 6824 break; 6825 case offsetof(struct bpf_tcp_sock, rcv_nxt): 6826 BPF_TCP_SOCK_GET_COMMON(rcv_nxt); 6827 break; 6828 case offsetof(struct bpf_tcp_sock, snd_nxt): 6829 BPF_TCP_SOCK_GET_COMMON(snd_nxt); 6830 break; 6831 case offsetof(struct bpf_tcp_sock, snd_una): 6832 BPF_TCP_SOCK_GET_COMMON(snd_una); 6833 break; 6834 case offsetof(struct bpf_tcp_sock, mss_cache): 6835 BPF_TCP_SOCK_GET_COMMON(mss_cache); 6836 break; 6837 case offsetof(struct bpf_tcp_sock, ecn_flags): 6838 BPF_TCP_SOCK_GET_COMMON(ecn_flags); 6839 break; 6840 case offsetof(struct bpf_tcp_sock, rate_delivered): 6841 BPF_TCP_SOCK_GET_COMMON(rate_delivered); 6842 break; 6843 case offsetof(struct bpf_tcp_sock, rate_interval_us): 6844 BPF_TCP_SOCK_GET_COMMON(rate_interval_us); 6845 break; 6846 case offsetof(struct bpf_tcp_sock, packets_out): 6847 BPF_TCP_SOCK_GET_COMMON(packets_out); 6848 break; 6849 case offsetof(struct bpf_tcp_sock, retrans_out): 6850 BPF_TCP_SOCK_GET_COMMON(retrans_out); 6851 break; 6852 case offsetof(struct bpf_tcp_sock, total_retrans): 6853 BPF_TCP_SOCK_GET_COMMON(total_retrans); 6854 break; 6855 case offsetof(struct bpf_tcp_sock, segs_in): 6856 BPF_TCP_SOCK_GET_COMMON(segs_in); 6857 break; 6858 case offsetof(struct bpf_tcp_sock, data_segs_in): 6859 BPF_TCP_SOCK_GET_COMMON(data_segs_in); 6860 break; 6861 case offsetof(struct bpf_tcp_sock, segs_out): 6862 BPF_TCP_SOCK_GET_COMMON(segs_out); 6863 break; 6864 case offsetof(struct bpf_tcp_sock, data_segs_out): 6865 BPF_TCP_SOCK_GET_COMMON(data_segs_out); 6866 break; 6867 case offsetof(struct bpf_tcp_sock, lost_out): 6868 BPF_TCP_SOCK_GET_COMMON(lost_out); 6869 break; 6870 case offsetof(struct bpf_tcp_sock, sacked_out): 6871 BPF_TCP_SOCK_GET_COMMON(sacked_out); 6872 break; 6873 case offsetof(struct bpf_tcp_sock, bytes_received): 6874 BPF_TCP_SOCK_GET_COMMON(bytes_received); 6875 break; 6876 case offsetof(struct bpf_tcp_sock, bytes_acked): 6877 BPF_TCP_SOCK_GET_COMMON(bytes_acked); 6878 break; 6879 case offsetof(struct bpf_tcp_sock, dsack_dups): 6880 BPF_TCP_SOCK_GET_COMMON(dsack_dups); 6881 break; 6882 case offsetof(struct bpf_tcp_sock, delivered): 6883 BPF_TCP_SOCK_GET_COMMON(delivered); 6884 break; 6885 case offsetof(struct bpf_tcp_sock, delivered_ce): 6886 BPF_TCP_SOCK_GET_COMMON(delivered_ce); 6887 break; 6888 case offsetof(struct bpf_tcp_sock, icsk_retransmits): 6889 BPF_INET_SOCK_GET_COMMON(icsk_retransmits); 6890 break; 6891 } 6892 6893 return insn - insn_buf; 6894 } 6895 6896 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) 6897 { 6898 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) 6899 return (unsigned long)sk; 6900 6901 return (unsigned long)NULL; 6902 } 6903 6904 const struct bpf_func_proto bpf_tcp_sock_proto = { 6905 .func = bpf_tcp_sock, 6906 .gpl_only = false, 6907 .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL, 6908 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 6909 }; 6910 6911 BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk) 6912 { 6913 sk = 
sk_to_full_sk(sk); 6914 6915 if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE)) 6916 return (unsigned long)sk; 6917 6918 return (unsigned long)NULL; 6919 } 6920 6921 static const struct bpf_func_proto bpf_get_listener_sock_proto = { 6922 .func = bpf_get_listener_sock, 6923 .gpl_only = false, 6924 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6925 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 6926 }; 6927 6928 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) 6929 { 6930 unsigned int iphdr_len; 6931 6932 switch (skb_protocol(skb, true)) { 6933 case cpu_to_be16(ETH_P_IP): 6934 iphdr_len = sizeof(struct iphdr); 6935 break; 6936 case cpu_to_be16(ETH_P_IPV6): 6937 iphdr_len = sizeof(struct ipv6hdr); 6938 break; 6939 default: 6940 return 0; 6941 } 6942 6943 if (skb_headlen(skb) < iphdr_len) 6944 return 0; 6945 6946 if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len)) 6947 return 0; 6948 6949 return INET_ECN_set_ce(skb); 6950 } 6951 6952 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 6953 struct bpf_insn_access_aux *info) 6954 { 6955 if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id)) 6956 return false; 6957 6958 if (off % size != 0) 6959 return false; 6960 6961 switch (off) { 6962 default: 6963 return size == sizeof(__u32); 6964 } 6965 } 6966 6967 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 6968 const struct bpf_insn *si, 6969 struct bpf_insn *insn_buf, 6970 struct bpf_prog *prog, u32 *target_size) 6971 { 6972 struct bpf_insn *insn = insn_buf; 6973 6974 #define BPF_XDP_SOCK_GET(FIELD) \ 6975 do { \ 6976 BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \ 6977 sizeof_field(struct bpf_xdp_sock, FIELD)); \ 6978 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\ 6979 si->dst_reg, si->src_reg, \ 6980 offsetof(struct xdp_sock, FIELD)); \ 6981 } while (0) 6982 6983 switch (si->off) { 6984 case offsetof(struct bpf_xdp_sock, queue_id): 6985 BPF_XDP_SOCK_GET(queue_id); 6986 break; 6987 } 6988 6989 return insn - insn_buf; 6990 } 6991 6992 static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = { 6993 .func = bpf_skb_ecn_set_ce, 6994 .gpl_only = false, 6995 .ret_type = RET_INTEGER, 6996 .arg1_type = ARG_PTR_TO_CTX, 6997 }; 6998 6999 BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len, 7000 struct tcphdr *, th, u32, th_len) 7001 { 7002 #ifdef CONFIG_SYN_COOKIES 7003 u32 cookie; 7004 int ret; 7005 7006 if (unlikely(!sk || th_len < sizeof(*th))) 7007 return -EINVAL; 7008 7009 /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. 
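	 * Insist on a full TCP_LISTEN socket instead, which is checked
	 * explicitly below together with the protocol.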
*/ 7010 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) 7011 return -EINVAL; 7012 7013 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) 7014 return -EINVAL; 7015 7016 if (!th->ack || th->rst || th->syn) 7017 return -ENOENT; 7018 7019 if (tcp_synq_no_recent_overflow(sk)) 7020 return -ENOENT; 7021 7022 cookie = ntohl(th->ack_seq) - 1; 7023 7024 switch (sk->sk_family) { 7025 case AF_INET: 7026 if (unlikely(iph_len < sizeof(struct iphdr))) 7027 return -EINVAL; 7028 7029 ret = __cookie_v4_check((struct iphdr *)iph, th, cookie); 7030 break; 7031 7032 #if IS_BUILTIN(CONFIG_IPV6) 7033 case AF_INET6: 7034 if (unlikely(iph_len < sizeof(struct ipv6hdr))) 7035 return -EINVAL; 7036 7037 ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie); 7038 break; 7039 #endif /* CONFIG_IPV6 */ 7040 7041 default: 7042 return -EPROTONOSUPPORT; 7043 } 7044 7045 if (ret > 0) 7046 return 0; 7047 7048 return -ENOENT; 7049 #else 7050 return -ENOTSUPP; 7051 #endif 7052 } 7053 7054 static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = { 7055 .func = bpf_tcp_check_syncookie, 7056 .gpl_only = true, 7057 .pkt_access = true, 7058 .ret_type = RET_INTEGER, 7059 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 7060 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7061 .arg3_type = ARG_CONST_SIZE, 7062 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7063 .arg5_type = ARG_CONST_SIZE, 7064 }; 7065 7066 BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, 7067 struct tcphdr *, th, u32, th_len) 7068 { 7069 #ifdef CONFIG_SYN_COOKIES 7070 u32 cookie; 7071 u16 mss; 7072 7073 if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4)) 7074 return -EINVAL; 7075 7076 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) 7077 return -EINVAL; 7078 7079 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) 7080 return -ENOENT; 7081 7082 if (!th->syn || th->ack || th->fin || th->rst) 7083 return -EINVAL; 7084 7085 if (unlikely(iph_len < sizeof(struct iphdr))) 7086 return -EINVAL; 7087 7088 /* Both struct iphdr and struct ipv6hdr have the version field at the 7089 * same offset so we can cast to the shorter header (struct iphdr). 
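	 * iph_len has already been checked against sizeof(struct iphdr)
	 * above; the IPv6 branch re-checks it against sizeof(struct ipv6hdr)
	 * before the header is used.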
7090 */ 7091 switch (((struct iphdr *)iph)->version) { 7092 case 4: 7093 if (sk->sk_family == AF_INET6 && sk->sk_ipv6only) 7094 return -EINVAL; 7095 7096 mss = tcp_v4_get_syncookie(sk, iph, th, &cookie); 7097 break; 7098 7099 #if IS_BUILTIN(CONFIG_IPV6) 7100 case 6: 7101 if (unlikely(iph_len < sizeof(struct ipv6hdr))) 7102 return -EINVAL; 7103 7104 if (sk->sk_family != AF_INET6) 7105 return -EINVAL; 7106 7107 mss = tcp_v6_get_syncookie(sk, iph, th, &cookie); 7108 break; 7109 #endif /* CONFIG_IPV6 */ 7110 7111 default: 7112 return -EPROTONOSUPPORT; 7113 } 7114 if (mss == 0) 7115 return -ENOENT; 7116 7117 return cookie | ((u64)mss << 32); 7118 #else 7119 return -EOPNOTSUPP; 7120 #endif /* CONFIG_SYN_COOKIES */ 7121 } 7122 7123 static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = { 7124 .func = bpf_tcp_gen_syncookie, 7125 .gpl_only = true, /* __cookie_v*_init_sequence() is GPL */ 7126 .pkt_access = true, 7127 .ret_type = RET_INTEGER, 7128 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 7129 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7130 .arg3_type = ARG_CONST_SIZE, 7131 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7132 .arg5_type = ARG_CONST_SIZE, 7133 }; 7134 7135 BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags) 7136 { 7137 if (!sk || flags != 0) 7138 return -EINVAL; 7139 if (!skb_at_tc_ingress(skb)) 7140 return -EOPNOTSUPP; 7141 if (unlikely(dev_net(skb->dev) != sock_net(sk))) 7142 return -ENETUNREACH; 7143 if (unlikely(sk_fullsock(sk) && sk->sk_reuseport)) 7144 return -ESOCKTNOSUPPORT; 7145 if (sk_is_refcounted(sk) && 7146 unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) 7147 return -ENOENT; 7148 7149 skb_orphan(skb); 7150 skb->sk = sk; 7151 skb->destructor = sock_pfree; 7152 7153 return 0; 7154 } 7155 7156 static const struct bpf_func_proto bpf_sk_assign_proto = { 7157 .func = bpf_sk_assign, 7158 .gpl_only = false, 7159 .ret_type = RET_INTEGER, 7160 .arg1_type = ARG_PTR_TO_CTX, 7161 .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 7162 .arg3_type = ARG_ANYTHING, 7163 }; 7164 7165 static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend, 7166 u8 search_kind, const u8 *magic, 7167 u8 magic_len, bool *eol) 7168 { 7169 u8 kind, kind_len; 7170 7171 *eol = false; 7172 7173 while (op < opend) { 7174 kind = op[0]; 7175 7176 if (kind == TCPOPT_EOL) { 7177 *eol = true; 7178 return ERR_PTR(-ENOMSG); 7179 } else if (kind == TCPOPT_NOP) { 7180 op++; 7181 continue; 7182 } 7183 7184 if (opend - op < 2 || opend - op < op[1] || op[1] < 2) 7185 /* Something is wrong in the received header. 7186 * Follow the TCP stack's tcp_parse_options() 7187 * and just bail here. 7188 */ 7189 return ERR_PTR(-EFAULT); 7190 7191 kind_len = op[1]; 7192 if (search_kind == kind) { 7193 if (!magic_len) 7194 return op; 7195 7196 if (magic_len > kind_len - 2) 7197 return ERR_PTR(-ENOMSG); 7198 7199 if (!memcmp(&op[2], magic, magic_len)) 7200 return op; 7201 } 7202 7203 op += kind_len; 7204 } 7205 7206 return ERR_PTR(-ENOMSG); 7207 } 7208 7209 BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 7210 void *, search_res, u32, len, u64, flags) 7211 { 7212 bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN; 7213 const u8 *op, *opend, *magic, *search = search_res; 7214 u8 search_kind, search_len, copy_len, magic_len; 7215 int ret; 7216 7217 /* 2 byte is the minimal option len except TCPOPT_NOP and 7218 * TCPOPT_EOL which are useless for the bpf prog to learn 7219 * and this helper disallow loading them also. 
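	 * search_res[0] holds the option kind to look for. For TCPOPT_EXP
	 * (and the shared experimental kind 253) search_res[1] is the pattern
	 * length (4 or 6) and search_res[2..] carry the experimental magic to
	 * match; for all other kinds search_res[1] must be 0. On success the
	 * whole option found is copied back into search_res and its length is
	 * returned.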
7220 */ 7221 if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN) 7222 return -EINVAL; 7223 7224 search_kind = search[0]; 7225 search_len = search[1]; 7226 7227 if (search_len > len || search_kind == TCPOPT_NOP || 7228 search_kind == TCPOPT_EOL) 7229 return -EINVAL; 7230 7231 if (search_kind == TCPOPT_EXP || search_kind == 253) { 7232 /* 16 or 32 bit magic. +2 for kind and kind length */ 7233 if (search_len != 4 && search_len != 6) 7234 return -EINVAL; 7235 magic = &search[2]; 7236 magic_len = search_len - 2; 7237 } else { 7238 if (search_len) 7239 return -EINVAL; 7240 magic = NULL; 7241 magic_len = 0; 7242 } 7243 7244 if (load_syn) { 7245 ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op); 7246 if (ret < 0) 7247 return ret; 7248 7249 opend = op + ret; 7250 op += sizeof(struct tcphdr); 7251 } else { 7252 if (!bpf_sock->skb || 7253 bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB) 7254 /* This bpf_sock->op cannot call this helper */ 7255 return -EPERM; 7256 7257 opend = bpf_sock->skb_data_end; 7258 op = bpf_sock->skb->data + sizeof(struct tcphdr); 7259 } 7260 7261 op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len, 7262 &eol); 7263 if (IS_ERR(op)) 7264 return PTR_ERR(op); 7265 7266 copy_len = op[1]; 7267 ret = copy_len; 7268 if (copy_len > len) { 7269 ret = -ENOSPC; 7270 copy_len = len; 7271 } 7272 7273 memcpy(search_res, op, copy_len); 7274 return ret; 7275 } 7276 7277 static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = { 7278 .func = bpf_sock_ops_load_hdr_opt, 7279 .gpl_only = false, 7280 .ret_type = RET_INTEGER, 7281 .arg1_type = ARG_PTR_TO_CTX, 7282 .arg2_type = ARG_PTR_TO_MEM, 7283 .arg3_type = ARG_CONST_SIZE, 7284 .arg4_type = ARG_ANYTHING, 7285 }; 7286 7287 BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 7288 const void *, from, u32, len, u64, flags) 7289 { 7290 u8 new_kind, new_kind_len, magic_len = 0, *opend; 7291 const u8 *op, *new_op, *magic = NULL; 7292 struct sk_buff *skb; 7293 bool eol; 7294 7295 if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB) 7296 return -EPERM; 7297 7298 if (len < 2 || flags) 7299 return -EINVAL; 7300 7301 new_op = from; 7302 new_kind = new_op[0]; 7303 new_kind_len = new_op[1]; 7304 7305 if (new_kind_len > len || new_kind == TCPOPT_NOP || 7306 new_kind == TCPOPT_EOL) 7307 return -EINVAL; 7308 7309 if (new_kind_len > bpf_sock->remaining_opt_len) 7310 return -ENOSPC; 7311 7312 /* 253 is another experimental kind */ 7313 if (new_kind == TCPOPT_EXP || new_kind == 253) { 7314 if (new_kind_len < 4) 7315 return -EINVAL; 7316 /* Match for the 2 byte magic also. 7317 * RFC 6994: the magic could be 2 or 4 bytes. 7318 * Hence, matching by 2 byte only is on the 7319 * conservative side but it is the right 7320 * thing to do for the 'search-for-duplication' 7321 * purpose. 7322 */ 7323 magic = &new_op[2]; 7324 magic_len = 2; 7325 } 7326 7327 /* Check for duplication */ 7328 skb = bpf_sock->skb; 7329 op = skb->data + sizeof(struct tcphdr); 7330 opend = bpf_sock->skb_data_end; 7331 7332 op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len, 7333 &eol); 7334 if (!IS_ERR(op)) 7335 return -EEXIST; 7336 7337 if (PTR_ERR(op) != -ENOMSG) 7338 return PTR_ERR(op); 7339 7340 if (eol) 7341 /* The option has been ended. Treat it as no more 7342 * header option can be written. 7343 */ 7344 return -ENOSPC; 7345 7346 /* No duplication found. Store the header option. 
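	 * The new option is appended at the current end of the option area,
	 * and both the remaining option budget and skb_data_end are advanced
	 * by its length.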
*/ 7347 memcpy(opend, from, new_kind_len); 7348 7349 bpf_sock->remaining_opt_len -= new_kind_len; 7350 bpf_sock->skb_data_end += new_kind_len; 7351 7352 return 0; 7353 } 7354 7355 static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = { 7356 .func = bpf_sock_ops_store_hdr_opt, 7357 .gpl_only = false, 7358 .ret_type = RET_INTEGER, 7359 .arg1_type = ARG_PTR_TO_CTX, 7360 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7361 .arg3_type = ARG_CONST_SIZE, 7362 .arg4_type = ARG_ANYTHING, 7363 }; 7364 7365 BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 7366 u32, len, u64, flags) 7367 { 7368 if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB) 7369 return -EPERM; 7370 7371 if (flags || len < 2) 7372 return -EINVAL; 7373 7374 if (len > bpf_sock->remaining_opt_len) 7375 return -ENOSPC; 7376 7377 bpf_sock->remaining_opt_len -= len; 7378 7379 return 0; 7380 } 7381 7382 static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { 7383 .func = bpf_sock_ops_reserve_hdr_opt, 7384 .gpl_only = false, 7385 .ret_type = RET_INTEGER, 7386 .arg1_type = ARG_PTR_TO_CTX, 7387 .arg2_type = ARG_ANYTHING, 7388 .arg3_type = ARG_ANYTHING, 7389 }; 7390 7391 BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb, 7392 u64, tstamp, u32, tstamp_type) 7393 { 7394 /* skb_clear_delivery_time() is done for inet protocol */ 7395 if (skb->protocol != htons(ETH_P_IP) && 7396 skb->protocol != htons(ETH_P_IPV6)) 7397 return -EOPNOTSUPP; 7398 7399 switch (tstamp_type) { 7400 case BPF_SKB_TSTAMP_DELIVERY_MONO: 7401 if (!tstamp) 7402 return -EINVAL; 7403 skb->tstamp = tstamp; 7404 skb->mono_delivery_time = 1; 7405 break; 7406 case BPF_SKB_TSTAMP_UNSPEC: 7407 if (tstamp) 7408 return -EINVAL; 7409 skb->tstamp = 0; 7410 skb->mono_delivery_time = 0; 7411 break; 7412 default: 7413 return -EINVAL; 7414 } 7415 7416 return 0; 7417 } 7418 7419 static const struct bpf_func_proto bpf_skb_set_tstamp_proto = { 7420 .func = bpf_skb_set_tstamp, 7421 .gpl_only = false, 7422 .ret_type = RET_INTEGER, 7423 .arg1_type = ARG_PTR_TO_CTX, 7424 .arg2_type = ARG_ANYTHING, 7425 .arg3_type = ARG_ANYTHING, 7426 }; 7427 7428 #endif /* CONFIG_INET */ 7429 7430 bool bpf_helper_changes_pkt_data(void *func) 7431 { 7432 if (func == bpf_skb_vlan_push || 7433 func == bpf_skb_vlan_pop || 7434 func == bpf_skb_store_bytes || 7435 func == bpf_skb_change_proto || 7436 func == bpf_skb_change_head || 7437 func == sk_skb_change_head || 7438 func == bpf_skb_change_tail || 7439 func == sk_skb_change_tail || 7440 func == bpf_skb_adjust_room || 7441 func == sk_skb_adjust_room || 7442 func == bpf_skb_pull_data || 7443 func == sk_skb_pull_data || 7444 func == bpf_clone_redirect || 7445 func == bpf_l3_csum_replace || 7446 func == bpf_l4_csum_replace || 7447 func == bpf_xdp_adjust_head || 7448 func == bpf_xdp_adjust_meta || 7449 func == bpf_msg_pull_data || 7450 func == bpf_msg_push_data || 7451 func == bpf_msg_pop_data || 7452 func == bpf_xdp_adjust_tail || 7453 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 7454 func == bpf_lwt_seg6_store_bytes || 7455 func == bpf_lwt_seg6_adjust_srh || 7456 func == bpf_lwt_seg6_action || 7457 #endif 7458 #ifdef CONFIG_INET 7459 func == bpf_sock_ops_store_hdr_opt || 7460 #endif 7461 func == bpf_lwt_in_push_encap || 7462 func == bpf_lwt_xmit_push_encap) 7463 return true; 7464 7465 return false; 7466 } 7467 7468 const struct bpf_func_proto bpf_event_output_data_proto __weak; 7469 const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak; 7470 7471 static const struct bpf_func_proto * 7472 
sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7473 { 7474 switch (func_id) { 7475 /* inet and inet6 sockets are created in a process 7476 * context so there is always a valid uid/gid 7477 */ 7478 case BPF_FUNC_get_current_uid_gid: 7479 return &bpf_get_current_uid_gid_proto; 7480 case BPF_FUNC_get_local_storage: 7481 return &bpf_get_local_storage_proto; 7482 case BPF_FUNC_get_socket_cookie: 7483 return &bpf_get_socket_cookie_sock_proto; 7484 case BPF_FUNC_get_netns_cookie: 7485 return &bpf_get_netns_cookie_sock_proto; 7486 case BPF_FUNC_perf_event_output: 7487 return &bpf_event_output_data_proto; 7488 case BPF_FUNC_get_current_pid_tgid: 7489 return &bpf_get_current_pid_tgid_proto; 7490 case BPF_FUNC_get_current_comm: 7491 return &bpf_get_current_comm_proto; 7492 #ifdef CONFIG_CGROUPS 7493 case BPF_FUNC_get_current_cgroup_id: 7494 return &bpf_get_current_cgroup_id_proto; 7495 case BPF_FUNC_get_current_ancestor_cgroup_id: 7496 return &bpf_get_current_ancestor_cgroup_id_proto; 7497 #endif 7498 #ifdef CONFIG_CGROUP_NET_CLASSID 7499 case BPF_FUNC_get_cgroup_classid: 7500 return &bpf_get_cgroup_classid_curr_proto; 7501 #endif 7502 case BPF_FUNC_sk_storage_get: 7503 return &bpf_sk_storage_get_cg_sock_proto; 7504 case BPF_FUNC_ktime_get_coarse_ns: 7505 return &bpf_ktime_get_coarse_ns_proto; 7506 default: 7507 return bpf_base_func_proto(func_id); 7508 } 7509 } 7510 7511 static const struct bpf_func_proto * 7512 sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7513 { 7514 switch (func_id) { 7515 /* inet and inet6 sockets are created in a process 7516 * context so there is always a valid uid/gid 7517 */ 7518 case BPF_FUNC_get_current_uid_gid: 7519 return &bpf_get_current_uid_gid_proto; 7520 case BPF_FUNC_bind: 7521 switch (prog->expected_attach_type) { 7522 case BPF_CGROUP_INET4_CONNECT: 7523 case BPF_CGROUP_INET6_CONNECT: 7524 return &bpf_bind_proto; 7525 default: 7526 return NULL; 7527 } 7528 case BPF_FUNC_get_socket_cookie: 7529 return &bpf_get_socket_cookie_sock_addr_proto; 7530 case BPF_FUNC_get_netns_cookie: 7531 return &bpf_get_netns_cookie_sock_addr_proto; 7532 case BPF_FUNC_get_local_storage: 7533 return &bpf_get_local_storage_proto; 7534 case BPF_FUNC_perf_event_output: 7535 return &bpf_event_output_data_proto; 7536 case BPF_FUNC_get_current_pid_tgid: 7537 return &bpf_get_current_pid_tgid_proto; 7538 case BPF_FUNC_get_current_comm: 7539 return &bpf_get_current_comm_proto; 7540 #ifdef CONFIG_CGROUPS 7541 case BPF_FUNC_get_current_cgroup_id: 7542 return &bpf_get_current_cgroup_id_proto; 7543 case BPF_FUNC_get_current_ancestor_cgroup_id: 7544 return &bpf_get_current_ancestor_cgroup_id_proto; 7545 #endif 7546 #ifdef CONFIG_CGROUP_NET_CLASSID 7547 case BPF_FUNC_get_cgroup_classid: 7548 return &bpf_get_cgroup_classid_curr_proto; 7549 #endif 7550 #ifdef CONFIG_INET 7551 case BPF_FUNC_sk_lookup_tcp: 7552 return &bpf_sock_addr_sk_lookup_tcp_proto; 7553 case BPF_FUNC_sk_lookup_udp: 7554 return &bpf_sock_addr_sk_lookup_udp_proto; 7555 case BPF_FUNC_sk_release: 7556 return &bpf_sk_release_proto; 7557 case BPF_FUNC_skc_lookup_tcp: 7558 return &bpf_sock_addr_skc_lookup_tcp_proto; 7559 #endif /* CONFIG_INET */ 7560 case BPF_FUNC_sk_storage_get: 7561 return &bpf_sk_storage_get_proto; 7562 case BPF_FUNC_sk_storage_delete: 7563 return &bpf_sk_storage_delete_proto; 7564 case BPF_FUNC_setsockopt: 7565 switch (prog->expected_attach_type) { 7566 case BPF_CGROUP_INET4_BIND: 7567 case BPF_CGROUP_INET6_BIND: 7568 case BPF_CGROUP_INET4_CONNECT: 7569 case 
BPF_CGROUP_INET6_CONNECT: 7570 case BPF_CGROUP_UDP4_RECVMSG: 7571 case BPF_CGROUP_UDP6_RECVMSG: 7572 case BPF_CGROUP_UDP4_SENDMSG: 7573 case BPF_CGROUP_UDP6_SENDMSG: 7574 case BPF_CGROUP_INET4_GETPEERNAME: 7575 case BPF_CGROUP_INET6_GETPEERNAME: 7576 case BPF_CGROUP_INET4_GETSOCKNAME: 7577 case BPF_CGROUP_INET6_GETSOCKNAME: 7578 return &bpf_sock_addr_setsockopt_proto; 7579 default: 7580 return NULL; 7581 } 7582 case BPF_FUNC_getsockopt: 7583 switch (prog->expected_attach_type) { 7584 case BPF_CGROUP_INET4_BIND: 7585 case BPF_CGROUP_INET6_BIND: 7586 case BPF_CGROUP_INET4_CONNECT: 7587 case BPF_CGROUP_INET6_CONNECT: 7588 case BPF_CGROUP_UDP4_RECVMSG: 7589 case BPF_CGROUP_UDP6_RECVMSG: 7590 case BPF_CGROUP_UDP4_SENDMSG: 7591 case BPF_CGROUP_UDP6_SENDMSG: 7592 case BPF_CGROUP_INET4_GETPEERNAME: 7593 case BPF_CGROUP_INET6_GETPEERNAME: 7594 case BPF_CGROUP_INET4_GETSOCKNAME: 7595 case BPF_CGROUP_INET6_GETSOCKNAME: 7596 return &bpf_sock_addr_getsockopt_proto; 7597 default: 7598 return NULL; 7599 } 7600 default: 7601 return bpf_sk_base_func_proto(func_id); 7602 } 7603 } 7604 7605 static const struct bpf_func_proto * 7606 sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7607 { 7608 switch (func_id) { 7609 case BPF_FUNC_skb_load_bytes: 7610 return &bpf_skb_load_bytes_proto; 7611 case BPF_FUNC_skb_load_bytes_relative: 7612 return &bpf_skb_load_bytes_relative_proto; 7613 case BPF_FUNC_get_socket_cookie: 7614 return &bpf_get_socket_cookie_proto; 7615 case BPF_FUNC_get_socket_uid: 7616 return &bpf_get_socket_uid_proto; 7617 case BPF_FUNC_perf_event_output: 7618 return &bpf_skb_event_output_proto; 7619 default: 7620 return bpf_sk_base_func_proto(func_id); 7621 } 7622 } 7623 7624 const struct bpf_func_proto bpf_sk_storage_get_proto __weak; 7625 const struct bpf_func_proto bpf_sk_storage_delete_proto __weak; 7626 7627 static const struct bpf_func_proto * 7628 cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7629 { 7630 switch (func_id) { 7631 case BPF_FUNC_get_local_storage: 7632 return &bpf_get_local_storage_proto; 7633 case BPF_FUNC_sk_fullsock: 7634 return &bpf_sk_fullsock_proto; 7635 case BPF_FUNC_sk_storage_get: 7636 return &bpf_sk_storage_get_proto; 7637 case BPF_FUNC_sk_storage_delete: 7638 return &bpf_sk_storage_delete_proto; 7639 case BPF_FUNC_perf_event_output: 7640 return &bpf_skb_event_output_proto; 7641 #ifdef CONFIG_SOCK_CGROUP_DATA 7642 case BPF_FUNC_skb_cgroup_id: 7643 return &bpf_skb_cgroup_id_proto; 7644 case BPF_FUNC_skb_ancestor_cgroup_id: 7645 return &bpf_skb_ancestor_cgroup_id_proto; 7646 case BPF_FUNC_sk_cgroup_id: 7647 return &bpf_sk_cgroup_id_proto; 7648 case BPF_FUNC_sk_ancestor_cgroup_id: 7649 return &bpf_sk_ancestor_cgroup_id_proto; 7650 #endif 7651 #ifdef CONFIG_INET 7652 case BPF_FUNC_sk_lookup_tcp: 7653 return &bpf_sk_lookup_tcp_proto; 7654 case BPF_FUNC_sk_lookup_udp: 7655 return &bpf_sk_lookup_udp_proto; 7656 case BPF_FUNC_sk_release: 7657 return &bpf_sk_release_proto; 7658 case BPF_FUNC_skc_lookup_tcp: 7659 return &bpf_skc_lookup_tcp_proto; 7660 case BPF_FUNC_tcp_sock: 7661 return &bpf_tcp_sock_proto; 7662 case BPF_FUNC_get_listener_sock: 7663 return &bpf_get_listener_sock_proto; 7664 case BPF_FUNC_skb_ecn_set_ce: 7665 return &bpf_skb_ecn_set_ce_proto; 7666 #endif 7667 default: 7668 return sk_filter_func_proto(func_id, prog); 7669 } 7670 } 7671 7672 static const struct bpf_func_proto * 7673 tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7674 { 7675 switch (func_id) { 7676 case 
BPF_FUNC_skb_store_bytes: 7677 return &bpf_skb_store_bytes_proto; 7678 case BPF_FUNC_skb_load_bytes: 7679 return &bpf_skb_load_bytes_proto; 7680 case BPF_FUNC_skb_load_bytes_relative: 7681 return &bpf_skb_load_bytes_relative_proto; 7682 case BPF_FUNC_skb_pull_data: 7683 return &bpf_skb_pull_data_proto; 7684 case BPF_FUNC_csum_diff: 7685 return &bpf_csum_diff_proto; 7686 case BPF_FUNC_csum_update: 7687 return &bpf_csum_update_proto; 7688 case BPF_FUNC_csum_level: 7689 return &bpf_csum_level_proto; 7690 case BPF_FUNC_l3_csum_replace: 7691 return &bpf_l3_csum_replace_proto; 7692 case BPF_FUNC_l4_csum_replace: 7693 return &bpf_l4_csum_replace_proto; 7694 case BPF_FUNC_clone_redirect: 7695 return &bpf_clone_redirect_proto; 7696 case BPF_FUNC_get_cgroup_classid: 7697 return &bpf_get_cgroup_classid_proto; 7698 case BPF_FUNC_skb_vlan_push: 7699 return &bpf_skb_vlan_push_proto; 7700 case BPF_FUNC_skb_vlan_pop: 7701 return &bpf_skb_vlan_pop_proto; 7702 case BPF_FUNC_skb_change_proto: 7703 return &bpf_skb_change_proto_proto; 7704 case BPF_FUNC_skb_change_type: 7705 return &bpf_skb_change_type_proto; 7706 case BPF_FUNC_skb_adjust_room: 7707 return &bpf_skb_adjust_room_proto; 7708 case BPF_FUNC_skb_change_tail: 7709 return &bpf_skb_change_tail_proto; 7710 case BPF_FUNC_skb_change_head: 7711 return &bpf_skb_change_head_proto; 7712 case BPF_FUNC_skb_get_tunnel_key: 7713 return &bpf_skb_get_tunnel_key_proto; 7714 case BPF_FUNC_skb_set_tunnel_key: 7715 return bpf_get_skb_set_tunnel_proto(func_id); 7716 case BPF_FUNC_skb_get_tunnel_opt: 7717 return &bpf_skb_get_tunnel_opt_proto; 7718 case BPF_FUNC_skb_set_tunnel_opt: 7719 return bpf_get_skb_set_tunnel_proto(func_id); 7720 case BPF_FUNC_redirect: 7721 return &bpf_redirect_proto; 7722 case BPF_FUNC_redirect_neigh: 7723 return &bpf_redirect_neigh_proto; 7724 case BPF_FUNC_redirect_peer: 7725 return &bpf_redirect_peer_proto; 7726 case BPF_FUNC_get_route_realm: 7727 return &bpf_get_route_realm_proto; 7728 case BPF_FUNC_get_hash_recalc: 7729 return &bpf_get_hash_recalc_proto; 7730 case BPF_FUNC_set_hash_invalid: 7731 return &bpf_set_hash_invalid_proto; 7732 case BPF_FUNC_set_hash: 7733 return &bpf_set_hash_proto; 7734 case BPF_FUNC_perf_event_output: 7735 return &bpf_skb_event_output_proto; 7736 case BPF_FUNC_get_smp_processor_id: 7737 return &bpf_get_smp_processor_id_proto; 7738 case BPF_FUNC_skb_under_cgroup: 7739 return &bpf_skb_under_cgroup_proto; 7740 case BPF_FUNC_get_socket_cookie: 7741 return &bpf_get_socket_cookie_proto; 7742 case BPF_FUNC_get_socket_uid: 7743 return &bpf_get_socket_uid_proto; 7744 case BPF_FUNC_fib_lookup: 7745 return &bpf_skb_fib_lookup_proto; 7746 case BPF_FUNC_check_mtu: 7747 return &bpf_skb_check_mtu_proto; 7748 case BPF_FUNC_sk_fullsock: 7749 return &bpf_sk_fullsock_proto; 7750 case BPF_FUNC_sk_storage_get: 7751 return &bpf_sk_storage_get_proto; 7752 case BPF_FUNC_sk_storage_delete: 7753 return &bpf_sk_storage_delete_proto; 7754 #ifdef CONFIG_XFRM 7755 case BPF_FUNC_skb_get_xfrm_state: 7756 return &bpf_skb_get_xfrm_state_proto; 7757 #endif 7758 #ifdef CONFIG_CGROUP_NET_CLASSID 7759 case BPF_FUNC_skb_cgroup_classid: 7760 return &bpf_skb_cgroup_classid_proto; 7761 #endif 7762 #ifdef CONFIG_SOCK_CGROUP_DATA 7763 case BPF_FUNC_skb_cgroup_id: 7764 return &bpf_skb_cgroup_id_proto; 7765 case BPF_FUNC_skb_ancestor_cgroup_id: 7766 return &bpf_skb_ancestor_cgroup_id_proto; 7767 #endif 7768 #ifdef CONFIG_INET 7769 case BPF_FUNC_sk_lookup_tcp: 7770 return &bpf_sk_lookup_tcp_proto; 7771 case BPF_FUNC_sk_lookup_udp: 7772 return 
&bpf_sk_lookup_udp_proto; 7773 case BPF_FUNC_sk_release: 7774 return &bpf_sk_release_proto; 7775 case BPF_FUNC_tcp_sock: 7776 return &bpf_tcp_sock_proto; 7777 case BPF_FUNC_get_listener_sock: 7778 return &bpf_get_listener_sock_proto; 7779 case BPF_FUNC_skc_lookup_tcp: 7780 return &bpf_skc_lookup_tcp_proto; 7781 case BPF_FUNC_tcp_check_syncookie: 7782 return &bpf_tcp_check_syncookie_proto; 7783 case BPF_FUNC_skb_ecn_set_ce: 7784 return &bpf_skb_ecn_set_ce_proto; 7785 case BPF_FUNC_tcp_gen_syncookie: 7786 return &bpf_tcp_gen_syncookie_proto; 7787 case BPF_FUNC_sk_assign: 7788 return &bpf_sk_assign_proto; 7789 case BPF_FUNC_skb_set_tstamp: 7790 return &bpf_skb_set_tstamp_proto; 7791 #endif 7792 default: 7793 return bpf_sk_base_func_proto(func_id); 7794 } 7795 } 7796 7797 static const struct bpf_func_proto * 7798 xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7799 { 7800 switch (func_id) { 7801 case BPF_FUNC_perf_event_output: 7802 return &bpf_xdp_event_output_proto; 7803 case BPF_FUNC_get_smp_processor_id: 7804 return &bpf_get_smp_processor_id_proto; 7805 case BPF_FUNC_csum_diff: 7806 return &bpf_csum_diff_proto; 7807 case BPF_FUNC_xdp_adjust_head: 7808 return &bpf_xdp_adjust_head_proto; 7809 case BPF_FUNC_xdp_adjust_meta: 7810 return &bpf_xdp_adjust_meta_proto; 7811 case BPF_FUNC_redirect: 7812 return &bpf_xdp_redirect_proto; 7813 case BPF_FUNC_redirect_map: 7814 return &bpf_xdp_redirect_map_proto; 7815 case BPF_FUNC_xdp_adjust_tail: 7816 return &bpf_xdp_adjust_tail_proto; 7817 case BPF_FUNC_xdp_get_buff_len: 7818 return &bpf_xdp_get_buff_len_proto; 7819 case BPF_FUNC_xdp_load_bytes: 7820 return &bpf_xdp_load_bytes_proto; 7821 case BPF_FUNC_xdp_store_bytes: 7822 return &bpf_xdp_store_bytes_proto; 7823 case BPF_FUNC_fib_lookup: 7824 return &bpf_xdp_fib_lookup_proto; 7825 case BPF_FUNC_check_mtu: 7826 return &bpf_xdp_check_mtu_proto; 7827 #ifdef CONFIG_INET 7828 case BPF_FUNC_sk_lookup_udp: 7829 return &bpf_xdp_sk_lookup_udp_proto; 7830 case BPF_FUNC_sk_lookup_tcp: 7831 return &bpf_xdp_sk_lookup_tcp_proto; 7832 case BPF_FUNC_sk_release: 7833 return &bpf_sk_release_proto; 7834 case BPF_FUNC_skc_lookup_tcp: 7835 return &bpf_xdp_skc_lookup_tcp_proto; 7836 case BPF_FUNC_tcp_check_syncookie: 7837 return &bpf_tcp_check_syncookie_proto; 7838 case BPF_FUNC_tcp_gen_syncookie: 7839 return &bpf_tcp_gen_syncookie_proto; 7840 #endif 7841 default: 7842 return bpf_sk_base_func_proto(func_id); 7843 } 7844 } 7845 7846 const struct bpf_func_proto bpf_sock_map_update_proto __weak; 7847 const struct bpf_func_proto bpf_sock_hash_update_proto __weak; 7848 7849 static const struct bpf_func_proto * 7850 sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7851 { 7852 switch (func_id) { 7853 case BPF_FUNC_setsockopt: 7854 return &bpf_sock_ops_setsockopt_proto; 7855 case BPF_FUNC_getsockopt: 7856 return &bpf_sock_ops_getsockopt_proto; 7857 case BPF_FUNC_sock_ops_cb_flags_set: 7858 return &bpf_sock_ops_cb_flags_set_proto; 7859 case BPF_FUNC_sock_map_update: 7860 return &bpf_sock_map_update_proto; 7861 case BPF_FUNC_sock_hash_update: 7862 return &bpf_sock_hash_update_proto; 7863 case BPF_FUNC_get_socket_cookie: 7864 return &bpf_get_socket_cookie_sock_ops_proto; 7865 case BPF_FUNC_get_local_storage: 7866 return &bpf_get_local_storage_proto; 7867 case BPF_FUNC_perf_event_output: 7868 return &bpf_event_output_data_proto; 7869 case BPF_FUNC_sk_storage_get: 7870 return &bpf_sk_storage_get_proto; 7871 case BPF_FUNC_sk_storage_delete: 7872 return &bpf_sk_storage_delete_proto; 7873 
case BPF_FUNC_get_netns_cookie: 7874 return &bpf_get_netns_cookie_sock_ops_proto; 7875 #ifdef CONFIG_INET 7876 case BPF_FUNC_load_hdr_opt: 7877 return &bpf_sock_ops_load_hdr_opt_proto; 7878 case BPF_FUNC_store_hdr_opt: 7879 return &bpf_sock_ops_store_hdr_opt_proto; 7880 case BPF_FUNC_reserve_hdr_opt: 7881 return &bpf_sock_ops_reserve_hdr_opt_proto; 7882 case BPF_FUNC_tcp_sock: 7883 return &bpf_tcp_sock_proto; 7884 #endif /* CONFIG_INET */ 7885 default: 7886 return bpf_sk_base_func_proto(func_id); 7887 } 7888 } 7889 7890 const struct bpf_func_proto bpf_msg_redirect_map_proto __weak; 7891 const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak; 7892 7893 static const struct bpf_func_proto * 7894 sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7895 { 7896 switch (func_id) { 7897 case BPF_FUNC_msg_redirect_map: 7898 return &bpf_msg_redirect_map_proto; 7899 case BPF_FUNC_msg_redirect_hash: 7900 return &bpf_msg_redirect_hash_proto; 7901 case BPF_FUNC_msg_apply_bytes: 7902 return &bpf_msg_apply_bytes_proto; 7903 case BPF_FUNC_msg_cork_bytes: 7904 return &bpf_msg_cork_bytes_proto; 7905 case BPF_FUNC_msg_pull_data: 7906 return &bpf_msg_pull_data_proto; 7907 case BPF_FUNC_msg_push_data: 7908 return &bpf_msg_push_data_proto; 7909 case BPF_FUNC_msg_pop_data: 7910 return &bpf_msg_pop_data_proto; 7911 case BPF_FUNC_perf_event_output: 7912 return &bpf_event_output_data_proto; 7913 case BPF_FUNC_get_current_uid_gid: 7914 return &bpf_get_current_uid_gid_proto; 7915 case BPF_FUNC_get_current_pid_tgid: 7916 return &bpf_get_current_pid_tgid_proto; 7917 case BPF_FUNC_sk_storage_get: 7918 return &bpf_sk_storage_get_proto; 7919 case BPF_FUNC_sk_storage_delete: 7920 return &bpf_sk_storage_delete_proto; 7921 case BPF_FUNC_get_netns_cookie: 7922 return &bpf_get_netns_cookie_sk_msg_proto; 7923 #ifdef CONFIG_CGROUPS 7924 case BPF_FUNC_get_current_cgroup_id: 7925 return &bpf_get_current_cgroup_id_proto; 7926 case BPF_FUNC_get_current_ancestor_cgroup_id: 7927 return &bpf_get_current_ancestor_cgroup_id_proto; 7928 #endif 7929 #ifdef CONFIG_CGROUP_NET_CLASSID 7930 case BPF_FUNC_get_cgroup_classid: 7931 return &bpf_get_cgroup_classid_curr_proto; 7932 #endif 7933 default: 7934 return bpf_sk_base_func_proto(func_id); 7935 } 7936 } 7937 7938 const struct bpf_func_proto bpf_sk_redirect_map_proto __weak; 7939 const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak; 7940 7941 static const struct bpf_func_proto * 7942 sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7943 { 7944 switch (func_id) { 7945 case BPF_FUNC_skb_store_bytes: 7946 return &bpf_skb_store_bytes_proto; 7947 case BPF_FUNC_skb_load_bytes: 7948 return &bpf_skb_load_bytes_proto; 7949 case BPF_FUNC_skb_pull_data: 7950 return &sk_skb_pull_data_proto; 7951 case BPF_FUNC_skb_change_tail: 7952 return &sk_skb_change_tail_proto; 7953 case BPF_FUNC_skb_change_head: 7954 return &sk_skb_change_head_proto; 7955 case BPF_FUNC_skb_adjust_room: 7956 return &sk_skb_adjust_room_proto; 7957 case BPF_FUNC_get_socket_cookie: 7958 return &bpf_get_socket_cookie_proto; 7959 case BPF_FUNC_get_socket_uid: 7960 return &bpf_get_socket_uid_proto; 7961 case BPF_FUNC_sk_redirect_map: 7962 return &bpf_sk_redirect_map_proto; 7963 case BPF_FUNC_sk_redirect_hash: 7964 return &bpf_sk_redirect_hash_proto; 7965 case BPF_FUNC_perf_event_output: 7966 return &bpf_skb_event_output_proto; 7967 #ifdef CONFIG_INET 7968 case BPF_FUNC_sk_lookup_tcp: 7969 return &bpf_sk_lookup_tcp_proto; 7970 case BPF_FUNC_sk_lookup_udp: 7971 return 
&bpf_sk_lookup_udp_proto; 7972 case BPF_FUNC_sk_release: 7973 return &bpf_sk_release_proto; 7974 case BPF_FUNC_skc_lookup_tcp: 7975 return &bpf_skc_lookup_tcp_proto; 7976 #endif 7977 default: 7978 return bpf_sk_base_func_proto(func_id); 7979 } 7980 } 7981 7982 static const struct bpf_func_proto * 7983 flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7984 { 7985 switch (func_id) { 7986 case BPF_FUNC_skb_load_bytes: 7987 return &bpf_flow_dissector_load_bytes_proto; 7988 default: 7989 return bpf_sk_base_func_proto(func_id); 7990 } 7991 } 7992 7993 static const struct bpf_func_proto * 7994 lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7995 { 7996 switch (func_id) { 7997 case BPF_FUNC_skb_load_bytes: 7998 return &bpf_skb_load_bytes_proto; 7999 case BPF_FUNC_skb_pull_data: 8000 return &bpf_skb_pull_data_proto; 8001 case BPF_FUNC_csum_diff: 8002 return &bpf_csum_diff_proto; 8003 case BPF_FUNC_get_cgroup_classid: 8004 return &bpf_get_cgroup_classid_proto; 8005 case BPF_FUNC_get_route_realm: 8006 return &bpf_get_route_realm_proto; 8007 case BPF_FUNC_get_hash_recalc: 8008 return &bpf_get_hash_recalc_proto; 8009 case BPF_FUNC_perf_event_output: 8010 return &bpf_skb_event_output_proto; 8011 case BPF_FUNC_get_smp_processor_id: 8012 return &bpf_get_smp_processor_id_proto; 8013 case BPF_FUNC_skb_under_cgroup: 8014 return &bpf_skb_under_cgroup_proto; 8015 default: 8016 return bpf_sk_base_func_proto(func_id); 8017 } 8018 } 8019 8020 static const struct bpf_func_proto * 8021 lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 8022 { 8023 switch (func_id) { 8024 case BPF_FUNC_lwt_push_encap: 8025 return &bpf_lwt_in_push_encap_proto; 8026 default: 8027 return lwt_out_func_proto(func_id, prog); 8028 } 8029 } 8030 8031 static const struct bpf_func_proto * 8032 lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 8033 { 8034 switch (func_id) { 8035 case BPF_FUNC_skb_get_tunnel_key: 8036 return &bpf_skb_get_tunnel_key_proto; 8037 case BPF_FUNC_skb_set_tunnel_key: 8038 return bpf_get_skb_set_tunnel_proto(func_id); 8039 case BPF_FUNC_skb_get_tunnel_opt: 8040 return &bpf_skb_get_tunnel_opt_proto; 8041 case BPF_FUNC_skb_set_tunnel_opt: 8042 return bpf_get_skb_set_tunnel_proto(func_id); 8043 case BPF_FUNC_redirect: 8044 return &bpf_redirect_proto; 8045 case BPF_FUNC_clone_redirect: 8046 return &bpf_clone_redirect_proto; 8047 case BPF_FUNC_skb_change_tail: 8048 return &bpf_skb_change_tail_proto; 8049 case BPF_FUNC_skb_change_head: 8050 return &bpf_skb_change_head_proto; 8051 case BPF_FUNC_skb_store_bytes: 8052 return &bpf_skb_store_bytes_proto; 8053 case BPF_FUNC_csum_update: 8054 return &bpf_csum_update_proto; 8055 case BPF_FUNC_csum_level: 8056 return &bpf_csum_level_proto; 8057 case BPF_FUNC_l3_csum_replace: 8058 return &bpf_l3_csum_replace_proto; 8059 case BPF_FUNC_l4_csum_replace: 8060 return &bpf_l4_csum_replace_proto; 8061 case BPF_FUNC_set_hash_invalid: 8062 return &bpf_set_hash_invalid_proto; 8063 case BPF_FUNC_lwt_push_encap: 8064 return &bpf_lwt_xmit_push_encap_proto; 8065 default: 8066 return lwt_out_func_proto(func_id, prog); 8067 } 8068 } 8069 8070 static const struct bpf_func_proto * 8071 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 8072 { 8073 switch (func_id) { 8074 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 8075 case BPF_FUNC_lwt_seg6_store_bytes: 8076 return &bpf_lwt_seg6_store_bytes_proto; 8077 case BPF_FUNC_lwt_seg6_action: 8078 return &bpf_lwt_seg6_action_proto; 8079 
case BPF_FUNC_lwt_seg6_adjust_srh: 8080 return &bpf_lwt_seg6_adjust_srh_proto; 8081 #endif 8082 default: 8083 return lwt_out_func_proto(func_id, prog); 8084 } 8085 } 8086 8087 static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type, 8088 const struct bpf_prog *prog, 8089 struct bpf_insn_access_aux *info) 8090 { 8091 const int size_default = sizeof(__u32); 8092 8093 if (off < 0 || off >= sizeof(struct __sk_buff)) 8094 return false; 8095 8096 /* The verifier guarantees that size > 0. */ 8097 if (off % size != 0) 8098 return false; 8099 8100 switch (off) { 8101 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8102 if (off + size > offsetofend(struct __sk_buff, cb[4])) 8103 return false; 8104 break; 8105 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): 8106 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): 8107 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): 8108 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): 8109 case bpf_ctx_range(struct __sk_buff, data): 8110 case bpf_ctx_range(struct __sk_buff, data_meta): 8111 case bpf_ctx_range(struct __sk_buff, data_end): 8112 if (size != size_default) 8113 return false; 8114 break; 8115 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 8116 return false; 8117 case bpf_ctx_range(struct __sk_buff, hwtstamp): 8118 if (type == BPF_WRITE || size != sizeof(__u64)) 8119 return false; 8120 break; 8121 case bpf_ctx_range(struct __sk_buff, tstamp): 8122 if (size != sizeof(__u64)) 8123 return false; 8124 break; 8125 case offsetof(struct __sk_buff, sk): 8126 if (type == BPF_WRITE || size != sizeof(__u64)) 8127 return false; 8128 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; 8129 break; 8130 case offsetof(struct __sk_buff, tstamp_type): 8131 return false; 8132 case offsetofend(struct __sk_buff, tstamp_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: 8133 /* Explicitly prohibit access to padding in __sk_buff. */ 8134 return false; 8135 default: 8136 /* Only narrow read access allowed for now. 
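 * As an illustration (a sketch of what this check accepts, not code
 * from this file, assuming ctx is the program's struct __sk_buff
 * pointer): for a default __u32 field such as __sk_buff->len a
 * program may issue a narrow 1- or 2-byte read,
 *
 *	__u16 len_lo = *(__u16 *)((void *)ctx +
 *				  offsetof(struct __sk_buff, len));
 *
 * which the verifier later rewrites into the full 4-byte load of the
 * underlying field plus a shift/mask, whereas a write to such a
 * field must use the full 4-byte width.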
*/ 8137 if (type == BPF_WRITE) { 8138 if (size != size_default) 8139 return false; 8140 } else { 8141 bpf_ctx_record_field_size(info, size_default); 8142 if (!bpf_ctx_narrow_access_ok(off, size, size_default)) 8143 return false; 8144 } 8145 } 8146 8147 return true; 8148 } 8149 8150 static bool sk_filter_is_valid_access(int off, int size, 8151 enum bpf_access_type type, 8152 const struct bpf_prog *prog, 8153 struct bpf_insn_access_aux *info) 8154 { 8155 switch (off) { 8156 case bpf_ctx_range(struct __sk_buff, tc_classid): 8157 case bpf_ctx_range(struct __sk_buff, data): 8158 case bpf_ctx_range(struct __sk_buff, data_meta): 8159 case bpf_ctx_range(struct __sk_buff, data_end): 8160 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 8161 case bpf_ctx_range(struct __sk_buff, tstamp): 8162 case bpf_ctx_range(struct __sk_buff, wire_len): 8163 case bpf_ctx_range(struct __sk_buff, hwtstamp): 8164 return false; 8165 } 8166 8167 if (type == BPF_WRITE) { 8168 switch (off) { 8169 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8170 break; 8171 default: 8172 return false; 8173 } 8174 } 8175 8176 return bpf_skb_is_valid_access(off, size, type, prog, info); 8177 } 8178 8179 static bool cg_skb_is_valid_access(int off, int size, 8180 enum bpf_access_type type, 8181 const struct bpf_prog *prog, 8182 struct bpf_insn_access_aux *info) 8183 { 8184 switch (off) { 8185 case bpf_ctx_range(struct __sk_buff, tc_classid): 8186 case bpf_ctx_range(struct __sk_buff, data_meta): 8187 case bpf_ctx_range(struct __sk_buff, wire_len): 8188 return false; 8189 case bpf_ctx_range(struct __sk_buff, data): 8190 case bpf_ctx_range(struct __sk_buff, data_end): 8191 if (!bpf_capable()) 8192 return false; 8193 break; 8194 } 8195 8196 if (type == BPF_WRITE) { 8197 switch (off) { 8198 case bpf_ctx_range(struct __sk_buff, mark): 8199 case bpf_ctx_range(struct __sk_buff, priority): 8200 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8201 break; 8202 case bpf_ctx_range(struct __sk_buff, tstamp): 8203 if (!bpf_capable()) 8204 return false; 8205 break; 8206 default: 8207 return false; 8208 } 8209 } 8210 8211 switch (off) { 8212 case bpf_ctx_range(struct __sk_buff, data): 8213 info->reg_type = PTR_TO_PACKET; 8214 break; 8215 case bpf_ctx_range(struct __sk_buff, data_end): 8216 info->reg_type = PTR_TO_PACKET_END; 8217 break; 8218 } 8219 8220 return bpf_skb_is_valid_access(off, size, type, prog, info); 8221 } 8222 8223 static bool lwt_is_valid_access(int off, int size, 8224 enum bpf_access_type type, 8225 const struct bpf_prog *prog, 8226 struct bpf_insn_access_aux *info) 8227 { 8228 switch (off) { 8229 case bpf_ctx_range(struct __sk_buff, tc_classid): 8230 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 8231 case bpf_ctx_range(struct __sk_buff, data_meta): 8232 case bpf_ctx_range(struct __sk_buff, tstamp): 8233 case bpf_ctx_range(struct __sk_buff, wire_len): 8234 case bpf_ctx_range(struct __sk_buff, hwtstamp): 8235 return false; 8236 } 8237 8238 if (type == BPF_WRITE) { 8239 switch (off) { 8240 case bpf_ctx_range(struct __sk_buff, mark): 8241 case bpf_ctx_range(struct __sk_buff, priority): 8242 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8243 break; 8244 default: 8245 return false; 8246 } 8247 } 8248 8249 switch (off) { 8250 case bpf_ctx_range(struct __sk_buff, data): 8251 info->reg_type = PTR_TO_PACKET; 8252 break; 8253 case bpf_ctx_range(struct __sk_buff, data_end): 8254 info->reg_type = PTR_TO_PACKET_END; 8255 break; 8256 } 8257 8258 return bpf_skb_is_valid_access(off, size, type, 
prog, info); 8259 } 8260 8261 /* Attach type specific accesses */ 8262 static bool __sock_filter_check_attach_type(int off, 8263 enum bpf_access_type access_type, 8264 enum bpf_attach_type attach_type) 8265 { 8266 switch (off) { 8267 case offsetof(struct bpf_sock, bound_dev_if): 8268 case offsetof(struct bpf_sock, mark): 8269 case offsetof(struct bpf_sock, priority): 8270 switch (attach_type) { 8271 case BPF_CGROUP_INET_SOCK_CREATE: 8272 case BPF_CGROUP_INET_SOCK_RELEASE: 8273 goto full_access; 8274 default: 8275 return false; 8276 } 8277 case bpf_ctx_range(struct bpf_sock, src_ip4): 8278 switch (attach_type) { 8279 case BPF_CGROUP_INET4_POST_BIND: 8280 goto read_only; 8281 default: 8282 return false; 8283 } 8284 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 8285 switch (attach_type) { 8286 case BPF_CGROUP_INET6_POST_BIND: 8287 goto read_only; 8288 default: 8289 return false; 8290 } 8291 case bpf_ctx_range(struct bpf_sock, src_port): 8292 switch (attach_type) { 8293 case BPF_CGROUP_INET4_POST_BIND: 8294 case BPF_CGROUP_INET6_POST_BIND: 8295 goto read_only; 8296 default: 8297 return false; 8298 } 8299 } 8300 read_only: 8301 return access_type == BPF_READ; 8302 full_access: 8303 return true; 8304 } 8305 8306 bool bpf_sock_common_is_valid_access(int off, int size, 8307 enum bpf_access_type type, 8308 struct bpf_insn_access_aux *info) 8309 { 8310 switch (off) { 8311 case bpf_ctx_range_till(struct bpf_sock, type, priority): 8312 return false; 8313 default: 8314 return bpf_sock_is_valid_access(off, size, type, info); 8315 } 8316 } 8317 8318 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 8319 struct bpf_insn_access_aux *info) 8320 { 8321 const int size_default = sizeof(__u32); 8322 int field_size; 8323 8324 if (off < 0 || off >= sizeof(struct bpf_sock)) 8325 return false; 8326 if (off % size != 0) 8327 return false; 8328 8329 switch (off) { 8330 case offsetof(struct bpf_sock, state): 8331 case offsetof(struct bpf_sock, family): 8332 case offsetof(struct bpf_sock, type): 8333 case offsetof(struct bpf_sock, protocol): 8334 case offsetof(struct bpf_sock, src_port): 8335 case offsetof(struct bpf_sock, rx_queue_mapping): 8336 case bpf_ctx_range(struct bpf_sock, src_ip4): 8337 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 8338 case bpf_ctx_range(struct bpf_sock, dst_ip4): 8339 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): 8340 bpf_ctx_record_field_size(info, size_default); 8341 return bpf_ctx_narrow_access_ok(off, size, size_default); 8342 case bpf_ctx_range(struct bpf_sock, dst_port): 8343 field_size = size == size_default ? 8344 size_default : sizeof_field(struct bpf_sock, dst_port); 8345 bpf_ctx_record_field_size(info, field_size); 8346 return bpf_ctx_narrow_access_ok(off, size, field_size); 8347 case offsetofend(struct bpf_sock, dst_port) ... 
8348 offsetof(struct bpf_sock, dst_ip4) - 1: 8349 return false; 8350 } 8351 8352 return size == size_default; 8353 } 8354 8355 static bool sock_filter_is_valid_access(int off, int size, 8356 enum bpf_access_type type, 8357 const struct bpf_prog *prog, 8358 struct bpf_insn_access_aux *info) 8359 { 8360 if (!bpf_sock_is_valid_access(off, size, type, info)) 8361 return false; 8362 return __sock_filter_check_attach_type(off, type, 8363 prog->expected_attach_type); 8364 } 8365 8366 static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write, 8367 const struct bpf_prog *prog) 8368 { 8369 /* Neither direct read nor direct write requires any preliminary 8370 * action. 8371 */ 8372 return 0; 8373 } 8374 8375 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, 8376 const struct bpf_prog *prog, int drop_verdict) 8377 { 8378 struct bpf_insn *insn = insn_buf; 8379 8380 if (!direct_write) 8381 return 0; 8382 8383 /* if (!skb->cloned) 8384 * goto start; 8385 * 8386 * (Fast-path, otherwise approximation that we might be 8387 * a clone, do the rest in helper.) 8388 */ 8389 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET); 8390 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); 8391 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); 8392 8393 /* ret = bpf_skb_pull_data(skb, 0); */ 8394 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 8395 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); 8396 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 8397 BPF_FUNC_skb_pull_data); 8398 /* if (!ret) 8399 * goto restore; 8400 * return TC_ACT_SHOT; 8401 */ 8402 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); 8403 *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict); 8404 *insn++ = BPF_EXIT_INSN(); 8405 8406 /* restore: */ 8407 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); 8408 /* start: */ 8409 *insn++ = prog->insnsi[0]; 8410 8411 return insn - insn_buf; 8412 } 8413 8414 static int bpf_gen_ld_abs(const struct bpf_insn *orig, 8415 struct bpf_insn *insn_buf) 8416 { 8417 bool indirect = BPF_MODE(orig->code) == BPF_IND; 8418 struct bpf_insn *insn = insn_buf; 8419 8420 if (!indirect) { 8421 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); 8422 } else { 8423 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg); 8424 if (orig->imm) 8425 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); 8426 } 8427 /* We're guaranteed here that CTX is in R6. 
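 * (The legacy LD_ABS/LD_IND instructions treat R6 as the implicit
 * skb/ctx register and the classic-BPF converter keeps ctx there, so
 * reading it from BPF_REG_CTX below matches that convention.)
 * The sequence built here behaves roughly as follows for a BPF_H
 * load (an illustrative sketch, not the literal emitted code):
 *
 *	R2 = orig->imm;			// plus R<src> for BPF_IND
 *	R1 = R6;			// the skb / ctx
 *	R0 = bpf_skb_load_helper_16_no_cache(R1, R2);
 *	if (R0 < 0) {			// load failed
 *		R0 = 0;
 *		return R0;		// exit the program
 *	}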
*/ 8428 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); 8429 8430 switch (BPF_SIZE(orig->code)) { 8431 case BPF_B: 8432 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache); 8433 break; 8434 case BPF_H: 8435 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache); 8436 break; 8437 case BPF_W: 8438 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache); 8439 break; 8440 } 8441 8442 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2); 8443 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0); 8444 *insn++ = BPF_EXIT_INSN(); 8445 8446 return insn - insn_buf; 8447 } 8448 8449 static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, 8450 const struct bpf_prog *prog) 8451 { 8452 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT); 8453 } 8454 8455 static bool tc_cls_act_is_valid_access(int off, int size, 8456 enum bpf_access_type type, 8457 const struct bpf_prog *prog, 8458 struct bpf_insn_access_aux *info) 8459 { 8460 if (type == BPF_WRITE) { 8461 switch (off) { 8462 case bpf_ctx_range(struct __sk_buff, mark): 8463 case bpf_ctx_range(struct __sk_buff, tc_index): 8464 case bpf_ctx_range(struct __sk_buff, priority): 8465 case bpf_ctx_range(struct __sk_buff, tc_classid): 8466 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8467 case bpf_ctx_range(struct __sk_buff, tstamp): 8468 case bpf_ctx_range(struct __sk_buff, queue_mapping): 8469 break; 8470 default: 8471 return false; 8472 } 8473 } 8474 8475 switch (off) { 8476 case bpf_ctx_range(struct __sk_buff, data): 8477 info->reg_type = PTR_TO_PACKET; 8478 break; 8479 case bpf_ctx_range(struct __sk_buff, data_meta): 8480 info->reg_type = PTR_TO_PACKET_META; 8481 break; 8482 case bpf_ctx_range(struct __sk_buff, data_end): 8483 info->reg_type = PTR_TO_PACKET_END; 8484 break; 8485 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 8486 return false; 8487 case offsetof(struct __sk_buff, tstamp_type): 8488 /* The convert_ctx_access() on reading and writing 8489 * __sk_buff->tstamp depends on whether the bpf prog 8490 * has used __sk_buff->tstamp_type or not. 8491 * Thus, we need to set prog->tstamp_type_access 8492 * earlier during is_valid_access() here. 
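 * As an illustration (a sketch, not code from this file): a clsact
 * program doing
 *
 *	if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO)
 *		__u64 edt = skb->tstamp;	// mono delivery time
 *
 * is marked tstamp_type-aware here, so its later skb->tstamp loads
 * and stores are emitted as plain accesses; a program that never
 * touches tstamp_type instead reads 0 at ingress whenever the skb
 * still carries a mono delivery time (see bpf_convert_tstamp_read()).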
8493 */ 8494 ((struct bpf_prog *)prog)->tstamp_type_access = 1; 8495 return size == sizeof(__u8); 8496 } 8497 8498 return bpf_skb_is_valid_access(off, size, type, prog, info); 8499 } 8500 8501 static bool __is_valid_xdp_access(int off, int size) 8502 { 8503 if (off < 0 || off >= sizeof(struct xdp_md)) 8504 return false; 8505 if (off % size != 0) 8506 return false; 8507 if (size != sizeof(__u32)) 8508 return false; 8509 8510 return true; 8511 } 8512 8513 static bool xdp_is_valid_access(int off, int size, 8514 enum bpf_access_type type, 8515 const struct bpf_prog *prog, 8516 struct bpf_insn_access_aux *info) 8517 { 8518 if (prog->expected_attach_type != BPF_XDP_DEVMAP) { 8519 switch (off) { 8520 case offsetof(struct xdp_md, egress_ifindex): 8521 return false; 8522 } 8523 } 8524 8525 if (type == BPF_WRITE) { 8526 if (bpf_prog_is_dev_bound(prog->aux)) { 8527 switch (off) { 8528 case offsetof(struct xdp_md, rx_queue_index): 8529 return __is_valid_xdp_access(off, size); 8530 } 8531 } 8532 return false; 8533 } 8534 8535 switch (off) { 8536 case offsetof(struct xdp_md, data): 8537 info->reg_type = PTR_TO_PACKET; 8538 break; 8539 case offsetof(struct xdp_md, data_meta): 8540 info->reg_type = PTR_TO_PACKET_META; 8541 break; 8542 case offsetof(struct xdp_md, data_end): 8543 info->reg_type = PTR_TO_PACKET_END; 8544 break; 8545 } 8546 8547 return __is_valid_xdp_access(off, size); 8548 } 8549 8550 void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act) 8551 { 8552 const u32 act_max = XDP_REDIRECT; 8553 8554 pr_warn_once("%s XDP return value %u on prog %s (id %d) dev %s, expect packet loss!\n", 8555 act > act_max ? "Illegal" : "Driver unsupported", 8556 act, prog->aux->name, prog->aux->id, dev ? dev->name : "N/A"); 8557 } 8558 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); 8559 8560 static bool sock_addr_is_valid_access(int off, int size, 8561 enum bpf_access_type type, 8562 const struct bpf_prog *prog, 8563 struct bpf_insn_access_aux *info) 8564 { 8565 const int size_default = sizeof(__u32); 8566 8567 if (off < 0 || off >= sizeof(struct bpf_sock_addr)) 8568 return false; 8569 if (off % size != 0) 8570 return false; 8571 8572 /* Disallow access to IPv6 fields from IPv4 contex and vise 8573 * versa. 
8574 */ 8575 switch (off) { 8576 case bpf_ctx_range(struct bpf_sock_addr, user_ip4): 8577 switch (prog->expected_attach_type) { 8578 case BPF_CGROUP_INET4_BIND: 8579 case BPF_CGROUP_INET4_CONNECT: 8580 case BPF_CGROUP_INET4_GETPEERNAME: 8581 case BPF_CGROUP_INET4_GETSOCKNAME: 8582 case BPF_CGROUP_UDP4_SENDMSG: 8583 case BPF_CGROUP_UDP4_RECVMSG: 8584 break; 8585 default: 8586 return false; 8587 } 8588 break; 8589 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 8590 switch (prog->expected_attach_type) { 8591 case BPF_CGROUP_INET6_BIND: 8592 case BPF_CGROUP_INET6_CONNECT: 8593 case BPF_CGROUP_INET6_GETPEERNAME: 8594 case BPF_CGROUP_INET6_GETSOCKNAME: 8595 case BPF_CGROUP_UDP6_SENDMSG: 8596 case BPF_CGROUP_UDP6_RECVMSG: 8597 break; 8598 default: 8599 return false; 8600 } 8601 break; 8602 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): 8603 switch (prog->expected_attach_type) { 8604 case BPF_CGROUP_UDP4_SENDMSG: 8605 break; 8606 default: 8607 return false; 8608 } 8609 break; 8610 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 8611 msg_src_ip6[3]): 8612 switch (prog->expected_attach_type) { 8613 case BPF_CGROUP_UDP6_SENDMSG: 8614 break; 8615 default: 8616 return false; 8617 } 8618 break; 8619 } 8620 8621 switch (off) { 8622 case bpf_ctx_range(struct bpf_sock_addr, user_ip4): 8623 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 8624 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): 8625 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 8626 msg_src_ip6[3]): 8627 case bpf_ctx_range(struct bpf_sock_addr, user_port): 8628 if (type == BPF_READ) { 8629 bpf_ctx_record_field_size(info, size_default); 8630 8631 if (bpf_ctx_wide_access_ok(off, size, 8632 struct bpf_sock_addr, 8633 user_ip6)) 8634 return true; 8635 8636 if (bpf_ctx_wide_access_ok(off, size, 8637 struct bpf_sock_addr, 8638 msg_src_ip6)) 8639 return true; 8640 8641 if (!bpf_ctx_narrow_access_ok(off, size, size_default)) 8642 return false; 8643 } else { 8644 if (bpf_ctx_wide_access_ok(off, size, 8645 struct bpf_sock_addr, 8646 user_ip6)) 8647 return true; 8648 8649 if (bpf_ctx_wide_access_ok(off, size, 8650 struct bpf_sock_addr, 8651 msg_src_ip6)) 8652 return true; 8653 8654 if (size != size_default) 8655 return false; 8656 } 8657 break; 8658 case offsetof(struct bpf_sock_addr, sk): 8659 if (type != BPF_READ) 8660 return false; 8661 if (size != sizeof(__u64)) 8662 return false; 8663 info->reg_type = PTR_TO_SOCKET; 8664 break; 8665 default: 8666 if (type == BPF_READ) { 8667 if (size != size_default) 8668 return false; 8669 } else { 8670 return false; 8671 } 8672 } 8673 8674 return true; 8675 } 8676 8677 static bool sock_ops_is_valid_access(int off, int size, 8678 enum bpf_access_type type, 8679 const struct bpf_prog *prog, 8680 struct bpf_insn_access_aux *info) 8681 { 8682 const int size_default = sizeof(__u32); 8683 8684 if (off < 0 || off >= sizeof(struct bpf_sock_ops)) 8685 return false; 8686 8687 /* The verifier guarantees that size > 0. 
*/ 8688 if (off % size != 0) 8689 return false; 8690 8691 if (type == BPF_WRITE) { 8692 switch (off) { 8693 case offsetof(struct bpf_sock_ops, reply): 8694 case offsetof(struct bpf_sock_ops, sk_txhash): 8695 if (size != size_default) 8696 return false; 8697 break; 8698 default: 8699 return false; 8700 } 8701 } else { 8702 switch (off) { 8703 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received, 8704 bytes_acked): 8705 if (size != sizeof(__u64)) 8706 return false; 8707 break; 8708 case offsetof(struct bpf_sock_ops, sk): 8709 if (size != sizeof(__u64)) 8710 return false; 8711 info->reg_type = PTR_TO_SOCKET_OR_NULL; 8712 break; 8713 case offsetof(struct bpf_sock_ops, skb_data): 8714 if (size != sizeof(__u64)) 8715 return false; 8716 info->reg_type = PTR_TO_PACKET; 8717 break; 8718 case offsetof(struct bpf_sock_ops, skb_data_end): 8719 if (size != sizeof(__u64)) 8720 return false; 8721 info->reg_type = PTR_TO_PACKET_END; 8722 break; 8723 case offsetof(struct bpf_sock_ops, skb_tcp_flags): 8724 bpf_ctx_record_field_size(info, size_default); 8725 return bpf_ctx_narrow_access_ok(off, size, 8726 size_default); 8727 default: 8728 if (size != size_default) 8729 return false; 8730 break; 8731 } 8732 } 8733 8734 return true; 8735 } 8736 8737 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, 8738 const struct bpf_prog *prog) 8739 { 8740 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP); 8741 } 8742 8743 static bool sk_skb_is_valid_access(int off, int size, 8744 enum bpf_access_type type, 8745 const struct bpf_prog *prog, 8746 struct bpf_insn_access_aux *info) 8747 { 8748 switch (off) { 8749 case bpf_ctx_range(struct __sk_buff, tc_classid): 8750 case bpf_ctx_range(struct __sk_buff, data_meta): 8751 case bpf_ctx_range(struct __sk_buff, tstamp): 8752 case bpf_ctx_range(struct __sk_buff, wire_len): 8753 case bpf_ctx_range(struct __sk_buff, hwtstamp): 8754 return false; 8755 } 8756 8757 if (type == BPF_WRITE) { 8758 switch (off) { 8759 case bpf_ctx_range(struct __sk_buff, tc_index): 8760 case bpf_ctx_range(struct __sk_buff, priority): 8761 break; 8762 default: 8763 return false; 8764 } 8765 } 8766 8767 switch (off) { 8768 case bpf_ctx_range(struct __sk_buff, mark): 8769 return false; 8770 case bpf_ctx_range(struct __sk_buff, data): 8771 info->reg_type = PTR_TO_PACKET; 8772 break; 8773 case bpf_ctx_range(struct __sk_buff, data_end): 8774 info->reg_type = PTR_TO_PACKET_END; 8775 break; 8776 } 8777 8778 return bpf_skb_is_valid_access(off, size, type, prog, info); 8779 } 8780 8781 static bool sk_msg_is_valid_access(int off, int size, 8782 enum bpf_access_type type, 8783 const struct bpf_prog *prog, 8784 struct bpf_insn_access_aux *info) 8785 { 8786 if (type == BPF_WRITE) 8787 return false; 8788 8789 if (off % size != 0) 8790 return false; 8791 8792 switch (off) { 8793 case offsetof(struct sk_msg_md, data): 8794 info->reg_type = PTR_TO_PACKET; 8795 if (size != sizeof(__u64)) 8796 return false; 8797 break; 8798 case offsetof(struct sk_msg_md, data_end): 8799 info->reg_type = PTR_TO_PACKET_END; 8800 if (size != sizeof(__u64)) 8801 return false; 8802 break; 8803 case offsetof(struct sk_msg_md, sk): 8804 if (size != sizeof(__u64)) 8805 return false; 8806 info->reg_type = PTR_TO_SOCKET; 8807 break; 8808 case bpf_ctx_range(struct sk_msg_md, family): 8809 case bpf_ctx_range(struct sk_msg_md, remote_ip4): 8810 case bpf_ctx_range(struct sk_msg_md, local_ip4): 8811 case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]): 8812 case bpf_ctx_range_till(struct 
sk_msg_md, local_ip6[0], local_ip6[3]): 8813 case bpf_ctx_range(struct sk_msg_md, remote_port): 8814 case bpf_ctx_range(struct sk_msg_md, local_port): 8815 case bpf_ctx_range(struct sk_msg_md, size): 8816 if (size != sizeof(__u32)) 8817 return false; 8818 break; 8819 default: 8820 return false; 8821 } 8822 return true; 8823 } 8824 8825 static bool flow_dissector_is_valid_access(int off, int size, 8826 enum bpf_access_type type, 8827 const struct bpf_prog *prog, 8828 struct bpf_insn_access_aux *info) 8829 { 8830 const int size_default = sizeof(__u32); 8831 8832 if (off < 0 || off >= sizeof(struct __sk_buff)) 8833 return false; 8834 8835 if (type == BPF_WRITE) 8836 return false; 8837 8838 switch (off) { 8839 case bpf_ctx_range(struct __sk_buff, data): 8840 if (size != size_default) 8841 return false; 8842 info->reg_type = PTR_TO_PACKET; 8843 return true; 8844 case bpf_ctx_range(struct __sk_buff, data_end): 8845 if (size != size_default) 8846 return false; 8847 info->reg_type = PTR_TO_PACKET_END; 8848 return true; 8849 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 8850 if (size != sizeof(__u64)) 8851 return false; 8852 info->reg_type = PTR_TO_FLOW_KEYS; 8853 return true; 8854 default: 8855 return false; 8856 } 8857 } 8858 8859 static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, 8860 const struct bpf_insn *si, 8861 struct bpf_insn *insn_buf, 8862 struct bpf_prog *prog, 8863 u32 *target_size) 8864 8865 { 8866 struct bpf_insn *insn = insn_buf; 8867 8868 switch (si->off) { 8869 case offsetof(struct __sk_buff, data): 8870 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data), 8871 si->dst_reg, si->src_reg, 8872 offsetof(struct bpf_flow_dissector, data)); 8873 break; 8874 8875 case offsetof(struct __sk_buff, data_end): 8876 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end), 8877 si->dst_reg, si->src_reg, 8878 offsetof(struct bpf_flow_dissector, data_end)); 8879 break; 8880 8881 case offsetof(struct __sk_buff, flow_keys): 8882 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys), 8883 si->dst_reg, si->src_reg, 8884 offsetof(struct bpf_flow_dissector, flow_keys)); 8885 break; 8886 } 8887 8888 return insn - insn_buf; 8889 } 8890 8891 static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si, 8892 struct bpf_insn *insn) 8893 { 8894 __u8 value_reg = si->dst_reg; 8895 __u8 skb_reg = si->src_reg; 8896 /* AX is needed because src_reg and dst_reg could be the same */ 8897 __u8 tmp_reg = BPF_REG_AX; 8898 8899 *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, 8900 PKT_VLAN_PRESENT_OFFSET); 8901 *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, 8902 SKB_MONO_DELIVERY_TIME_MASK, 2); 8903 *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC); 8904 *insn++ = BPF_JMP_A(1); 8905 *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_DELIVERY_MONO); 8906 8907 return insn; 8908 } 8909 8910 static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si, 8911 struct bpf_insn *insn) 8912 { 8913 /* si->dst_reg = skb_shinfo(SKB); */ 8914 #ifdef NET_SKBUFF_DATA_USES_OFFSET 8915 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 8916 BPF_REG_AX, si->src_reg, 8917 offsetof(struct sk_buff, end)); 8918 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), 8919 si->dst_reg, si->src_reg, 8920 offsetof(struct sk_buff, head)); 8921 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); 8922 #else 8923 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 8924 si->dst_reg, si->src_reg, 
8925 offsetof(struct sk_buff, end)); 8926 #endif 8927 8928 return insn; 8929 } 8930 8931 static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, 8932 const struct bpf_insn *si, 8933 struct bpf_insn *insn) 8934 { 8935 __u8 value_reg = si->dst_reg; 8936 __u8 skb_reg = si->src_reg; 8937 8938 #ifdef CONFIG_NET_CLS_ACT 8939 /* If the tstamp_type is read, 8940 * the bpf prog is aware the tstamp could have delivery time. 8941 * Thus, read skb->tstamp as is if tstamp_type_access is true. 8942 */ 8943 if (!prog->tstamp_type_access) { 8944 /* AX is needed because src_reg and dst_reg could be the same */ 8945 __u8 tmp_reg = BPF_REG_AX; 8946 8947 *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); 8948 *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, 8949 TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK); 8950 *insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg, 8951 TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2); 8952 /* skb->tc_at_ingress && skb->mono_delivery_time, 8953 * read 0 as the (rcv) timestamp. 8954 */ 8955 *insn++ = BPF_MOV64_IMM(value_reg, 0); 8956 *insn++ = BPF_JMP_A(1); 8957 } 8958 #endif 8959 8960 *insn++ = BPF_LDX_MEM(BPF_DW, value_reg, skb_reg, 8961 offsetof(struct sk_buff, tstamp)); 8962 return insn; 8963 } 8964 8965 static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, 8966 const struct bpf_insn *si, 8967 struct bpf_insn *insn) 8968 { 8969 __u8 value_reg = si->src_reg; 8970 __u8 skb_reg = si->dst_reg; 8971 8972 #ifdef CONFIG_NET_CLS_ACT 8973 /* If the tstamp_type is read, 8974 * the bpf prog is aware the tstamp could have delivery time. 8975 * Thus, write skb->tstamp as is if tstamp_type_access is true. 8976 * Otherwise, writing at ingress will have to clear the 8977 * mono_delivery_time bit also. 
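 * Net effect for a program (an illustrative sketch of the emitted
 * sequence, which really operates on the flags byte at
 * PKT_VLAN_PRESENT_OFFSET):
 *
 *	if (!prog->tstamp_type_access && skb->tc_at_ingress)
 *		skb->mono_delivery_time = 0;
 *	skb->tstamp = value;
 *
 * where the tstamp_type_access test is resolved at conversion time,
 * not at run time.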
8978 */ 8979 if (!prog->tstamp_type_access) { 8980 __u8 tmp_reg = BPF_REG_AX; 8981 8982 *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); 8983 /* Writing __sk_buff->tstamp as ingress, goto <clear> */ 8984 *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1); 8985 /* goto <store> */ 8986 *insn++ = BPF_JMP_A(2); 8987 /* <clear>: mono_delivery_time */ 8988 *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK); 8989 *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, PKT_VLAN_PRESENT_OFFSET); 8990 } 8991 #endif 8992 8993 /* <store>: skb->tstamp = tstamp */ 8994 *insn++ = BPF_STX_MEM(BPF_DW, skb_reg, value_reg, 8995 offsetof(struct sk_buff, tstamp)); 8996 return insn; 8997 } 8998 8999 static u32 bpf_convert_ctx_access(enum bpf_access_type type, 9000 const struct bpf_insn *si, 9001 struct bpf_insn *insn_buf, 9002 struct bpf_prog *prog, u32 *target_size) 9003 { 9004 struct bpf_insn *insn = insn_buf; 9005 int off; 9006 9007 switch (si->off) { 9008 case offsetof(struct __sk_buff, len): 9009 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9010 bpf_target_off(struct sk_buff, len, 4, 9011 target_size)); 9012 break; 9013 9014 case offsetof(struct __sk_buff, protocol): 9015 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9016 bpf_target_off(struct sk_buff, protocol, 2, 9017 target_size)); 9018 break; 9019 9020 case offsetof(struct __sk_buff, vlan_proto): 9021 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9022 bpf_target_off(struct sk_buff, vlan_proto, 2, 9023 target_size)); 9024 break; 9025 9026 case offsetof(struct __sk_buff, priority): 9027 if (type == BPF_WRITE) 9028 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9029 bpf_target_off(struct sk_buff, priority, 4, 9030 target_size)); 9031 else 9032 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9033 bpf_target_off(struct sk_buff, priority, 4, 9034 target_size)); 9035 break; 9036 9037 case offsetof(struct __sk_buff, ingress_ifindex): 9038 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9039 bpf_target_off(struct sk_buff, skb_iif, 4, 9040 target_size)); 9041 break; 9042 9043 case offsetof(struct __sk_buff, ifindex): 9044 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 9045 si->dst_reg, si->src_reg, 9046 offsetof(struct sk_buff, dev)); 9047 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 9048 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9049 bpf_target_off(struct net_device, ifindex, 4, 9050 target_size)); 9051 break; 9052 9053 case offsetof(struct __sk_buff, hash): 9054 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9055 bpf_target_off(struct sk_buff, hash, 4, 9056 target_size)); 9057 break; 9058 9059 case offsetof(struct __sk_buff, mark): 9060 if (type == BPF_WRITE) 9061 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9062 bpf_target_off(struct sk_buff, mark, 4, 9063 target_size)); 9064 else 9065 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9066 bpf_target_off(struct sk_buff, mark, 4, 9067 target_size)); 9068 break; 9069 9070 case offsetof(struct __sk_buff, pkt_type): 9071 *target_size = 1; 9072 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, 9073 PKT_TYPE_OFFSET); 9074 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); 9075 #ifdef __BIG_ENDIAN_BITFIELD 9076 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); 9077 #endif 9078 break; 9079 9080 case offsetof(struct __sk_buff, queue_mapping): 9081 if (type == BPF_WRITE) { 9082 *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1); 9083 
*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, 9084 bpf_target_off(struct sk_buff, 9085 queue_mapping, 9086 2, target_size)); 9087 } else { 9088 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9089 bpf_target_off(struct sk_buff, 9090 queue_mapping, 9091 2, target_size)); 9092 } 9093 break; 9094 9095 case offsetof(struct __sk_buff, vlan_present): 9096 *target_size = 1; 9097 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, 9098 PKT_VLAN_PRESENT_OFFSET); 9099 if (PKT_VLAN_PRESENT_BIT) 9100 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT); 9101 if (PKT_VLAN_PRESENT_BIT < 7) 9102 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1); 9103 break; 9104 9105 case offsetof(struct __sk_buff, vlan_tci): 9106 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9107 bpf_target_off(struct sk_buff, vlan_tci, 2, 9108 target_size)); 9109 break; 9110 9111 case offsetof(struct __sk_buff, cb[0]) ... 9112 offsetofend(struct __sk_buff, cb[4]) - 1: 9113 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20); 9114 BUILD_BUG_ON((offsetof(struct sk_buff, cb) + 9115 offsetof(struct qdisc_skb_cb, data)) % 9116 sizeof(__u64)); 9117 9118 prog->cb_access = 1; 9119 off = si->off; 9120 off -= offsetof(struct __sk_buff, cb[0]); 9121 off += offsetof(struct sk_buff, cb); 9122 off += offsetof(struct qdisc_skb_cb, data); 9123 if (type == BPF_WRITE) 9124 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, 9125 si->src_reg, off); 9126 else 9127 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, 9128 si->src_reg, off); 9129 break; 9130 9131 case offsetof(struct __sk_buff, tc_classid): 9132 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2); 9133 9134 off = si->off; 9135 off -= offsetof(struct __sk_buff, tc_classid); 9136 off += offsetof(struct sk_buff, cb); 9137 off += offsetof(struct qdisc_skb_cb, tc_classid); 9138 *target_size = 2; 9139 if (type == BPF_WRITE) 9140 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, 9141 si->src_reg, off); 9142 else 9143 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, 9144 si->src_reg, off); 9145 break; 9146 9147 case offsetof(struct __sk_buff, data): 9148 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), 9149 si->dst_reg, si->src_reg, 9150 offsetof(struct sk_buff, data)); 9151 break; 9152 9153 case offsetof(struct __sk_buff, data_meta): 9154 off = si->off; 9155 off -= offsetof(struct __sk_buff, data_meta); 9156 off += offsetof(struct sk_buff, cb); 9157 off += offsetof(struct bpf_skb_data_end, data_meta); 9158 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, 9159 si->src_reg, off); 9160 break; 9161 9162 case offsetof(struct __sk_buff, data_end): 9163 off = si->off; 9164 off -= offsetof(struct __sk_buff, data_end); 9165 off += offsetof(struct sk_buff, cb); 9166 off += offsetof(struct bpf_skb_data_end, data_end); 9167 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, 9168 si->src_reg, off); 9169 break; 9170 9171 case offsetof(struct __sk_buff, tc_index): 9172 #ifdef CONFIG_NET_SCHED 9173 if (type == BPF_WRITE) 9174 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, 9175 bpf_target_off(struct sk_buff, tc_index, 2, 9176 target_size)); 9177 else 9178 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9179 bpf_target_off(struct sk_buff, tc_index, 2, 9180 target_size)); 9181 #else 9182 *target_size = 2; 9183 if (type == BPF_WRITE) 9184 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); 9185 else 9186 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 9187 #endif 9188 break; 9189 9190 case offsetof(struct __sk_buff, napi_id): 9191 #if 
defined(CONFIG_NET_RX_BUSY_POLL) 9192 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9193 bpf_target_off(struct sk_buff, napi_id, 4, 9194 target_size)); 9195 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1); 9196 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 9197 #else 9198 *target_size = 4; 9199 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 9200 #endif 9201 break; 9202 case offsetof(struct __sk_buff, family): 9203 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 9204 9205 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9206 si->dst_reg, si->src_reg, 9207 offsetof(struct sk_buff, sk)); 9208 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9209 bpf_target_off(struct sock_common, 9210 skc_family, 9211 2, target_size)); 9212 break; 9213 case offsetof(struct __sk_buff, remote_ip4): 9214 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 9215 9216 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9217 si->dst_reg, si->src_reg, 9218 offsetof(struct sk_buff, sk)); 9219 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9220 bpf_target_off(struct sock_common, 9221 skc_daddr, 9222 4, target_size)); 9223 break; 9224 case offsetof(struct __sk_buff, local_ip4): 9225 BUILD_BUG_ON(sizeof_field(struct sock_common, 9226 skc_rcv_saddr) != 4); 9227 9228 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9229 si->dst_reg, si->src_reg, 9230 offsetof(struct sk_buff, sk)); 9231 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9232 bpf_target_off(struct sock_common, 9233 skc_rcv_saddr, 9234 4, target_size)); 9235 break; 9236 case offsetof(struct __sk_buff, remote_ip6[0]) ... 9237 offsetof(struct __sk_buff, remote_ip6[3]): 9238 #if IS_ENABLED(CONFIG_IPV6) 9239 BUILD_BUG_ON(sizeof_field(struct sock_common, 9240 skc_v6_daddr.s6_addr32[0]) != 4); 9241 9242 off = si->off; 9243 off -= offsetof(struct __sk_buff, remote_ip6[0]); 9244 9245 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9246 si->dst_reg, si->src_reg, 9247 offsetof(struct sk_buff, sk)); 9248 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9249 offsetof(struct sock_common, 9250 skc_v6_daddr.s6_addr32[0]) + 9251 off); 9252 #else 9253 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9254 #endif 9255 break; 9256 case offsetof(struct __sk_buff, local_ip6[0]) ... 
9257 offsetof(struct __sk_buff, local_ip6[3]): 9258 #if IS_ENABLED(CONFIG_IPV6) 9259 BUILD_BUG_ON(sizeof_field(struct sock_common, 9260 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 9261 9262 off = si->off; 9263 off -= offsetof(struct __sk_buff, local_ip6[0]); 9264 9265 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9266 si->dst_reg, si->src_reg, 9267 offsetof(struct sk_buff, sk)); 9268 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9269 offsetof(struct sock_common, 9270 skc_v6_rcv_saddr.s6_addr32[0]) + 9271 off); 9272 #else 9273 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9274 #endif 9275 break; 9276 9277 case offsetof(struct __sk_buff, remote_port): 9278 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 9279 9280 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9281 si->dst_reg, si->src_reg, 9282 offsetof(struct sk_buff, sk)); 9283 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9284 bpf_target_off(struct sock_common, 9285 skc_dport, 9286 2, target_size)); 9287 #ifndef __BIG_ENDIAN_BITFIELD 9288 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 9289 #endif 9290 break; 9291 9292 case offsetof(struct __sk_buff, local_port): 9293 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 9294 9295 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9296 si->dst_reg, si->src_reg, 9297 offsetof(struct sk_buff, sk)); 9298 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9299 bpf_target_off(struct sock_common, 9300 skc_num, 2, target_size)); 9301 break; 9302 9303 case offsetof(struct __sk_buff, tstamp): 9304 BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8); 9305 9306 if (type == BPF_WRITE) 9307 insn = bpf_convert_tstamp_write(prog, si, insn); 9308 else 9309 insn = bpf_convert_tstamp_read(prog, si, insn); 9310 break; 9311 9312 case offsetof(struct __sk_buff, tstamp_type): 9313 insn = bpf_convert_tstamp_type_read(si, insn); 9314 break; 9315 9316 case offsetof(struct __sk_buff, gso_segs): 9317 insn = bpf_convert_shinfo_access(si, insn); 9318 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs), 9319 si->dst_reg, si->dst_reg, 9320 bpf_target_off(struct skb_shared_info, 9321 gso_segs, 2, 9322 target_size)); 9323 break; 9324 case offsetof(struct __sk_buff, gso_size): 9325 insn = bpf_convert_shinfo_access(si, insn); 9326 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size), 9327 si->dst_reg, si->dst_reg, 9328 bpf_target_off(struct skb_shared_info, 9329 gso_size, 2, 9330 target_size)); 9331 break; 9332 case offsetof(struct __sk_buff, wire_len): 9333 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4); 9334 9335 off = si->off; 9336 off -= offsetof(struct __sk_buff, wire_len); 9337 off += offsetof(struct sk_buff, cb); 9338 off += offsetof(struct qdisc_skb_cb, pkt_len); 9339 *target_size = 4; 9340 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); 9341 break; 9342 9343 case offsetof(struct __sk_buff, sk): 9344 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9345 si->dst_reg, si->src_reg, 9346 offsetof(struct sk_buff, sk)); 9347 break; 9348 case offsetof(struct __sk_buff, hwtstamp): 9349 BUILD_BUG_ON(sizeof_field(struct skb_shared_hwtstamps, hwtstamp) != 8); 9350 BUILD_BUG_ON(offsetof(struct skb_shared_hwtstamps, hwtstamp) != 0); 9351 9352 insn = bpf_convert_shinfo_access(si, insn); 9353 *insn++ = BPF_LDX_MEM(BPF_DW, 9354 si->dst_reg, si->dst_reg, 9355 bpf_target_off(struct skb_shared_info, 9356 hwtstamps, 8, 9357 target_size)); 9358 break; 9359 } 9360 9361 return insn 
- insn_buf; 9362 } 9363 9364 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 9365 const struct bpf_insn *si, 9366 struct bpf_insn *insn_buf, 9367 struct bpf_prog *prog, u32 *target_size) 9368 { 9369 struct bpf_insn *insn = insn_buf; 9370 int off; 9371 9372 switch (si->off) { 9373 case offsetof(struct bpf_sock, bound_dev_if): 9374 BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4); 9375 9376 if (type == BPF_WRITE) 9377 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9378 offsetof(struct sock, sk_bound_dev_if)); 9379 else 9380 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9381 offsetof(struct sock, sk_bound_dev_if)); 9382 break; 9383 9384 case offsetof(struct bpf_sock, mark): 9385 BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4); 9386 9387 if (type == BPF_WRITE) 9388 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9389 offsetof(struct sock, sk_mark)); 9390 else 9391 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9392 offsetof(struct sock, sk_mark)); 9393 break; 9394 9395 case offsetof(struct bpf_sock, priority): 9396 BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4); 9397 9398 if (type == BPF_WRITE) 9399 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9400 offsetof(struct sock, sk_priority)); 9401 else 9402 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9403 offsetof(struct sock, sk_priority)); 9404 break; 9405 9406 case offsetof(struct bpf_sock, family): 9407 *insn++ = BPF_LDX_MEM( 9408 BPF_FIELD_SIZEOF(struct sock_common, skc_family), 9409 si->dst_reg, si->src_reg, 9410 bpf_target_off(struct sock_common, 9411 skc_family, 9412 sizeof_field(struct sock_common, 9413 skc_family), 9414 target_size)); 9415 break; 9416 9417 case offsetof(struct bpf_sock, type): 9418 *insn++ = BPF_LDX_MEM( 9419 BPF_FIELD_SIZEOF(struct sock, sk_type), 9420 si->dst_reg, si->src_reg, 9421 bpf_target_off(struct sock, sk_type, 9422 sizeof_field(struct sock, sk_type), 9423 target_size)); 9424 break; 9425 9426 case offsetof(struct bpf_sock, protocol): 9427 *insn++ = BPF_LDX_MEM( 9428 BPF_FIELD_SIZEOF(struct sock, sk_protocol), 9429 si->dst_reg, si->src_reg, 9430 bpf_target_off(struct sock, sk_protocol, 9431 sizeof_field(struct sock, sk_protocol), 9432 target_size)); 9433 break; 9434 9435 case offsetof(struct bpf_sock, src_ip4): 9436 *insn++ = BPF_LDX_MEM( 9437 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 9438 bpf_target_off(struct sock_common, skc_rcv_saddr, 9439 sizeof_field(struct sock_common, 9440 skc_rcv_saddr), 9441 target_size)); 9442 break; 9443 9444 case offsetof(struct bpf_sock, dst_ip4): 9445 *insn++ = BPF_LDX_MEM( 9446 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 9447 bpf_target_off(struct sock_common, skc_daddr, 9448 sizeof_field(struct sock_common, 9449 skc_daddr), 9450 target_size)); 9451 break; 9452 9453 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 9454 #if IS_ENABLED(CONFIG_IPV6) 9455 off = si->off; 9456 off -= offsetof(struct bpf_sock, src_ip6[0]); 9457 *insn++ = BPF_LDX_MEM( 9458 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 9459 bpf_target_off( 9460 struct sock_common, 9461 skc_v6_rcv_saddr.s6_addr32[0], 9462 sizeof_field(struct sock_common, 9463 skc_v6_rcv_saddr.s6_addr32[0]), 9464 target_size) + off); 9465 #else 9466 (void)off; 9467 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9468 #endif 9469 break; 9470 9471 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): 9472 #if IS_ENABLED(CONFIG_IPV6) 9473 off = si->off; 9474 off -= offsetof(struct bpf_sock, dst_ip6[0]); 9475 *insn++ = 
BPF_LDX_MEM( 9476 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 9477 bpf_target_off(struct sock_common, 9478 skc_v6_daddr.s6_addr32[0], 9479 sizeof_field(struct sock_common, 9480 skc_v6_daddr.s6_addr32[0]), 9481 target_size) + off); 9482 #else 9483 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9484 *target_size = 4; 9485 #endif 9486 break; 9487 9488 case offsetof(struct bpf_sock, src_port): 9489 *insn++ = BPF_LDX_MEM( 9490 BPF_FIELD_SIZEOF(struct sock_common, skc_num), 9491 si->dst_reg, si->src_reg, 9492 bpf_target_off(struct sock_common, skc_num, 9493 sizeof_field(struct sock_common, 9494 skc_num), 9495 target_size)); 9496 break; 9497 9498 case offsetof(struct bpf_sock, dst_port): 9499 *insn++ = BPF_LDX_MEM( 9500 BPF_FIELD_SIZEOF(struct sock_common, skc_dport), 9501 si->dst_reg, si->src_reg, 9502 bpf_target_off(struct sock_common, skc_dport, 9503 sizeof_field(struct sock_common, 9504 skc_dport), 9505 target_size)); 9506 break; 9507 9508 case offsetof(struct bpf_sock, state): 9509 *insn++ = BPF_LDX_MEM( 9510 BPF_FIELD_SIZEOF(struct sock_common, skc_state), 9511 si->dst_reg, si->src_reg, 9512 bpf_target_off(struct sock_common, skc_state, 9513 sizeof_field(struct sock_common, 9514 skc_state), 9515 target_size)); 9516 break; 9517 case offsetof(struct bpf_sock, rx_queue_mapping): 9518 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING 9519 *insn++ = BPF_LDX_MEM( 9520 BPF_FIELD_SIZEOF(struct sock, sk_rx_queue_mapping), 9521 si->dst_reg, si->src_reg, 9522 bpf_target_off(struct sock, sk_rx_queue_mapping, 9523 sizeof_field(struct sock, 9524 sk_rx_queue_mapping), 9525 target_size)); 9526 *insn++ = BPF_JMP_IMM(BPF_JNE, si->dst_reg, NO_QUEUE_MAPPING, 9527 1); 9528 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1); 9529 #else 9530 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1); 9531 *target_size = 2; 9532 #endif 9533 break; 9534 } 9535 9536 return insn - insn_buf; 9537 } 9538 9539 static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, 9540 const struct bpf_insn *si, 9541 struct bpf_insn *insn_buf, 9542 struct bpf_prog *prog, u32 *target_size) 9543 { 9544 struct bpf_insn *insn = insn_buf; 9545 9546 switch (si->off) { 9547 case offsetof(struct __sk_buff, ifindex): 9548 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 9549 si->dst_reg, si->src_reg, 9550 offsetof(struct sk_buff, dev)); 9551 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9552 bpf_target_off(struct net_device, ifindex, 4, 9553 target_size)); 9554 break; 9555 default: 9556 return bpf_convert_ctx_access(type, si, insn_buf, prog, 9557 target_size); 9558 } 9559 9560 return insn - insn_buf; 9561 } 9562 9563 static u32 xdp_convert_ctx_access(enum bpf_access_type type, 9564 const struct bpf_insn *si, 9565 struct bpf_insn *insn_buf, 9566 struct bpf_prog *prog, u32 *target_size) 9567 { 9568 struct bpf_insn *insn = insn_buf; 9569 9570 switch (si->off) { 9571 case offsetof(struct xdp_md, data): 9572 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data), 9573 si->dst_reg, si->src_reg, 9574 offsetof(struct xdp_buff, data)); 9575 break; 9576 case offsetof(struct xdp_md, data_meta): 9577 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta), 9578 si->dst_reg, si->src_reg, 9579 offsetof(struct xdp_buff, data_meta)); 9580 break; 9581 case offsetof(struct xdp_md, data_end): 9582 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), 9583 si->dst_reg, si->src_reg, 9584 offsetof(struct xdp_buff, data_end)); 9585 break; 9586 case offsetof(struct xdp_md, ingress_ifindex): 9587 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, 
rxq),
9588 si->dst_reg, si->src_reg,
9589 offsetof(struct xdp_buff, rxq));
9590 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
9591 si->dst_reg, si->dst_reg,
9592 offsetof(struct xdp_rxq_info, dev));
9593 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9594 offsetof(struct net_device, ifindex));
9595 break;
9596 case offsetof(struct xdp_md, rx_queue_index):
9597 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
9598 si->dst_reg, si->src_reg,
9599 offsetof(struct xdp_buff, rxq));
9600 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9601 offsetof(struct xdp_rxq_info,
9602 queue_index));
9603 break;
9604 case offsetof(struct xdp_md, egress_ifindex):
9605 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, txq),
9606 si->dst_reg, si->src_reg,
9607 offsetof(struct xdp_buff, txq));
9608 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_txq_info, dev),
9609 si->dst_reg, si->dst_reg,
9610 offsetof(struct xdp_txq_info, dev));
9611 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9612 offsetof(struct net_device, ifindex));
9613 break;
9614 }
9615
9616 return insn - insn_buf;
9617 }
9618
9619 /* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of
9620 * context Structure, F is Field in context structure that contains a pointer
9621 * to Nested Structure of type NS that has the field NF.
9622 *
9623 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to make
9624 * sure that SIZE is not greater than the actual size of S.F.NF.
9625 *
9626 * If offset OFF is provided, the load happens from that offset relative to
9627 * offset of NF.
9628 */
9629 #define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \
9630 do { \
9631 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
9632 si->src_reg, offsetof(S, F)); \
9633 *insn++ = BPF_LDX_MEM( \
9634 SIZE, si->dst_reg, si->dst_reg, \
9635 bpf_target_off(NS, NF, sizeof_field(NS, NF), \
9636 target_size) \
9637 + OFF); \
9638 } while (0)
9639
9640 #define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \
9641 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \
9642 BPF_FIELD_SIZEOF(NS, NF), 0)
9643
9644 /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
9645 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for a store operation.
9646 *
9647 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
9648 * "register" since the two registers available in convert_ctx_access are not
9649 * enough: we can overwrite neither SRC, since it contains the value to store, nor
9650 * DST, since it contains the pointer to the context that may be used by later
9651 * instructions. But we need a temporary place to save the pointer to the nested
9652 * structure whose field we want to store to.
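 * Schematically the generated instructions do the following, with R9
 * standing for whichever scratch register does not clash with
 * src_reg/dst_reg (a sketch, not the literal emitted code):
 *
 *	S->TF = R9;			// park the scratch register
 *	R9 = S->F;			// pointer to nested struct NS
 *	((NS *)R9)->NF = src_reg;	// the actual store
 *	R9 = S->TF;			// restore the scratch register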
9653 */ 9654 #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \ 9655 do { \ 9656 int tmp_reg = BPF_REG_9; \ 9657 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ 9658 --tmp_reg; \ 9659 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ 9660 --tmp_reg; \ 9661 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \ 9662 offsetof(S, TF)); \ 9663 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ 9664 si->dst_reg, offsetof(S, F)); \ 9665 *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \ 9666 bpf_target_off(NS, NF, sizeof_field(NS, NF), \ 9667 target_size) \ 9668 + OFF); \ 9669 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ 9670 offsetof(S, TF)); \ 9671 } while (0) 9672 9673 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \ 9674 TF) \ 9675 do { \ 9676 if (type == BPF_WRITE) { \ 9677 SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \ 9678 OFF, TF); \ 9679 } else { \ 9680 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \ 9681 S, NS, F, NF, SIZE, OFF); \ 9682 } \ 9683 } while (0) 9684 9685 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \ 9686 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \ 9687 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF) 9688 9689 static u32 sock_addr_convert_ctx_access(enum bpf_access_type type, 9690 const struct bpf_insn *si, 9691 struct bpf_insn *insn_buf, 9692 struct bpf_prog *prog, u32 *target_size) 9693 { 9694 int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port); 9695 struct bpf_insn *insn = insn_buf; 9696 9697 switch (si->off) { 9698 case offsetof(struct bpf_sock_addr, user_family): 9699 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9700 struct sockaddr, uaddr, sa_family); 9701 break; 9702 9703 case offsetof(struct bpf_sock_addr, user_ip4): 9704 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9705 struct bpf_sock_addr_kern, struct sockaddr_in, uaddr, 9706 sin_addr, BPF_SIZE(si->code), 0, tmp_reg); 9707 break; 9708 9709 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 9710 off = si->off; 9711 off -= offsetof(struct bpf_sock_addr, user_ip6[0]); 9712 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9713 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, 9714 sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off, 9715 tmp_reg); 9716 break; 9717 9718 case offsetof(struct bpf_sock_addr, user_port): 9719 /* To get port we need to know sa_family first and then treat 9720 * sockaddr as either sockaddr_in or sockaddr_in6. 9721 * Though we can simplify since port field has same offset and 9722 * size in both structures. 9723 * Here we check this invariant and use just one of the 9724 * structures if it's true. 9725 */ 9726 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) != 9727 offsetof(struct sockaddr_in6, sin6_port)); 9728 BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) != 9729 sizeof_field(struct sockaddr_in6, sin6_port)); 9730 /* Account for sin6_port being smaller than user_port. 
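 * user_port is exposed to programs as a __u32 while the underlying
 * sin_port/sin6_port is only 2 bytes wide, so the emitted access is
 * clamped to the real field size. E.g. (a sketch of the effect): a
 * 4-byte access to user_port is emitted as a 2-byte load/store of
 * sin6_port, while a 1-byte access stays 1 byte because
 * BPF_LDST_BYTES(si) is already smaller.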
*/ 9731 port_size = min(port_size, BPF_LDST_BYTES(si)); 9732 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9733 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, 9734 sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg); 9735 break; 9736 9737 case offsetof(struct bpf_sock_addr, family): 9738 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9739 struct sock, sk, sk_family); 9740 break; 9741 9742 case offsetof(struct bpf_sock_addr, type): 9743 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9744 struct sock, sk, sk_type); 9745 break; 9746 9747 case offsetof(struct bpf_sock_addr, protocol): 9748 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9749 struct sock, sk, sk_protocol); 9750 break; 9751 9752 case offsetof(struct bpf_sock_addr, msg_src_ip4): 9753 /* Treat t_ctx as struct in_addr for msg_src_ip4. */ 9754 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9755 struct bpf_sock_addr_kern, struct in_addr, t_ctx, 9756 s_addr, BPF_SIZE(si->code), 0, tmp_reg); 9757 break; 9758 9759 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 9760 msg_src_ip6[3]): 9761 off = si->off; 9762 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]); 9763 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */ 9764 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9765 struct bpf_sock_addr_kern, struct in6_addr, t_ctx, 9766 s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); 9767 break; 9768 case offsetof(struct bpf_sock_addr, sk): 9769 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk), 9770 si->dst_reg, si->src_reg, 9771 offsetof(struct bpf_sock_addr_kern, sk)); 9772 break; 9773 } 9774 9775 return insn - insn_buf; 9776 } 9777 9778 static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, 9779 const struct bpf_insn *si, 9780 struct bpf_insn *insn_buf, 9781 struct bpf_prog *prog, 9782 u32 *target_size) 9783 { 9784 struct bpf_insn *insn = insn_buf; 9785 int off; 9786 9787 /* Helper macro for adding read access to tcp_sock or sock fields. 
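 * Ignoring the dst_reg == src_reg special case, a read emitted by
 * SOCK_OPS_GET_FIELD() behaves like this pseudo-C (a sketch only,
 * with ops standing for the bpf_sock_ops_kern context):
 *
 *	if (!ops->is_fullsock)
 *		dst_reg = 0;		// no full socket: read as 0
 *	else
 *		dst_reg = ((OBJ *)ops->sk)->OBJ_FIELD;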
*/ 9788 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ 9789 do { \ 9790 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \ 9791 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ 9792 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ 9793 if (si->dst_reg == reg || si->src_reg == reg) \ 9794 reg--; \ 9795 if (si->dst_reg == reg || si->src_reg == reg) \ 9796 reg--; \ 9797 if (si->dst_reg == si->src_reg) { \ 9798 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ 9799 offsetof(struct bpf_sock_ops_kern, \ 9800 temp)); \ 9801 fullsock_reg = reg; \ 9802 jmp += 2; \ 9803 } \ 9804 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9805 struct bpf_sock_ops_kern, \ 9806 is_fullsock), \ 9807 fullsock_reg, si->src_reg, \ 9808 offsetof(struct bpf_sock_ops_kern, \ 9809 is_fullsock)); \ 9810 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ 9811 if (si->dst_reg == si->src_reg) \ 9812 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 9813 offsetof(struct bpf_sock_ops_kern, \ 9814 temp)); \ 9815 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9816 struct bpf_sock_ops_kern, sk),\ 9817 si->dst_reg, si->src_reg, \ 9818 offsetof(struct bpf_sock_ops_kern, sk));\ 9819 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \ 9820 OBJ_FIELD), \ 9821 si->dst_reg, si->dst_reg, \ 9822 offsetof(OBJ, OBJ_FIELD)); \ 9823 if (si->dst_reg == si->src_reg) { \ 9824 *insn++ = BPF_JMP_A(1); \ 9825 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 9826 offsetof(struct bpf_sock_ops_kern, \ 9827 temp)); \ 9828 } \ 9829 } while (0) 9830 9831 #define SOCK_OPS_GET_SK() \ 9832 do { \ 9833 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \ 9834 if (si->dst_reg == reg || si->src_reg == reg) \ 9835 reg--; \ 9836 if (si->dst_reg == reg || si->src_reg == reg) \ 9837 reg--; \ 9838 if (si->dst_reg == si->src_reg) { \ 9839 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ 9840 offsetof(struct bpf_sock_ops_kern, \ 9841 temp)); \ 9842 fullsock_reg = reg; \ 9843 jmp += 2; \ 9844 } \ 9845 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9846 struct bpf_sock_ops_kern, \ 9847 is_fullsock), \ 9848 fullsock_reg, si->src_reg, \ 9849 offsetof(struct bpf_sock_ops_kern, \ 9850 is_fullsock)); \ 9851 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ 9852 if (si->dst_reg == si->src_reg) \ 9853 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 9854 offsetof(struct bpf_sock_ops_kern, \ 9855 temp)); \ 9856 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9857 struct bpf_sock_ops_kern, sk),\ 9858 si->dst_reg, si->src_reg, \ 9859 offsetof(struct bpf_sock_ops_kern, sk));\ 9860 if (si->dst_reg == si->src_reg) { \ 9861 *insn++ = BPF_JMP_A(1); \ 9862 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 9863 offsetof(struct bpf_sock_ops_kern, \ 9864 temp)); \ 9865 } \ 9866 } while (0) 9867 9868 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \ 9869 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock) 9870 9871 /* Helper macro for adding write access to tcp_sock or sock fields. 9872 * The macro is called with two registers, dst_reg which contains a pointer 9873 * to ctx (context) and src_reg which contains the value that should be 9874 * stored. However, we need an additional register since we cannot overwrite 9875 * dst_reg because it may be used later in the program. 9876 * Instead we "borrow" one of the other register. We first save its value 9877 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore 9878 * it at the end of the macro. 
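 * In pseudo-C the emitted store sequence is roughly (a sketch; reg is
 * the borrowed scratch register, ops the bpf_sock_ops_kern context in
 * dst_reg):
 *
 *	ops->temp = reg;		// save the borrowed register
 *	reg = ops->is_fullsock;
 *	if (reg) {
 *		reg = ops->sk;
 *		((OBJ *)reg)->OBJ_FIELD = src_reg;
 *	}
 *	reg = ops->temp;		// restore the borrowed register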
9879 */ 9880 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ 9881 do { \ 9882 int reg = BPF_REG_9; \ 9883 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ 9884 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ 9885 if (si->dst_reg == reg || si->src_reg == reg) \ 9886 reg--; \ 9887 if (si->dst_reg == reg || si->src_reg == reg) \ 9888 reg--; \ 9889 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \ 9890 offsetof(struct bpf_sock_ops_kern, \ 9891 temp)); \ 9892 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9893 struct bpf_sock_ops_kern, \ 9894 is_fullsock), \ 9895 reg, si->dst_reg, \ 9896 offsetof(struct bpf_sock_ops_kern, \ 9897 is_fullsock)); \ 9898 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \ 9899 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 9900 struct bpf_sock_ops_kern, sk),\ 9901 reg, si->dst_reg, \ 9902 offsetof(struct bpf_sock_ops_kern, sk));\ 9903 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \ 9904 reg, si->src_reg, \ 9905 offsetof(OBJ, OBJ_FIELD)); \ 9906 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \ 9907 offsetof(struct bpf_sock_ops_kern, \ 9908 temp)); \ 9909 } while (0) 9910 9911 #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \ 9912 do { \ 9913 if (TYPE == BPF_WRITE) \ 9914 SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ 9915 else \ 9916 SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ 9917 } while (0) 9918 9919 if (insn > insn_buf) 9920 return insn - insn_buf; 9921 9922 switch (si->off) { 9923 case offsetof(struct bpf_sock_ops, op): 9924 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 9925 op), 9926 si->dst_reg, si->src_reg, 9927 offsetof(struct bpf_sock_ops_kern, op)); 9928 break; 9929 9930 case offsetof(struct bpf_sock_ops, replylong[0]) ... 9931 offsetof(struct bpf_sock_ops, replylong[3]): 9932 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) != 9933 sizeof_field(struct bpf_sock_ops_kern, reply)); 9934 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) != 9935 sizeof_field(struct bpf_sock_ops_kern, replylong)); 9936 off = si->off; 9937 off -= offsetof(struct bpf_sock_ops, replylong[0]); 9938 off += offsetof(struct bpf_sock_ops_kern, replylong[0]); 9939 if (type == BPF_WRITE) 9940 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9941 off); 9942 else 9943 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9944 off); 9945 break; 9946 9947 case offsetof(struct bpf_sock_ops, family): 9948 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 9949 9950 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9951 struct bpf_sock_ops_kern, sk), 9952 si->dst_reg, si->src_reg, 9953 offsetof(struct bpf_sock_ops_kern, sk)); 9954 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9955 offsetof(struct sock_common, skc_family)); 9956 break; 9957 9958 case offsetof(struct bpf_sock_ops, remote_ip4): 9959 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 9960 9961 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9962 struct bpf_sock_ops_kern, sk), 9963 si->dst_reg, si->src_reg, 9964 offsetof(struct bpf_sock_ops_kern, sk)); 9965 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9966 offsetof(struct sock_common, skc_daddr)); 9967 break; 9968 9969 case offsetof(struct bpf_sock_ops, local_ip4): 9970 BUILD_BUG_ON(sizeof_field(struct sock_common, 9971 skc_rcv_saddr) != 4); 9972 9973 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9974 struct bpf_sock_ops_kern, sk), 9975 si->dst_reg, si->src_reg, 9976 offsetof(struct bpf_sock_ops_kern, sk)); 9977 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9978 offsetof(struct 
sock_common, 9979 skc_rcv_saddr)); 9980 break; 9981 9982 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ... 9983 offsetof(struct bpf_sock_ops, remote_ip6[3]): 9984 #if IS_ENABLED(CONFIG_IPV6) 9985 BUILD_BUG_ON(sizeof_field(struct sock_common, 9986 skc_v6_daddr.s6_addr32[0]) != 4); 9987 9988 off = si->off; 9989 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]); 9990 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 9991 struct bpf_sock_ops_kern, sk), 9992 si->dst_reg, si->src_reg, 9993 offsetof(struct bpf_sock_ops_kern, sk)); 9994 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9995 offsetof(struct sock_common, 9996 skc_v6_daddr.s6_addr32[0]) + 9997 off); 9998 #else 9999 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10000 #endif 10001 break; 10002 10003 case offsetof(struct bpf_sock_ops, local_ip6[0]) ... 10004 offsetof(struct bpf_sock_ops, local_ip6[3]): 10005 #if IS_ENABLED(CONFIG_IPV6) 10006 BUILD_BUG_ON(sizeof_field(struct sock_common, 10007 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 10008 10009 off = si->off; 10010 off -= offsetof(struct bpf_sock_ops, local_ip6[0]); 10011 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10012 struct bpf_sock_ops_kern, sk), 10013 si->dst_reg, si->src_reg, 10014 offsetof(struct bpf_sock_ops_kern, sk)); 10015 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10016 offsetof(struct sock_common, 10017 skc_v6_rcv_saddr.s6_addr32[0]) + 10018 off); 10019 #else 10020 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10021 #endif 10022 break; 10023 10024 case offsetof(struct bpf_sock_ops, remote_port): 10025 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 10026 10027 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10028 struct bpf_sock_ops_kern, sk), 10029 si->dst_reg, si->src_reg, 10030 offsetof(struct bpf_sock_ops_kern, sk)); 10031 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10032 offsetof(struct sock_common, skc_dport)); 10033 #ifndef __BIG_ENDIAN_BITFIELD 10034 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 10035 #endif 10036 break; 10037 10038 case offsetof(struct bpf_sock_ops, local_port): 10039 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 10040 10041 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10042 struct bpf_sock_ops_kern, sk), 10043 si->dst_reg, si->src_reg, 10044 offsetof(struct bpf_sock_ops_kern, sk)); 10045 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10046 offsetof(struct sock_common, skc_num)); 10047 break; 10048 10049 case offsetof(struct bpf_sock_ops, is_fullsock): 10050 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10051 struct bpf_sock_ops_kern, 10052 is_fullsock), 10053 si->dst_reg, si->src_reg, 10054 offsetof(struct bpf_sock_ops_kern, 10055 is_fullsock)); 10056 break; 10057 10058 case offsetof(struct bpf_sock_ops, state): 10059 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1); 10060 10061 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10062 struct bpf_sock_ops_kern, sk), 10063 si->dst_reg, si->src_reg, 10064 offsetof(struct bpf_sock_ops_kern, sk)); 10065 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg, 10066 offsetof(struct sock_common, skc_state)); 10067 break; 10068 10069 case offsetof(struct bpf_sock_ops, rtt_min): 10070 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != 10071 sizeof(struct minmax)); 10072 BUILD_BUG_ON(sizeof(struct minmax) < 10073 sizeof(struct minmax_sample)); 10074 10075 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10076 struct bpf_sock_ops_kern, sk), 10077 si->dst_reg, si->src_reg, 10078 offsetof(struct bpf_sock_ops_kern, sk)); 10079 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 
10080 offsetof(struct tcp_sock, rtt_min) + 10081 sizeof_field(struct minmax_sample, t)); 10082 break; 10083 10084 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags): 10085 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags, 10086 struct tcp_sock); 10087 break; 10088 10089 case offsetof(struct bpf_sock_ops, sk_txhash): 10090 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, 10091 struct sock, type); 10092 break; 10093 case offsetof(struct bpf_sock_ops, snd_cwnd): 10094 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd); 10095 break; 10096 case offsetof(struct bpf_sock_ops, srtt_us): 10097 SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us); 10098 break; 10099 case offsetof(struct bpf_sock_ops, snd_ssthresh): 10100 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh); 10101 break; 10102 case offsetof(struct bpf_sock_ops, rcv_nxt): 10103 SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt); 10104 break; 10105 case offsetof(struct bpf_sock_ops, snd_nxt): 10106 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt); 10107 break; 10108 case offsetof(struct bpf_sock_ops, snd_una): 10109 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una); 10110 break; 10111 case offsetof(struct bpf_sock_ops, mss_cache): 10112 SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache); 10113 break; 10114 case offsetof(struct bpf_sock_ops, ecn_flags): 10115 SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags); 10116 break; 10117 case offsetof(struct bpf_sock_ops, rate_delivered): 10118 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered); 10119 break; 10120 case offsetof(struct bpf_sock_ops, rate_interval_us): 10121 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us); 10122 break; 10123 case offsetof(struct bpf_sock_ops, packets_out): 10124 SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out); 10125 break; 10126 case offsetof(struct bpf_sock_ops, retrans_out): 10127 SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out); 10128 break; 10129 case offsetof(struct bpf_sock_ops, total_retrans): 10130 SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans); 10131 break; 10132 case offsetof(struct bpf_sock_ops, segs_in): 10133 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in); 10134 break; 10135 case offsetof(struct bpf_sock_ops, data_segs_in): 10136 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in); 10137 break; 10138 case offsetof(struct bpf_sock_ops, segs_out): 10139 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out); 10140 break; 10141 case offsetof(struct bpf_sock_ops, data_segs_out): 10142 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out); 10143 break; 10144 case offsetof(struct bpf_sock_ops, lost_out): 10145 SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out); 10146 break; 10147 case offsetof(struct bpf_sock_ops, sacked_out): 10148 SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out); 10149 break; 10150 case offsetof(struct bpf_sock_ops, bytes_received): 10151 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received); 10152 break; 10153 case offsetof(struct bpf_sock_ops, bytes_acked): 10154 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked); 10155 break; 10156 case offsetof(struct bpf_sock_ops, sk): 10157 SOCK_OPS_GET_SK(); 10158 break; 10159 case offsetof(struct bpf_sock_ops, skb_data_end): 10160 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10161 skb_data_end), 10162 si->dst_reg, si->src_reg, 10163 offsetof(struct bpf_sock_ops_kern, 10164 skb_data_end)); 10165 break; 10166 case offsetof(struct bpf_sock_ops, skb_data): 10167 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10168 skb), 10169 si->dst_reg, si->src_reg, 10170 offsetof(struct bpf_sock_ops_kern, 10171 skb)); 10172 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 10173 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), 10174 si->dst_reg, si->dst_reg, 
10175 offsetof(struct sk_buff, data)); 10176 break; 10177 case offsetof(struct bpf_sock_ops, skb_len): 10178 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10179 skb), 10180 si->dst_reg, si->src_reg, 10181 offsetof(struct bpf_sock_ops_kern, 10182 skb)); 10183 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 10184 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len), 10185 si->dst_reg, si->dst_reg, 10186 offsetof(struct sk_buff, len)); 10187 break; 10188 case offsetof(struct bpf_sock_ops, skb_tcp_flags): 10189 off = offsetof(struct sk_buff, cb); 10190 off += offsetof(struct tcp_skb_cb, tcp_flags); 10191 *target_size = sizeof_field(struct tcp_skb_cb, tcp_flags); 10192 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10193 skb), 10194 si->dst_reg, si->src_reg, 10195 offsetof(struct bpf_sock_ops_kern, 10196 skb)); 10197 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 10198 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb, 10199 tcp_flags), 10200 si->dst_reg, si->dst_reg, off); 10201 break; 10202 } 10203 return insn - insn_buf; 10204 } 10205 10206 /* data_end = skb->data + skb_headlen() */ 10207 static struct bpf_insn *bpf_convert_data_end_access(const struct bpf_insn *si, 10208 struct bpf_insn *insn) 10209 { 10210 int reg; 10211 int temp_reg_off = offsetof(struct sk_buff, cb) + 10212 offsetof(struct sk_skb_cb, temp_reg); 10213 10214 if (si->src_reg == si->dst_reg) { 10215 /* We need an extra register, choose and save a register. */ 10216 reg = BPF_REG_9; 10217 if (si->src_reg == reg || si->dst_reg == reg) 10218 reg--; 10219 if (si->src_reg == reg || si->dst_reg == reg) 10220 reg--; 10221 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, temp_reg_off); 10222 } else { 10223 reg = si->dst_reg; 10224 } 10225 10226 /* reg = skb->data */ 10227 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), 10228 reg, si->src_reg, 10229 offsetof(struct sk_buff, data)); 10230 /* AX = skb->len */ 10231 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len), 10232 BPF_REG_AX, si->src_reg, 10233 offsetof(struct sk_buff, len)); 10234 /* reg = skb->data + skb->len */ 10235 *insn++ = BPF_ALU64_REG(BPF_ADD, reg, BPF_REG_AX); 10236 /* AX = skb->data_len */ 10237 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data_len), 10238 BPF_REG_AX, si->src_reg, 10239 offsetof(struct sk_buff, data_len)); 10240 10241 /* reg = skb->data + skb->len - skb->data_len */ 10242 *insn++ = BPF_ALU64_REG(BPF_SUB, reg, BPF_REG_AX); 10243 10244 if (si->src_reg == si->dst_reg) { 10245 /* Restore the saved register */ 10246 *insn++ = BPF_MOV64_REG(BPF_REG_AX, si->src_reg); 10247 *insn++ = BPF_MOV64_REG(si->dst_reg, reg); 10248 *insn++ = BPF_LDX_MEM(BPF_DW, reg, BPF_REG_AX, temp_reg_off); 10249 } 10250 10251 return insn; 10252 } 10253 10254 static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, 10255 const struct bpf_insn *si, 10256 struct bpf_insn *insn_buf, 10257 struct bpf_prog *prog, u32 *target_size) 10258 { 10259 struct bpf_insn *insn = insn_buf; 10260 int off; 10261 10262 switch (si->off) { 10263 case offsetof(struct __sk_buff, data_end): 10264 insn = bpf_convert_data_end_access(si, insn); 10265 break; 10266 case offsetof(struct __sk_buff, cb[0]) ... 
10267 offsetofend(struct __sk_buff, cb[4]) - 1: 10268 BUILD_BUG_ON(sizeof_field(struct sk_skb_cb, data) < 20); 10269 BUILD_BUG_ON((offsetof(struct sk_buff, cb) + 10270 offsetof(struct sk_skb_cb, data)) % 10271 sizeof(__u64)); 10272 10273 prog->cb_access = 1; 10274 off = si->off; 10275 off -= offsetof(struct __sk_buff, cb[0]); 10276 off += offsetof(struct sk_buff, cb); 10277 off += offsetof(struct sk_skb_cb, data); 10278 if (type == BPF_WRITE) 10279 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, 10280 si->src_reg, off); 10281 else 10282 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, 10283 si->src_reg, off); 10284 break; 10285 10286 10287 default: 10288 return bpf_convert_ctx_access(type, si, insn_buf, prog, 10289 target_size); 10290 } 10291 10292 return insn - insn_buf; 10293 } 10294 10295 static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, 10296 const struct bpf_insn *si, 10297 struct bpf_insn *insn_buf, 10298 struct bpf_prog *prog, u32 *target_size) 10299 { 10300 struct bpf_insn *insn = insn_buf; 10301 #if IS_ENABLED(CONFIG_IPV6) 10302 int off; 10303 #endif 10304 10305 /* convert ctx uses the fact sg element is first in struct */ 10306 BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0); 10307 10308 switch (si->off) { 10309 case offsetof(struct sk_msg_md, data): 10310 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data), 10311 si->dst_reg, si->src_reg, 10312 offsetof(struct sk_msg, data)); 10313 break; 10314 case offsetof(struct sk_msg_md, data_end): 10315 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end), 10316 si->dst_reg, si->src_reg, 10317 offsetof(struct sk_msg, data_end)); 10318 break; 10319 case offsetof(struct sk_msg_md, family): 10320 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 10321 10322 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10323 struct sk_msg, sk), 10324 si->dst_reg, si->src_reg, 10325 offsetof(struct sk_msg, sk)); 10326 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10327 offsetof(struct sock_common, skc_family)); 10328 break; 10329 10330 case offsetof(struct sk_msg_md, remote_ip4): 10331 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 10332 10333 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10334 struct sk_msg, sk), 10335 si->dst_reg, si->src_reg, 10336 offsetof(struct sk_msg, sk)); 10337 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10338 offsetof(struct sock_common, skc_daddr)); 10339 break; 10340 10341 case offsetof(struct sk_msg_md, local_ip4): 10342 BUILD_BUG_ON(sizeof_field(struct sock_common, 10343 skc_rcv_saddr) != 4); 10344 10345 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10346 struct sk_msg, sk), 10347 si->dst_reg, si->src_reg, 10348 offsetof(struct sk_msg, sk)); 10349 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10350 offsetof(struct sock_common, 10351 skc_rcv_saddr)); 10352 break; 10353 10354 case offsetof(struct sk_msg_md, remote_ip6[0]) ... 
10355 offsetof(struct sk_msg_md, remote_ip6[3]): 10356 #if IS_ENABLED(CONFIG_IPV6) 10357 BUILD_BUG_ON(sizeof_field(struct sock_common, 10358 skc_v6_daddr.s6_addr32[0]) != 4); 10359 10360 off = si->off; 10361 off -= offsetof(struct sk_msg_md, remote_ip6[0]); 10362 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10363 struct sk_msg, sk), 10364 si->dst_reg, si->src_reg, 10365 offsetof(struct sk_msg, sk)); 10366 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10367 offsetof(struct sock_common, 10368 skc_v6_daddr.s6_addr32[0]) + 10369 off); 10370 #else 10371 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10372 #endif 10373 break; 10374 10375 case offsetof(struct sk_msg_md, local_ip6[0]) ... 10376 offsetof(struct sk_msg_md, local_ip6[3]): 10377 #if IS_ENABLED(CONFIG_IPV6) 10378 BUILD_BUG_ON(sizeof_field(struct sock_common, 10379 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 10380 10381 off = si->off; 10382 off -= offsetof(struct sk_msg_md, local_ip6[0]); 10383 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10384 struct sk_msg, sk), 10385 si->dst_reg, si->src_reg, 10386 offsetof(struct sk_msg, sk)); 10387 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10388 offsetof(struct sock_common, 10389 skc_v6_rcv_saddr.s6_addr32[0]) + 10390 off); 10391 #else 10392 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10393 #endif 10394 break; 10395 10396 case offsetof(struct sk_msg_md, remote_port): 10397 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 10398 10399 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10400 struct sk_msg, sk), 10401 si->dst_reg, si->src_reg, 10402 offsetof(struct sk_msg, sk)); 10403 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10404 offsetof(struct sock_common, skc_dport)); 10405 #ifndef __BIG_ENDIAN_BITFIELD 10406 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 10407 #endif 10408 break; 10409 10410 case offsetof(struct sk_msg_md, local_port): 10411 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 10412 10413 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10414 struct sk_msg, sk), 10415 si->dst_reg, si->src_reg, 10416 offsetof(struct sk_msg, sk)); 10417 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10418 offsetof(struct sock_common, skc_num)); 10419 break; 10420 10421 case offsetof(struct sk_msg_md, size): 10422 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size), 10423 si->dst_reg, si->src_reg, 10424 offsetof(struct sk_msg_sg, size)); 10425 break; 10426 10427 case offsetof(struct sk_msg_md, sk): 10428 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, sk), 10429 si->dst_reg, si->src_reg, 10430 offsetof(struct sk_msg, sk)); 10431 break; 10432 } 10433 10434 return insn - insn_buf; 10435 } 10436 10437 const struct bpf_verifier_ops sk_filter_verifier_ops = { 10438 .get_func_proto = sk_filter_func_proto, 10439 .is_valid_access = sk_filter_is_valid_access, 10440 .convert_ctx_access = bpf_convert_ctx_access, 10441 .gen_ld_abs = bpf_gen_ld_abs, 10442 }; 10443 10444 const struct bpf_prog_ops sk_filter_prog_ops = { 10445 .test_run = bpf_prog_test_run_skb, 10446 }; 10447 10448 const struct bpf_verifier_ops tc_cls_act_verifier_ops = { 10449 .get_func_proto = tc_cls_act_func_proto, 10450 .is_valid_access = tc_cls_act_is_valid_access, 10451 .convert_ctx_access = tc_cls_act_convert_ctx_access, 10452 .gen_prologue = tc_cls_act_prologue, 10453 .gen_ld_abs = bpf_gen_ld_abs, 10454 }; 10455 10456 const struct bpf_prog_ops tc_cls_act_prog_ops = { 10457 .test_run = bpf_prog_test_run_skb, 10458 }; 10459 10460 const struct bpf_verifier_ops xdp_verifier_ops = { 10461 
.get_func_proto = xdp_func_proto, 10462 .is_valid_access = xdp_is_valid_access, 10463 .convert_ctx_access = xdp_convert_ctx_access, 10464 .gen_prologue = bpf_noop_prologue, 10465 }; 10466 10467 const struct bpf_prog_ops xdp_prog_ops = { 10468 .test_run = bpf_prog_test_run_xdp, 10469 }; 10470 10471 const struct bpf_verifier_ops cg_skb_verifier_ops = { 10472 .get_func_proto = cg_skb_func_proto, 10473 .is_valid_access = cg_skb_is_valid_access, 10474 .convert_ctx_access = bpf_convert_ctx_access, 10475 }; 10476 10477 const struct bpf_prog_ops cg_skb_prog_ops = { 10478 .test_run = bpf_prog_test_run_skb, 10479 }; 10480 10481 const struct bpf_verifier_ops lwt_in_verifier_ops = { 10482 .get_func_proto = lwt_in_func_proto, 10483 .is_valid_access = lwt_is_valid_access, 10484 .convert_ctx_access = bpf_convert_ctx_access, 10485 }; 10486 10487 const struct bpf_prog_ops lwt_in_prog_ops = { 10488 .test_run = bpf_prog_test_run_skb, 10489 }; 10490 10491 const struct bpf_verifier_ops lwt_out_verifier_ops = { 10492 .get_func_proto = lwt_out_func_proto, 10493 .is_valid_access = lwt_is_valid_access, 10494 .convert_ctx_access = bpf_convert_ctx_access, 10495 }; 10496 10497 const struct bpf_prog_ops lwt_out_prog_ops = { 10498 .test_run = bpf_prog_test_run_skb, 10499 }; 10500 10501 const struct bpf_verifier_ops lwt_xmit_verifier_ops = { 10502 .get_func_proto = lwt_xmit_func_proto, 10503 .is_valid_access = lwt_is_valid_access, 10504 .convert_ctx_access = bpf_convert_ctx_access, 10505 .gen_prologue = tc_cls_act_prologue, 10506 }; 10507 10508 const struct bpf_prog_ops lwt_xmit_prog_ops = { 10509 .test_run = bpf_prog_test_run_skb, 10510 }; 10511 10512 const struct bpf_verifier_ops lwt_seg6local_verifier_ops = { 10513 .get_func_proto = lwt_seg6local_func_proto, 10514 .is_valid_access = lwt_is_valid_access, 10515 .convert_ctx_access = bpf_convert_ctx_access, 10516 }; 10517 10518 const struct bpf_prog_ops lwt_seg6local_prog_ops = { 10519 .test_run = bpf_prog_test_run_skb, 10520 }; 10521 10522 const struct bpf_verifier_ops cg_sock_verifier_ops = { 10523 .get_func_proto = sock_filter_func_proto, 10524 .is_valid_access = sock_filter_is_valid_access, 10525 .convert_ctx_access = bpf_sock_convert_ctx_access, 10526 }; 10527 10528 const struct bpf_prog_ops cg_sock_prog_ops = { 10529 }; 10530 10531 const struct bpf_verifier_ops cg_sock_addr_verifier_ops = { 10532 .get_func_proto = sock_addr_func_proto, 10533 .is_valid_access = sock_addr_is_valid_access, 10534 .convert_ctx_access = sock_addr_convert_ctx_access, 10535 }; 10536 10537 const struct bpf_prog_ops cg_sock_addr_prog_ops = { 10538 }; 10539 10540 const struct bpf_verifier_ops sock_ops_verifier_ops = { 10541 .get_func_proto = sock_ops_func_proto, 10542 .is_valid_access = sock_ops_is_valid_access, 10543 .convert_ctx_access = sock_ops_convert_ctx_access, 10544 }; 10545 10546 const struct bpf_prog_ops sock_ops_prog_ops = { 10547 }; 10548 10549 const struct bpf_verifier_ops sk_skb_verifier_ops = { 10550 .get_func_proto = sk_skb_func_proto, 10551 .is_valid_access = sk_skb_is_valid_access, 10552 .convert_ctx_access = sk_skb_convert_ctx_access, 10553 .gen_prologue = sk_skb_prologue, 10554 }; 10555 10556 const struct bpf_prog_ops sk_skb_prog_ops = { 10557 }; 10558 10559 const struct bpf_verifier_ops sk_msg_verifier_ops = { 10560 .get_func_proto = sk_msg_func_proto, 10561 .is_valid_access = sk_msg_is_valid_access, 10562 .convert_ctx_access = sk_msg_convert_ctx_access, 10563 .gen_prologue = bpf_noop_prologue, 10564 }; 10565 10566 const struct bpf_prog_ops sk_msg_prog_ops = { 10567 }; 
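/* Each *_verifier_ops / *_prog_ops pair defined in this file is tied to its
 * program type through a BPF_PROG_TYPE() entry in include/linux/bpf_types.h,
 * along the lines of (illustrative form only):
 *
 *   BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg,
 *                 struct sk_msg_md, struct sk_msg)
 *
 * The verifier then uses .is_valid_access to vet context loads/stores and
 * .convert_ctx_access to rewrite them into the real field accesses generated
 * by the functions above.
 */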
10568 10569 const struct bpf_verifier_ops flow_dissector_verifier_ops = { 10570 .get_func_proto = flow_dissector_func_proto, 10571 .is_valid_access = flow_dissector_is_valid_access, 10572 .convert_ctx_access = flow_dissector_convert_ctx_access, 10573 }; 10574 10575 const struct bpf_prog_ops flow_dissector_prog_ops = { 10576 .test_run = bpf_prog_test_run_flow_dissector, 10577 }; 10578 10579 int sk_detach_filter(struct sock *sk) 10580 { 10581 int ret = -ENOENT; 10582 struct sk_filter *filter; 10583 10584 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 10585 return -EPERM; 10586 10587 filter = rcu_dereference_protected(sk->sk_filter, 10588 lockdep_sock_is_held(sk)); 10589 if (filter) { 10590 RCU_INIT_POINTER(sk->sk_filter, NULL); 10591 sk_filter_uncharge(sk, filter); 10592 ret = 0; 10593 } 10594 10595 return ret; 10596 } 10597 EXPORT_SYMBOL_GPL(sk_detach_filter); 10598 10599 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, 10600 unsigned int len) 10601 { 10602 struct sock_fprog_kern *fprog; 10603 struct sk_filter *filter; 10604 int ret = 0; 10605 10606 lock_sock(sk); 10607 filter = rcu_dereference_protected(sk->sk_filter, 10608 lockdep_sock_is_held(sk)); 10609 if (!filter) 10610 goto out; 10611 10612 /* We're copying the filter that has been originally attached, 10613 * so no conversion/decode needed anymore. eBPF programs that 10614 * have no original program cannot be dumped through this. 10615 */ 10616 ret = -EACCES; 10617 fprog = filter->prog->orig_prog; 10618 if (!fprog) 10619 goto out; 10620 10621 ret = fprog->len; 10622 if (!len) 10623 /* User space only enquires number of filter blocks. */ 10624 goto out; 10625 10626 ret = -EINVAL; 10627 if (len < fprog->len) 10628 goto out; 10629 10630 ret = -EFAULT; 10631 if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) 10632 goto out; 10633 10634 /* Instead of bytes, the API requests to return the number 10635 * of filter blocks. 
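 * A typical caller therefore probes with len == 0 first to learn how many
 * blocks are attached, allocates len * sizeof(struct sock_filter) and calls
 * again; see the SO_GET_FILTER handling in sock_getsockopt().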
10636 */ 10637 ret = fprog->len; 10638 out: 10639 release_sock(sk); 10640 return ret; 10641 } 10642 10643 #ifdef CONFIG_INET 10644 static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, 10645 struct sock_reuseport *reuse, 10646 struct sock *sk, struct sk_buff *skb, 10647 struct sock *migrating_sk, 10648 u32 hash) 10649 { 10650 reuse_kern->skb = skb; 10651 reuse_kern->sk = sk; 10652 reuse_kern->selected_sk = NULL; 10653 reuse_kern->migrating_sk = migrating_sk; 10654 reuse_kern->data_end = skb->data + skb_headlen(skb); 10655 reuse_kern->hash = hash; 10656 reuse_kern->reuseport_id = reuse->reuseport_id; 10657 reuse_kern->bind_inany = reuse->bind_inany; 10658 } 10659 10660 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, 10661 struct bpf_prog *prog, struct sk_buff *skb, 10662 struct sock *migrating_sk, 10663 u32 hash) 10664 { 10665 struct sk_reuseport_kern reuse_kern; 10666 enum sk_action action; 10667 10668 bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, migrating_sk, hash); 10669 action = bpf_prog_run(prog, &reuse_kern); 10670 10671 if (action == SK_PASS) 10672 return reuse_kern.selected_sk; 10673 else 10674 return ERR_PTR(-ECONNREFUSED); 10675 } 10676 10677 BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, 10678 struct bpf_map *, map, void *, key, u32, flags) 10679 { 10680 bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; 10681 struct sock_reuseport *reuse; 10682 struct sock *selected_sk; 10683 10684 selected_sk = map->ops->map_lookup_elem(map, key); 10685 if (!selected_sk) 10686 return -ENOENT; 10687 10688 reuse = rcu_dereference(selected_sk->sk_reuseport_cb); 10689 if (!reuse) { 10690 /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ 10691 if (sk_is_refcounted(selected_sk)) 10692 sock_put(selected_sk); 10693 10694 /* reuseport_array has only sk with non NULL sk_reuseport_cb. 10695 * The only (!reuse) case here is - the sk has already been 10696 * unhashed (e.g. by close()), so treat it as -ENOENT. 10697 * 10698 * Other maps (e.g. sock_map) do not provide this guarantee and 10699 * the sk may never be in the reuseport group to begin with. 10700 */ 10701 return is_sockarray ? -ENOENT : -EINVAL; 10702 } 10703 10704 if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { 10705 struct sock *sk = reuse_kern->sk; 10706 10707 if (sk->sk_protocol != selected_sk->sk_protocol) 10708 return -EPROTOTYPE; 10709 else if (sk->sk_family != selected_sk->sk_family) 10710 return -EAFNOSUPPORT; 10711 10712 /* Catch all. Likely bound to a different sockaddr. 
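 * The socket picked from the map is not in the same reuseport group as the
 * one this program was invoked for; when neither the protocol nor the
 * address family explains the mismatch, assume it was bound to a different
 * address and fail with -EBADFD.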
*/ 10713 return -EBADFD; 10714 } 10715 10716 reuse_kern->selected_sk = selected_sk; 10717 10718 return 0; 10719 } 10720 10721 static const struct bpf_func_proto sk_select_reuseport_proto = { 10722 .func = sk_select_reuseport, 10723 .gpl_only = false, 10724 .ret_type = RET_INTEGER, 10725 .arg1_type = ARG_PTR_TO_CTX, 10726 .arg2_type = ARG_CONST_MAP_PTR, 10727 .arg3_type = ARG_PTR_TO_MAP_KEY, 10728 .arg4_type = ARG_ANYTHING, 10729 }; 10730 10731 BPF_CALL_4(sk_reuseport_load_bytes, 10732 const struct sk_reuseport_kern *, reuse_kern, u32, offset, 10733 void *, to, u32, len) 10734 { 10735 return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len); 10736 } 10737 10738 static const struct bpf_func_proto sk_reuseport_load_bytes_proto = { 10739 .func = sk_reuseport_load_bytes, 10740 .gpl_only = false, 10741 .ret_type = RET_INTEGER, 10742 .arg1_type = ARG_PTR_TO_CTX, 10743 .arg2_type = ARG_ANYTHING, 10744 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 10745 .arg4_type = ARG_CONST_SIZE, 10746 }; 10747 10748 BPF_CALL_5(sk_reuseport_load_bytes_relative, 10749 const struct sk_reuseport_kern *, reuse_kern, u32, offset, 10750 void *, to, u32, len, u32, start_header) 10751 { 10752 return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to, 10753 len, start_header); 10754 } 10755 10756 static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = { 10757 .func = sk_reuseport_load_bytes_relative, 10758 .gpl_only = false, 10759 .ret_type = RET_INTEGER, 10760 .arg1_type = ARG_PTR_TO_CTX, 10761 .arg2_type = ARG_ANYTHING, 10762 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 10763 .arg4_type = ARG_CONST_SIZE, 10764 .arg5_type = ARG_ANYTHING, 10765 }; 10766 10767 static const struct bpf_func_proto * 10768 sk_reuseport_func_proto(enum bpf_func_id func_id, 10769 const struct bpf_prog *prog) 10770 { 10771 switch (func_id) { 10772 case BPF_FUNC_sk_select_reuseport: 10773 return &sk_select_reuseport_proto; 10774 case BPF_FUNC_skb_load_bytes: 10775 return &sk_reuseport_load_bytes_proto; 10776 case BPF_FUNC_skb_load_bytes_relative: 10777 return &sk_reuseport_load_bytes_relative_proto; 10778 case BPF_FUNC_get_socket_cookie: 10779 return &bpf_get_socket_ptr_cookie_proto; 10780 case BPF_FUNC_ktime_get_coarse_ns: 10781 return &bpf_ktime_get_coarse_ns_proto; 10782 default: 10783 return bpf_base_func_proto(func_id); 10784 } 10785 } 10786 10787 static bool 10788 sk_reuseport_is_valid_access(int off, int size, 10789 enum bpf_access_type type, 10790 const struct bpf_prog *prog, 10791 struct bpf_insn_access_aux *info) 10792 { 10793 const u32 size_default = sizeof(__u32); 10794 10795 if (off < 0 || off >= sizeof(struct sk_reuseport_md) || 10796 off % size || type != BPF_READ) 10797 return false; 10798 10799 switch (off) { 10800 case offsetof(struct sk_reuseport_md, data): 10801 info->reg_type = PTR_TO_PACKET; 10802 return size == sizeof(__u64); 10803 10804 case offsetof(struct sk_reuseport_md, data_end): 10805 info->reg_type = PTR_TO_PACKET_END; 10806 return size == sizeof(__u64); 10807 10808 case offsetof(struct sk_reuseport_md, hash): 10809 return size == size_default; 10810 10811 case offsetof(struct sk_reuseport_md, sk): 10812 info->reg_type = PTR_TO_SOCKET; 10813 return size == sizeof(__u64); 10814 10815 case offsetof(struct sk_reuseport_md, migrating_sk): 10816 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; 10817 return size == sizeof(__u64); 10818 10819 /* Fields that allow narrowing */ 10820 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): 10821 if (size < sizeof_field(struct sk_buff, protocol)) 10822 return 
false; 10823 fallthrough; 10824 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): 10825 case bpf_ctx_range(struct sk_reuseport_md, bind_inany): 10826 case bpf_ctx_range(struct sk_reuseport_md, len): 10827 bpf_ctx_record_field_size(info, size_default); 10828 return bpf_ctx_narrow_access_ok(off, size, size_default); 10829 10830 default: 10831 return false; 10832 } 10833 } 10834 10835 #define SK_REUSEPORT_LOAD_FIELD(F) ({ \ 10836 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ 10837 si->dst_reg, si->src_reg, \ 10838 bpf_target_off(struct sk_reuseport_kern, F, \ 10839 sizeof_field(struct sk_reuseport_kern, F), \ 10840 target_size)); \ 10841 }) 10842 10843 #define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \ 10844 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ 10845 struct sk_buff, \ 10846 skb, \ 10847 SKB_FIELD) 10848 10849 #define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD) \ 10850 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ 10851 struct sock, \ 10852 sk, \ 10853 SK_FIELD) 10854 10855 static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, 10856 const struct bpf_insn *si, 10857 struct bpf_insn *insn_buf, 10858 struct bpf_prog *prog, 10859 u32 *target_size) 10860 { 10861 struct bpf_insn *insn = insn_buf; 10862 10863 switch (si->off) { 10864 case offsetof(struct sk_reuseport_md, data): 10865 SK_REUSEPORT_LOAD_SKB_FIELD(data); 10866 break; 10867 10868 case offsetof(struct sk_reuseport_md, len): 10869 SK_REUSEPORT_LOAD_SKB_FIELD(len); 10870 break; 10871 10872 case offsetof(struct sk_reuseport_md, eth_protocol): 10873 SK_REUSEPORT_LOAD_SKB_FIELD(protocol); 10874 break; 10875 10876 case offsetof(struct sk_reuseport_md, ip_protocol): 10877 SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol); 10878 break; 10879 10880 case offsetof(struct sk_reuseport_md, data_end): 10881 SK_REUSEPORT_LOAD_FIELD(data_end); 10882 break; 10883 10884 case offsetof(struct sk_reuseport_md, hash): 10885 SK_REUSEPORT_LOAD_FIELD(hash); 10886 break; 10887 10888 case offsetof(struct sk_reuseport_md, bind_inany): 10889 SK_REUSEPORT_LOAD_FIELD(bind_inany); 10890 break; 10891 10892 case offsetof(struct sk_reuseport_md, sk): 10893 SK_REUSEPORT_LOAD_FIELD(sk); 10894 break; 10895 10896 case offsetof(struct sk_reuseport_md, migrating_sk): 10897 SK_REUSEPORT_LOAD_FIELD(migrating_sk); 10898 break; 10899 } 10900 10901 return insn - insn_buf; 10902 } 10903 10904 const struct bpf_verifier_ops sk_reuseport_verifier_ops = { 10905 .get_func_proto = sk_reuseport_func_proto, 10906 .is_valid_access = sk_reuseport_is_valid_access, 10907 .convert_ctx_access = sk_reuseport_convert_ctx_access, 10908 }; 10909 10910 const struct bpf_prog_ops sk_reuseport_prog_ops = { 10911 }; 10912 10913 DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled); 10914 EXPORT_SYMBOL(bpf_sk_lookup_enabled); 10915 10916 BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, 10917 struct sock *, sk, u64, flags) 10918 { 10919 if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE | 10920 BPF_SK_LOOKUP_F_NO_REUSEPORT))) 10921 return -EINVAL; 10922 if (unlikely(sk && sk_is_refcounted(sk))) 10923 return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */ 10924 if (unlikely(sk && sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN)) 10925 return -ESOCKTNOSUPPORT; /* only accept TCP socket in LISTEN */ 10926 if (unlikely(sk && sk_is_udp(sk) && sk->sk_state != TCP_CLOSE)) 10927 return -ESOCKTNOSUPPORT; /* only accept UDP socket in CLOSE */ 10928 10929 /* Check if socket is suitable for packet L3/L4 protocol */ 10930 if (sk && sk->sk_protocol != 
ctx->protocol) 10931 return -EPROTOTYPE; 10932 if (sk && sk->sk_family != ctx->family && 10933 (sk->sk_family == AF_INET || ipv6_only_sock(sk))) 10934 return -EAFNOSUPPORT; 10935 10936 if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE)) 10937 return -EEXIST; 10938 10939 /* Select socket as lookup result */ 10940 ctx->selected_sk = sk; 10941 ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT; 10942 return 0; 10943 } 10944 10945 static const struct bpf_func_proto bpf_sk_lookup_assign_proto = { 10946 .func = bpf_sk_lookup_assign, 10947 .gpl_only = false, 10948 .ret_type = RET_INTEGER, 10949 .arg1_type = ARG_PTR_TO_CTX, 10950 .arg2_type = ARG_PTR_TO_SOCKET_OR_NULL, 10951 .arg3_type = ARG_ANYTHING, 10952 }; 10953 10954 static const struct bpf_func_proto * 10955 sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 10956 { 10957 switch (func_id) { 10958 case BPF_FUNC_perf_event_output: 10959 return &bpf_event_output_data_proto; 10960 case BPF_FUNC_sk_assign: 10961 return &bpf_sk_lookup_assign_proto; 10962 case BPF_FUNC_sk_release: 10963 return &bpf_sk_release_proto; 10964 default: 10965 return bpf_sk_base_func_proto(func_id); 10966 } 10967 } 10968 10969 static bool sk_lookup_is_valid_access(int off, int size, 10970 enum bpf_access_type type, 10971 const struct bpf_prog *prog, 10972 struct bpf_insn_access_aux *info) 10973 { 10974 if (off < 0 || off >= sizeof(struct bpf_sk_lookup)) 10975 return false; 10976 if (off % size != 0) 10977 return false; 10978 if (type != BPF_READ) 10979 return false; 10980 10981 switch (off) { 10982 case offsetof(struct bpf_sk_lookup, sk): 10983 info->reg_type = PTR_TO_SOCKET_OR_NULL; 10984 return size == sizeof(__u64); 10985 10986 case bpf_ctx_range(struct bpf_sk_lookup, family): 10987 case bpf_ctx_range(struct bpf_sk_lookup, protocol): 10988 case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4): 10989 case bpf_ctx_range(struct bpf_sk_lookup, local_ip4): 10990 case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]): 10991 case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): 10992 case bpf_ctx_range(struct bpf_sk_lookup, local_port): 10993 case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex): 10994 bpf_ctx_record_field_size(info, sizeof(__u32)); 10995 return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); 10996 10997 case bpf_ctx_range(struct bpf_sk_lookup, remote_port): 10998 /* Allow 4-byte access to 2-byte field for backward compatibility */ 10999 if (size == sizeof(__u32)) 11000 return true; 11001 bpf_ctx_record_field_size(info, sizeof(__be16)); 11002 return bpf_ctx_narrow_access_ok(off, size, sizeof(__be16)); 11003 11004 case offsetofend(struct bpf_sk_lookup, remote_port) ... 
11005 offsetof(struct bpf_sk_lookup, local_ip4) - 1: 11006 /* Allow access to zero padding for backward compatibility */ 11007 bpf_ctx_record_field_size(info, sizeof(__u16)); 11008 return bpf_ctx_narrow_access_ok(off, size, sizeof(__u16)); 11009 11010 default: 11011 return false; 11012 } 11013 } 11014 11015 static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, 11016 const struct bpf_insn *si, 11017 struct bpf_insn *insn_buf, 11018 struct bpf_prog *prog, 11019 u32 *target_size) 11020 { 11021 struct bpf_insn *insn = insn_buf; 11022 11023 switch (si->off) { 11024 case offsetof(struct bpf_sk_lookup, sk): 11025 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 11026 offsetof(struct bpf_sk_lookup_kern, selected_sk)); 11027 break; 11028 11029 case offsetof(struct bpf_sk_lookup, family): 11030 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 11031 bpf_target_off(struct bpf_sk_lookup_kern, 11032 family, 2, target_size)); 11033 break; 11034 11035 case offsetof(struct bpf_sk_lookup, protocol): 11036 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 11037 bpf_target_off(struct bpf_sk_lookup_kern, 11038 protocol, 2, target_size)); 11039 break; 11040 11041 case offsetof(struct bpf_sk_lookup, remote_ip4): 11042 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 11043 bpf_target_off(struct bpf_sk_lookup_kern, 11044 v4.saddr, 4, target_size)); 11045 break; 11046 11047 case offsetof(struct bpf_sk_lookup, local_ip4): 11048 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 11049 bpf_target_off(struct bpf_sk_lookup_kern, 11050 v4.daddr, 4, target_size)); 11051 break; 11052 11053 case bpf_ctx_range_till(struct bpf_sk_lookup, 11054 remote_ip6[0], remote_ip6[3]): { 11055 #if IS_ENABLED(CONFIG_IPV6) 11056 int off = si->off; 11057 11058 off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]); 11059 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); 11060 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 11061 offsetof(struct bpf_sk_lookup_kern, v6.saddr)); 11062 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 11063 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); 11064 #else 11065 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 11066 #endif 11067 break; 11068 } 11069 case bpf_ctx_range_till(struct bpf_sk_lookup, 11070 local_ip6[0], local_ip6[3]): { 11071 #if IS_ENABLED(CONFIG_IPV6) 11072 int off = si->off; 11073 11074 off -= offsetof(struct bpf_sk_lookup, local_ip6[0]); 11075 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); 11076 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 11077 offsetof(struct bpf_sk_lookup_kern, v6.daddr)); 11078 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 11079 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); 11080 #else 11081 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 11082 #endif 11083 break; 11084 } 11085 case offsetof(struct bpf_sk_lookup, remote_port): 11086 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 11087 bpf_target_off(struct bpf_sk_lookup_kern, 11088 sport, 2, target_size)); 11089 break; 11090 11091 case offsetofend(struct bpf_sk_lookup, remote_port): 11092 *target_size = 2; 11093 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 11094 break; 11095 11096 case offsetof(struct bpf_sk_lookup, local_port): 11097 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 11098 bpf_target_off(struct bpf_sk_lookup_kern, 11099 dport, 2, target_size)); 11100 break; 11101 11102 case offsetof(struct bpf_sk_lookup, ingress_ifindex): 11103 *insn++ = 
BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 11104 bpf_target_off(struct bpf_sk_lookup_kern, 11105 ingress_ifindex, 4, target_size)); 11106 break; 11107 } 11108 11109 return insn - insn_buf; 11110 } 11111 11112 const struct bpf_prog_ops sk_lookup_prog_ops = { 11113 .test_run = bpf_prog_test_run_sk_lookup, 11114 }; 11115 11116 const struct bpf_verifier_ops sk_lookup_verifier_ops = { 11117 .get_func_proto = sk_lookup_func_proto, 11118 .is_valid_access = sk_lookup_is_valid_access, 11119 .convert_ctx_access = sk_lookup_convert_ctx_access, 11120 }; 11121 11122 #endif /* CONFIG_INET */ 11123 11124 DEFINE_BPF_DISPATCHER(xdp) 11125 11126 void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) 11127 { 11128 bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); 11129 } 11130 11131 BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE) 11132 #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) 11133 BTF_SOCK_TYPE_xxx 11134 #undef BTF_SOCK_TYPE 11135 11136 BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) 11137 { 11138 /* tcp6_sock type is not generated in dwarf and hence btf, 11139 * trigger an explicit type generation here. 11140 */ 11141 BTF_TYPE_EMIT(struct tcp6_sock); 11142 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && 11143 sk->sk_family == AF_INET6) 11144 return (unsigned long)sk; 11145 11146 return (unsigned long)NULL; 11147 } 11148 11149 const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { 11150 .func = bpf_skc_to_tcp6_sock, 11151 .gpl_only = false, 11152 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11153 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11154 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], 11155 }; 11156 11157 BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk) 11158 { 11159 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) 11160 return (unsigned long)sk; 11161 11162 return (unsigned long)NULL; 11163 } 11164 11165 const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { 11166 .func = bpf_skc_to_tcp_sock, 11167 .gpl_only = false, 11168 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11169 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11170 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 11171 }; 11172 11173 BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) 11174 { 11175 /* BTF types for tcp_timewait_sock and inet_timewait_sock are not 11176 * generated if CONFIG_INET=n. Trigger an explicit generation here. 
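 * BTF_TYPE_EMIT() just references the type (roughly ((void)(type *)0)),
 * which is enough to make DWARF, and therefore BTF, carry its full
 * definition.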
11177 */ 11178 BTF_TYPE_EMIT(struct inet_timewait_sock); 11179 BTF_TYPE_EMIT(struct tcp_timewait_sock); 11180 11181 #ifdef CONFIG_INET 11182 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) 11183 return (unsigned long)sk; 11184 #endif 11185 11186 #if IS_BUILTIN(CONFIG_IPV6) 11187 if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) 11188 return (unsigned long)sk; 11189 #endif 11190 11191 return (unsigned long)NULL; 11192 } 11193 11194 const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { 11195 .func = bpf_skc_to_tcp_timewait_sock, 11196 .gpl_only = false, 11197 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11198 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11199 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], 11200 }; 11201 11202 BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) 11203 { 11204 #ifdef CONFIG_INET 11205 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) 11206 return (unsigned long)sk; 11207 #endif 11208 11209 #if IS_BUILTIN(CONFIG_IPV6) 11210 if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) 11211 return (unsigned long)sk; 11212 #endif 11213 11214 return (unsigned long)NULL; 11215 } 11216 11217 const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { 11218 .func = bpf_skc_to_tcp_request_sock, 11219 .gpl_only = false, 11220 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11221 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11222 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], 11223 }; 11224 11225 BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk) 11226 { 11227 /* udp6_sock type is not generated in dwarf and hence btf, 11228 * trigger an explicit type generation here. 11229 */ 11230 BTF_TYPE_EMIT(struct udp6_sock); 11231 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && 11232 sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6) 11233 return (unsigned long)sk; 11234 11235 return (unsigned long)NULL; 11236 } 11237 11238 const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { 11239 .func = bpf_skc_to_udp6_sock, 11240 .gpl_only = false, 11241 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11242 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11243 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], 11244 }; 11245 11246 BPF_CALL_1(bpf_skc_to_unix_sock, struct sock *, sk) 11247 { 11248 /* unix_sock type is not generated in dwarf and hence btf, 11249 * trigger an explicit type generation here. 
11250 */ 11251 BTF_TYPE_EMIT(struct unix_sock); 11252 if (sk && sk_fullsock(sk) && sk->sk_family == AF_UNIX) 11253 return (unsigned long)sk; 11254 11255 return (unsigned long)NULL; 11256 } 11257 11258 const struct bpf_func_proto bpf_skc_to_unix_sock_proto = { 11259 .func = bpf_skc_to_unix_sock, 11260 .gpl_only = false, 11261 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11262 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11263 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UNIX], 11264 }; 11265 11266 BPF_CALL_1(bpf_sock_from_file, struct file *, file) 11267 { 11268 return (unsigned long)sock_from_file(file); 11269 } 11270 11271 BTF_ID_LIST(bpf_sock_from_file_btf_ids) 11272 BTF_ID(struct, socket) 11273 BTF_ID(struct, file) 11274 11275 const struct bpf_func_proto bpf_sock_from_file_proto = { 11276 .func = bpf_sock_from_file, 11277 .gpl_only = false, 11278 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11279 .ret_btf_id = &bpf_sock_from_file_btf_ids[0], 11280 .arg1_type = ARG_PTR_TO_BTF_ID, 11281 .arg1_btf_id = &bpf_sock_from_file_btf_ids[1], 11282 }; 11283 11284 static const struct bpf_func_proto * 11285 bpf_sk_base_func_proto(enum bpf_func_id func_id) 11286 { 11287 const struct bpf_func_proto *func; 11288 11289 switch (func_id) { 11290 case BPF_FUNC_skc_to_tcp6_sock: 11291 func = &bpf_skc_to_tcp6_sock_proto; 11292 break; 11293 case BPF_FUNC_skc_to_tcp_sock: 11294 func = &bpf_skc_to_tcp_sock_proto; 11295 break; 11296 case BPF_FUNC_skc_to_tcp_timewait_sock: 11297 func = &bpf_skc_to_tcp_timewait_sock_proto; 11298 break; 11299 case BPF_FUNC_skc_to_tcp_request_sock: 11300 func = &bpf_skc_to_tcp_request_sock_proto; 11301 break; 11302 case BPF_FUNC_skc_to_udp6_sock: 11303 func = &bpf_skc_to_udp6_sock_proto; 11304 break; 11305 case BPF_FUNC_skc_to_unix_sock: 11306 func = &bpf_skc_to_unix_sock_proto; 11307 break; 11308 case BPF_FUNC_ktime_get_coarse_ns: 11309 return &bpf_ktime_get_coarse_ns_proto; 11310 default: 11311 return bpf_base_func_proto(func_id); 11312 } 11313 11314 if (!perfmon_capable()) 11315 return NULL; 11316 11317 return func; 11318 } 11319
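/* Illustrative sketch (not kernel code): how a BPF program might use one of
 * the casting helpers exposed through bpf_sk_base_func_proto().  It assumes
 * a program type whose func_proto falls back to bpf_sk_base_func_proto(), as
 * sk_lookup_func_proto() above does, plus the usual libbpf/vmlinux.h build
 * environment; the section and function names are made up for the example.
 *
 *	SEC("sockops")
 *	int sockops_example(struct bpf_sock_ops *ctx)
 *	{
 *		struct bpf_sock *sk = ctx->sk;
 *		struct tcp_sock *tp;
 *
 *		if (!sk)
 *			return 0;
 *		tp = bpf_skc_to_tcp_sock(sk);
 *		if (!tp)			// not a full TCP socket
 *			return 0;
 *		bpf_printk("srtt_us=%u", tp->srtt_us);
 *		return 0;
 *	}
 *
 * The casting helpers return NULL when the socket does not match the
 * requested type, and loading a program that uses them additionally requires
 * perfmon_capable() (see the check above).
 */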