1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * UDP over IPv6 4 * Linux INET6 implementation 5 * 6 * Authors: 7 * Pedro Roque <roque@di.fc.ul.pt> 8 * 9 * Based on linux/ipv4/udp.c 10 * 11 * Fixes: 12 * Hideaki YOSHIFUJI : sin6_scope_id support 13 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which 14 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind 15 * a single port at the same time. 16 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data 17 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file. 18 */ 19 20 #include <linux/bpf-cgroup.h> 21 #include <linux/errno.h> 22 #include <linux/types.h> 23 #include <linux/socket.h> 24 #include <linux/sockios.h> 25 #include <linux/net.h> 26 #include <linux/in6.h> 27 #include <linux/netdevice.h> 28 #include <linux/if_arp.h> 29 #include <linux/ipv6.h> 30 #include <linux/icmpv6.h> 31 #include <linux/init.h> 32 #include <linux/module.h> 33 #include <linux/skbuff.h> 34 #include <linux/slab.h> 35 #include <linux/uaccess.h> 36 #include <linux/indirect_call_wrapper.h> 37 38 #include <net/addrconf.h> 39 #include <net/ndisc.h> 40 #include <net/protocol.h> 41 #include <net/transp_v6.h> 42 #include <net/ip6_route.h> 43 #include <net/raw.h> 44 #include <net/seg6.h> 45 #include <net/tcp_states.h> 46 #include <net/ip6_checksum.h> 47 #include <net/ip6_tunnel.h> 48 #include <net/xfrm.h> 49 #include <net/inet_hashtables.h> 50 #include <net/inet6_hashtables.h> 51 #include <net/busy_poll.h> 52 #include <net/sock_reuseport.h> 53 54 #include <linux/proc_fs.h> 55 #include <linux/seq_file.h> 56 #include <trace/events/skb.h> 57 #include "udp_impl.h" 58 59 static void udpv6_destruct_sock(struct sock *sk) 60 { 61 udp_destruct_common(sk); 62 inet6_sock_destruct(sk); 63 } 64 65 int udpv6_init_sock(struct sock *sk) 66 { 67 udp_lib_init_sock(sk); 68 sk->sk_destruct = udpv6_destruct_sock; 69 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 70 return 0; 71 } 72 73 static u32 udp6_ehashfn(const struct net *net, 74 const struct in6_addr *laddr, 75 const u16 lport, 76 const struct in6_addr *faddr, 77 const __be16 fport) 78 { 79 static u32 udp6_ehash_secret __read_mostly; 80 static u32 udp_ipv6_hash_secret __read_mostly; 81 82 u32 lhash, fhash; 83 84 net_get_random_once(&udp6_ehash_secret, 85 sizeof(udp6_ehash_secret)); 86 net_get_random_once(&udp_ipv6_hash_secret, 87 sizeof(udp_ipv6_hash_secret)); 88 89 lhash = (__force u32)laddr->s6_addr32[3]; 90 fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret); 91 92 return __inet6_ehashfn(lhash, lport, fhash, fport, 93 udp_ipv6_hash_secret + net_hash_mix(net)); 94 } 95 96 int udp_v6_get_port(struct sock *sk, unsigned short snum) 97 { 98 unsigned int hash2_nulladdr = 99 ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum); 100 unsigned int hash2_partial = 101 ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0); 102 103 /* precompute partial secondary hash */ 104 udp_sk(sk)->udp_portaddr_hash = hash2_partial; 105 return udp_lib_get_port(sk, snum, hash2_nulladdr); 106 } 107 108 void udp_v6_rehash(struct sock *sk) 109 { 110 u16 new_hash = ipv6_portaddr_hash(sock_net(sk), 111 &sk->sk_v6_rcv_saddr, 112 inet_sk(sk)->inet_num); 113 114 udp_lib_rehash(sk, new_hash); 115 } 116 117 static int compute_score(struct sock *sk, struct net *net, 118 const struct in6_addr *saddr, __be16 sport, 119 const struct in6_addr *daddr, unsigned short hnum, 120 int dif, int sdif) 121 { 122 int bound_dev_if, score; 123 struct inet_sock *inet; 124 bool dev_match; 125 126 if (!net_eq(sock_net(sk), net) || 127 udp_sk(sk)->udp_port_hash != hnum || 128 sk->sk_family != PF_INET6) 129 return -1; 130 131 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) 132 return -1; 133 134 score = 0; 135 inet = inet_sk(sk); 136 137 if (inet->inet_dport) { 138 if (inet->inet_dport != sport) 139 return -1; 140 score++; 141 } 142 143 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { 144 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) 145 return -1; 146 score++; 147 } 148 149 bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); 150 dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif); 151 if (!dev_match) 152 return -1; 153 if (bound_dev_if) 154 score++; 155 156 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) 157 score++; 158 159 return score; 160 } 161 162 static struct sock *lookup_reuseport(struct net *net, struct sock *sk, 163 struct sk_buff *skb, 164 const struct in6_addr *saddr, 165 __be16 sport, 166 const struct in6_addr *daddr, 167 unsigned int hnum) 168 { 169 struct sock *reuse_sk = NULL; 170 u32 hash; 171 172 if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { 173 hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); 174 reuse_sk = reuseport_select_sock(sk, hash, skb, 175 sizeof(struct udphdr)); 176 } 177 return reuse_sk; 178 } 179 180 /* called with rcu_read_lock() */ 181 static struct sock *udp6_lib_lookup2(struct net *net, 182 const struct in6_addr *saddr, __be16 sport, 183 const struct in6_addr *daddr, unsigned int hnum, 184 int dif, int sdif, struct udp_hslot *hslot2, 185 struct sk_buff *skb) 186 { 187 struct sock *sk, *result; 188 int score, badness; 189 190 result = NULL; 191 badness = -1; 192 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 193 score = compute_score(sk, net, saddr, sport, 194 daddr, hnum, dif, sdif); 195 if (score > badness) { 196 result = lookup_reuseport(net, sk, skb, 197 saddr, sport, daddr, hnum); 198 /* Fall back to scoring if group has connections */ 199 if (result && !reuseport_has_conns(sk)) 200 return result; 201 202 result = result ? : sk; 203 badness = score; 204 } 205 } 206 return result; 207 } 208 209 static inline struct sock *udp6_lookup_run_bpf(struct net *net, 210 struct udp_table *udptable, 211 struct sk_buff *skb, 212 const struct in6_addr *saddr, 213 __be16 sport, 214 const struct in6_addr *daddr, 215 u16 hnum, const int dif) 216 { 217 struct sock *sk, *reuse_sk; 218 bool no_reuseport; 219 220 if (udptable != net->ipv4.udp_table) 221 return NULL; /* only UDP is supported */ 222 223 no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport, 224 daddr, hnum, dif, &sk); 225 if (no_reuseport || IS_ERR_OR_NULL(sk)) 226 return sk; 227 228 reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); 229 if (reuse_sk) 230 sk = reuse_sk; 231 return sk; 232 } 233 234 /* rcu_read_lock() must be held */ 235 struct sock *__udp6_lib_lookup(struct net *net, 236 const struct in6_addr *saddr, __be16 sport, 237 const struct in6_addr *daddr, __be16 dport, 238 int dif, int sdif, struct udp_table *udptable, 239 struct sk_buff *skb) 240 { 241 unsigned short hnum = ntohs(dport); 242 unsigned int hash2, slot2; 243 struct udp_hslot *hslot2; 244 struct sock *result, *sk; 245 246 hash2 = ipv6_portaddr_hash(net, daddr, hnum); 247 slot2 = hash2 & udptable->mask; 248 hslot2 = &udptable->hash2[slot2]; 249 250 /* Lookup connected or non-wildcard sockets */ 251 result = udp6_lib_lookup2(net, saddr, sport, 252 daddr, hnum, dif, sdif, 253 hslot2, skb); 254 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED) 255 goto done; 256 257 /* Lookup redirect from BPF */ 258 if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { 259 sk = udp6_lookup_run_bpf(net, udptable, skb, 260 saddr, sport, daddr, hnum, dif); 261 if (sk) { 262 result = sk; 263 goto done; 264 } 265 } 266 267 /* Got non-wildcard socket or error on first lookup */ 268 if (result) 269 goto done; 270 271 /* Lookup wildcard sockets */ 272 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); 273 slot2 = hash2 & udptable->mask; 274 hslot2 = &udptable->hash2[slot2]; 275 276 result = udp6_lib_lookup2(net, saddr, sport, 277 &in6addr_any, hnum, dif, sdif, 278 hslot2, skb); 279 done: 280 if (IS_ERR(result)) 281 return NULL; 282 return result; 283 } 284 EXPORT_SYMBOL_GPL(__udp6_lib_lookup); 285 286 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, 287 __be16 sport, __be16 dport, 288 struct udp_table *udptable) 289 { 290 const struct ipv6hdr *iph = ipv6_hdr(skb); 291 292 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, 293 &iph->daddr, dport, inet6_iif(skb), 294 inet6_sdif(skb), udptable, skb); 295 } 296 297 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb, 298 __be16 sport, __be16 dport) 299 { 300 const struct ipv6hdr *iph = ipv6_hdr(skb); 301 struct net *net = dev_net(skb->dev); 302 303 return __udp6_lib_lookup(net, &iph->saddr, sport, 304 &iph->daddr, dport, inet6_iif(skb), 305 inet6_sdif(skb), net->ipv4.udp_table, NULL); 306 } 307 308 /* Must be called under rcu_read_lock(). 309 * Does increment socket refcount. 310 */ 311 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6) 312 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, 313 const struct in6_addr *daddr, __be16 dport, int dif) 314 { 315 struct sock *sk; 316 317 sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport, 318 dif, 0, net->ipv4.udp_table, NULL); 319 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) 320 sk = NULL; 321 return sk; 322 } 323 EXPORT_SYMBOL_GPL(udp6_lib_lookup); 324 #endif 325 326 /* do not use the scratch area len for jumbogram: their length execeeds the 327 * scratch area space; note that the IP6CB flags is still in the first 328 * cacheline, so checking for jumbograms is cheap 329 */ 330 static int udp6_skb_len(struct sk_buff *skb) 331 { 332 return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb); 333 } 334 335 /* 336 * This should be easy, if there is something there we 337 * return it, otherwise we block. 338 */ 339 340 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 341 int flags, int *addr_len) 342 { 343 struct ipv6_pinfo *np = inet6_sk(sk); 344 struct inet_sock *inet = inet_sk(sk); 345 struct sk_buff *skb; 346 unsigned int ulen, copied; 347 int off, err, peeking = flags & MSG_PEEK; 348 int is_udplite = IS_UDPLITE(sk); 349 struct udp_mib __percpu *mib; 350 bool checksum_valid = false; 351 int is_udp4; 352 353 if (flags & MSG_ERRQUEUE) 354 return ipv6_recv_error(sk, msg, len, addr_len); 355 356 if (np->rxpmtu && np->rxopt.bits.rxpmtu) 357 return ipv6_recv_rxpmtu(sk, msg, len, addr_len); 358 359 try_again: 360 off = sk_peek_offset(sk, flags); 361 skb = __skb_recv_udp(sk, flags, &off, &err); 362 if (!skb) 363 return err; 364 365 ulen = udp6_skb_len(skb); 366 copied = len; 367 if (copied > ulen - off) 368 copied = ulen - off; 369 else if (copied < ulen) 370 msg->msg_flags |= MSG_TRUNC; 371 372 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 373 mib = __UDPX_MIB(sk, is_udp4); 374 375 /* 376 * If checksum is needed at all, try to do it while copying the 377 * data. If the data is truncated, or if we only want a partial 378 * coverage checksum (UDP-Lite), do it before the copy. 379 */ 380 381 if (copied < ulen || peeking || 382 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { 383 checksum_valid = udp_skb_csum_unnecessary(skb) || 384 !__udp_lib_checksum_complete(skb); 385 if (!checksum_valid) 386 goto csum_copy_err; 387 } 388 389 if (checksum_valid || udp_skb_csum_unnecessary(skb)) { 390 if (udp_skb_is_linear(skb)) 391 err = copy_linear_skb(skb, copied, off, &msg->msg_iter); 392 else 393 err = skb_copy_datagram_msg(skb, off, msg, copied); 394 } else { 395 err = skb_copy_and_csum_datagram_msg(skb, off, msg); 396 if (err == -EINVAL) 397 goto csum_copy_err; 398 } 399 if (unlikely(err)) { 400 if (!peeking) { 401 atomic_inc(&sk->sk_drops); 402 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 403 } 404 kfree_skb(skb); 405 return err; 406 } 407 if (!peeking) 408 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS); 409 410 sock_recv_cmsgs(msg, sk, skb); 411 412 /* Copy the address. */ 413 if (msg->msg_name) { 414 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 415 sin6->sin6_family = AF_INET6; 416 sin6->sin6_port = udp_hdr(skb)->source; 417 sin6->sin6_flowinfo = 0; 418 419 if (is_udp4) { 420 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, 421 &sin6->sin6_addr); 422 sin6->sin6_scope_id = 0; 423 } else { 424 sin6->sin6_addr = ipv6_hdr(skb)->saddr; 425 sin6->sin6_scope_id = 426 ipv6_iface_scope_id(&sin6->sin6_addr, 427 inet6_iif(skb)); 428 } 429 *addr_len = sizeof(*sin6); 430 431 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, 432 (struct sockaddr *)sin6); 433 } 434 435 if (udp_sk(sk)->gro_enabled) 436 udp_cmsg_recv(msg, sk, skb); 437 438 if (np->rxopt.all) 439 ip6_datagram_recv_common_ctl(sk, msg, skb); 440 441 if (is_udp4) { 442 if (inet->cmsg_flags) 443 ip_cmsg_recv_offset(msg, sk, skb, 444 sizeof(struct udphdr), off); 445 } else { 446 if (np->rxopt.all) 447 ip6_datagram_recv_specific_ctl(sk, msg, skb); 448 } 449 450 err = copied; 451 if (flags & MSG_TRUNC) 452 err = ulen; 453 454 skb_consume_udp(sk, skb, peeking ? -err : err); 455 return err; 456 457 csum_copy_err: 458 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, 459 udp_skb_destructor)) { 460 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS); 461 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 462 } 463 kfree_skb(skb); 464 465 /* starting over for a new packet, but check if we need to yield */ 466 cond_resched(); 467 msg->msg_flags &= ~MSG_TRUNC; 468 goto try_again; 469 } 470 471 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key); 472 void udpv6_encap_enable(void) 473 { 474 static_branch_inc(&udpv6_encap_needed_key); 475 } 476 EXPORT_SYMBOL(udpv6_encap_enable); 477 478 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go 479 * through error handlers in encapsulations looking for a match. 480 */ 481 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, 482 struct inet6_skb_parm *opt, 483 u8 type, u8 code, int offset, __be32 info) 484 { 485 int i; 486 487 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 488 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 489 u8 type, u8 code, int offset, __be32 info); 490 const struct ip6_tnl_encap_ops *encap; 491 492 encap = rcu_dereference(ip6tun_encaps[i]); 493 if (!encap) 494 continue; 495 handler = encap->err_handler; 496 if (handler && !handler(skb, opt, type, code, offset, info)) 497 return 0; 498 } 499 500 return -ENOENT; 501 } 502 503 /* Try to match ICMP errors to UDP tunnels by looking up a socket without 504 * reversing source and destination port: this will match tunnels that force the 505 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that 506 * lwtunnels might actually break this assumption by being configured with 507 * different destination ports on endpoints, in this case we won't be able to 508 * trace ICMP messages back to them. 509 * 510 * If this doesn't match any socket, probe tunnels with arbitrary destination 511 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port 512 * we've sent packets to won't necessarily match the local destination port. 513 * 514 * Then ask the tunnel implementation to match the error against a valid 515 * association. 516 * 517 * Return an error if we can't find a match, the socket if we need further 518 * processing, zero otherwise. 519 */ 520 static struct sock *__udp6_lib_err_encap(struct net *net, 521 const struct ipv6hdr *hdr, int offset, 522 struct udphdr *uh, 523 struct udp_table *udptable, 524 struct sock *sk, 525 struct sk_buff *skb, 526 struct inet6_skb_parm *opt, 527 u8 type, u8 code, __be32 info) 528 { 529 int (*lookup)(struct sock *sk, struct sk_buff *skb); 530 int network_offset, transport_offset; 531 struct udp_sock *up; 532 533 network_offset = skb_network_offset(skb); 534 transport_offset = skb_transport_offset(skb); 535 536 /* Network header needs to point to the outer IPv6 header inside ICMP */ 537 skb_reset_network_header(skb); 538 539 /* Transport header needs to point to the UDP header */ 540 skb_set_transport_header(skb, offset); 541 542 if (sk) { 543 up = udp_sk(sk); 544 545 lookup = READ_ONCE(up->encap_err_lookup); 546 if (lookup && lookup(sk, skb)) 547 sk = NULL; 548 549 goto out; 550 } 551 552 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source, 553 &hdr->saddr, uh->dest, 554 inet6_iif(skb), 0, udptable, skb); 555 if (sk) { 556 up = udp_sk(sk); 557 558 lookup = READ_ONCE(up->encap_err_lookup); 559 if (!lookup || lookup(sk, skb)) 560 sk = NULL; 561 } 562 563 out: 564 if (!sk) { 565 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code, 566 offset, info)); 567 } 568 569 skb_set_transport_header(skb, transport_offset); 570 skb_set_network_header(skb, network_offset); 571 572 return sk; 573 } 574 575 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 576 u8 type, u8 code, int offset, __be32 info, 577 struct udp_table *udptable) 578 { 579 struct ipv6_pinfo *np; 580 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; 581 const struct in6_addr *saddr = &hdr->saddr; 582 const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr; 583 struct udphdr *uh = (struct udphdr *)(skb->data+offset); 584 bool tunnel = false; 585 struct sock *sk; 586 int harderr; 587 int err; 588 struct net *net = dev_net(skb->dev); 589 590 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, 591 inet6_iif(skb), inet6_sdif(skb), udptable, NULL); 592 593 if (!sk || udp_sk(sk)->encap_type) { 594 /* No socket for error: try tunnels before discarding */ 595 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 596 sk = __udp6_lib_err_encap(net, hdr, offset, uh, 597 udptable, sk, skb, 598 opt, type, code, info); 599 if (!sk) 600 return 0; 601 } else 602 sk = ERR_PTR(-ENOENT); 603 604 if (IS_ERR(sk)) { 605 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), 606 ICMP6_MIB_INERRORS); 607 return PTR_ERR(sk); 608 } 609 610 tunnel = true; 611 } 612 613 harderr = icmpv6_err_convert(type, code, &err); 614 np = inet6_sk(sk); 615 616 if (type == ICMPV6_PKT_TOOBIG) { 617 if (!ip6_sk_accept_pmtu(sk)) 618 goto out; 619 ip6_sk_update_pmtu(skb, sk, info); 620 if (np->pmtudisc != IPV6_PMTUDISC_DONT) 621 harderr = 1; 622 } 623 if (type == NDISC_REDIRECT) { 624 if (tunnel) { 625 ip6_redirect(skb, sock_net(sk), inet6_iif(skb), 626 sk->sk_mark, sk->sk_uid); 627 } else { 628 ip6_sk_redirect(skb, sk); 629 } 630 goto out; 631 } 632 633 /* Tunnels don't have an application socket: don't pass errors back */ 634 if (tunnel) { 635 if (udp_sk(sk)->encap_err_rcv) 636 udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, 637 ntohl(info), (u8 *)(uh+1)); 638 goto out; 639 } 640 641 if (!np->recverr) { 642 if (!harderr || sk->sk_state != TCP_ESTABLISHED) 643 goto out; 644 } else { 645 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); 646 } 647 648 sk->sk_err = err; 649 sk_error_report(sk); 650 out: 651 return 0; 652 } 653 654 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 655 { 656 int rc; 657 658 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { 659 sock_rps_save_rxhash(sk, skb); 660 sk_mark_napi_id(sk, skb); 661 sk_incoming_cpu_update(sk); 662 } else { 663 sk_mark_napi_id_once(sk, skb); 664 } 665 666 rc = __udp_enqueue_schedule_skb(sk, skb); 667 if (rc < 0) { 668 int is_udplite = IS_UDPLITE(sk); 669 enum skb_drop_reason drop_reason; 670 671 /* Note that an ENOMEM error is charged twice */ 672 if (rc == -ENOMEM) { 673 UDP6_INC_STATS(sock_net(sk), 674 UDP_MIB_RCVBUFERRORS, is_udplite); 675 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 676 } else { 677 UDP6_INC_STATS(sock_net(sk), 678 UDP_MIB_MEMERRORS, is_udplite); 679 drop_reason = SKB_DROP_REASON_PROTO_MEM; 680 } 681 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 682 kfree_skb_reason(skb, drop_reason); 683 return -1; 684 } 685 686 return 0; 687 } 688 689 static __inline__ int udpv6_err(struct sk_buff *skb, 690 struct inet6_skb_parm *opt, u8 type, 691 u8 code, int offset, __be32 info) 692 { 693 return __udp6_lib_err(skb, opt, type, code, offset, info, 694 dev_net(skb->dev)->ipv4.udp_table); 695 } 696 697 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) 698 { 699 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 700 struct udp_sock *up = udp_sk(sk); 701 int is_udplite = IS_UDPLITE(sk); 702 703 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { 704 drop_reason = SKB_DROP_REASON_XFRM_POLICY; 705 goto drop; 706 } 707 nf_reset_ct(skb); 708 709 if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) { 710 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 711 712 /* 713 * This is an encapsulation socket so pass the skb to 714 * the socket's udp_encap_rcv() hook. Otherwise, just 715 * fall through and pass this up the UDP socket. 716 * up->encap_rcv() returns the following value: 717 * =0 if skb was successfully passed to the encap 718 * handler or was discarded by it. 719 * >0 if skb should be passed on to UDP. 720 * <0 if skb should be resubmitted as proto -N 721 */ 722 723 /* if we're overly short, let UDP handle it */ 724 encap_rcv = READ_ONCE(up->encap_rcv); 725 if (encap_rcv) { 726 int ret; 727 728 /* Verify checksum before giving to encap */ 729 if (udp_lib_checksum_complete(skb)) 730 goto csum_error; 731 732 ret = encap_rcv(sk, skb); 733 if (ret <= 0) { 734 __UDP6_INC_STATS(sock_net(sk), 735 UDP_MIB_INDATAGRAMS, 736 is_udplite); 737 return -ret; 738 } 739 } 740 741 /* FALLTHROUGH -- it's a UDP Packet */ 742 } 743 744 /* 745 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 746 */ 747 if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 748 749 if (up->pcrlen == 0) { /* full coverage was set */ 750 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", 751 UDP_SKB_CB(skb)->cscov, skb->len); 752 goto drop; 753 } 754 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { 755 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n", 756 UDP_SKB_CB(skb)->cscov, up->pcrlen); 757 goto drop; 758 } 759 } 760 761 prefetch(&sk->sk_rmem_alloc); 762 if (rcu_access_pointer(sk->sk_filter) && 763 udp_lib_checksum_complete(skb)) 764 goto csum_error; 765 766 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) { 767 drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 768 goto drop; 769 } 770 771 udp_csum_pull_header(skb); 772 773 skb_dst_drop(skb); 774 775 return __udpv6_queue_rcv_skb(sk, skb); 776 777 csum_error: 778 drop_reason = SKB_DROP_REASON_UDP_CSUM; 779 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 780 drop: 781 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 782 atomic_inc(&sk->sk_drops); 783 kfree_skb_reason(skb, drop_reason); 784 return -1; 785 } 786 787 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 788 { 789 struct sk_buff *next, *segs; 790 int ret; 791 792 if (likely(!udp_unexpected_gso(sk, skb))) 793 return udpv6_queue_rcv_one_skb(sk, skb); 794 795 __skb_push(skb, -skb_mac_offset(skb)); 796 segs = udp_rcv_segment(sk, skb, false); 797 skb_list_walk_safe(segs, skb, next) { 798 __skb_pull(skb, skb_transport_offset(skb)); 799 800 udp_post_segment_fix_csum(skb); 801 ret = udpv6_queue_rcv_one_skb(sk, skb); 802 if (ret > 0) 803 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret, 804 true); 805 } 806 return 0; 807 } 808 809 static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk, 810 __be16 loc_port, const struct in6_addr *loc_addr, 811 __be16 rmt_port, const struct in6_addr *rmt_addr, 812 int dif, int sdif, unsigned short hnum) 813 { 814 const struct inet_sock *inet = inet_sk(sk); 815 816 if (!net_eq(sock_net(sk), net)) 817 return false; 818 819 if (udp_sk(sk)->udp_port_hash != hnum || 820 sk->sk_family != PF_INET6 || 821 (inet->inet_dport && inet->inet_dport != rmt_port) || 822 (!ipv6_addr_any(&sk->sk_v6_daddr) && 823 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) || 824 !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) || 825 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && 826 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))) 827 return false; 828 if (!inet6_mc_check(sk, loc_addr, rmt_addr)) 829 return false; 830 return true; 831 } 832 833 static void udp6_csum_zero_error(struct sk_buff *skb) 834 { 835 /* RFC 2460 section 8.1 says that we SHOULD log 836 * this error. Well, it is reasonable. 837 */ 838 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", 839 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), 840 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); 841 } 842 843 /* 844 * Note: called only from the BH handler context, 845 * so we don't need to lock the hashes. 846 */ 847 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 848 const struct in6_addr *saddr, const struct in6_addr *daddr, 849 struct udp_table *udptable, int proto) 850 { 851 struct sock *sk, *first = NULL; 852 const struct udphdr *uh = udp_hdr(skb); 853 unsigned short hnum = ntohs(uh->dest); 854 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); 855 unsigned int offset = offsetof(typeof(*sk), sk_node); 856 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); 857 int dif = inet6_iif(skb); 858 int sdif = inet6_sdif(skb); 859 struct hlist_node *node; 860 struct sk_buff *nskb; 861 862 if (use_hash2) { 863 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) & 864 udptable->mask; 865 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask; 866 start_lookup: 867 hslot = &udptable->hash2[hash2]; 868 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 869 } 870 871 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { 872 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr, 873 uh->source, saddr, dif, sdif, 874 hnum)) 875 continue; 876 /* If zero checksum and no_check is not on for 877 * the socket then skip it. 878 */ 879 if (!uh->check && !udp_sk(sk)->no_check6_rx) 880 continue; 881 if (!first) { 882 first = sk; 883 continue; 884 } 885 nskb = skb_clone(skb, GFP_ATOMIC); 886 if (unlikely(!nskb)) { 887 atomic_inc(&sk->sk_drops); 888 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 889 IS_UDPLITE(sk)); 890 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, 891 IS_UDPLITE(sk)); 892 continue; 893 } 894 895 if (udpv6_queue_rcv_skb(sk, nskb) > 0) 896 consume_skb(nskb); 897 } 898 899 /* Also lookup *:port if we are using hash2 and haven't done so yet. */ 900 if (use_hash2 && hash2 != hash2_any) { 901 hash2 = hash2_any; 902 goto start_lookup; 903 } 904 905 if (first) { 906 if (udpv6_queue_rcv_skb(first, skb) > 0) 907 consume_skb(skb); 908 } else { 909 kfree_skb(skb); 910 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI, 911 proto == IPPROTO_UDPLITE); 912 } 913 return 0; 914 } 915 916 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) 917 { 918 if (udp_sk_rx_dst_set(sk, dst)) { 919 const struct rt6_info *rt = (const struct rt6_info *)dst; 920 921 sk->sk_rx_dst_cookie = rt6_get_cookie(rt); 922 } 923 } 924 925 /* wrapper for udp_queue_rcv_skb tacking care of csum conversion and 926 * return code conversion for ip layer consumption 927 */ 928 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, 929 struct udphdr *uh) 930 { 931 int ret; 932 933 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) 934 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo); 935 936 ret = udpv6_queue_rcv_skb(sk, skb); 937 938 /* a return value > 0 means to resubmit the input */ 939 if (ret > 0) 940 return ret; 941 return 0; 942 } 943 944 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 945 int proto) 946 { 947 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; 948 const struct in6_addr *saddr, *daddr; 949 struct net *net = dev_net(skb->dev); 950 struct udphdr *uh; 951 struct sock *sk; 952 bool refcounted; 953 u32 ulen = 0; 954 955 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 956 goto discard; 957 958 saddr = &ipv6_hdr(skb)->saddr; 959 daddr = &ipv6_hdr(skb)->daddr; 960 uh = udp_hdr(skb); 961 962 ulen = ntohs(uh->len); 963 if (ulen > skb->len) 964 goto short_packet; 965 966 if (proto == IPPROTO_UDP) { 967 /* UDP validates ulen. */ 968 969 /* Check for jumbo payload */ 970 if (ulen == 0) 971 ulen = skb->len; 972 973 if (ulen < sizeof(*uh)) 974 goto short_packet; 975 976 if (ulen < skb->len) { 977 if (pskb_trim_rcsum(skb, ulen)) 978 goto short_packet; 979 saddr = &ipv6_hdr(skb)->saddr; 980 daddr = &ipv6_hdr(skb)->daddr; 981 uh = udp_hdr(skb); 982 } 983 } 984 985 if (udp6_csum_init(skb, uh, proto)) 986 goto csum_error; 987 988 /* Check if the socket is already available, e.g. due to early demux */ 989 sk = skb_steal_sock(skb, &refcounted); 990 if (sk) { 991 struct dst_entry *dst = skb_dst(skb); 992 int ret; 993 994 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) 995 udp6_sk_rx_dst_set(sk, dst); 996 997 if (!uh->check && !udp_sk(sk)->no_check6_rx) { 998 if (refcounted) 999 sock_put(sk); 1000 goto report_csum_error; 1001 } 1002 1003 ret = udp6_unicast_rcv_skb(sk, skb, uh); 1004 if (refcounted) 1005 sock_put(sk); 1006 return ret; 1007 } 1008 1009 /* 1010 * Multicast receive code 1011 */ 1012 if (ipv6_addr_is_multicast(daddr)) 1013 return __udp6_lib_mcast_deliver(net, skb, 1014 saddr, daddr, udptable, proto); 1015 1016 /* Unicast */ 1017 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 1018 if (sk) { 1019 if (!uh->check && !udp_sk(sk)->no_check6_rx) 1020 goto report_csum_error; 1021 return udp6_unicast_rcv_skb(sk, skb, uh); 1022 } 1023 1024 reason = SKB_DROP_REASON_NO_SOCKET; 1025 1026 if (!uh->check) 1027 goto report_csum_error; 1028 1029 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 1030 goto discard; 1031 nf_reset_ct(skb); 1032 1033 if (udp_lib_checksum_complete(skb)) 1034 goto csum_error; 1035 1036 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 1037 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); 1038 1039 kfree_skb_reason(skb, reason); 1040 return 0; 1041 1042 short_packet: 1043 if (reason == SKB_DROP_REASON_NOT_SPECIFIED) 1044 reason = SKB_DROP_REASON_PKT_TOO_SMALL; 1045 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", 1046 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1047 saddr, ntohs(uh->source), 1048 ulen, skb->len, 1049 daddr, ntohs(uh->dest)); 1050 goto discard; 1051 1052 report_csum_error: 1053 udp6_csum_zero_error(skb); 1054 csum_error: 1055 if (reason == SKB_DROP_REASON_NOT_SPECIFIED) 1056 reason = SKB_DROP_REASON_UDP_CSUM; 1057 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 1058 discard: 1059 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1060 kfree_skb_reason(skb, reason); 1061 return 0; 1062 } 1063 1064 1065 static struct sock *__udp6_lib_demux_lookup(struct net *net, 1066 __be16 loc_port, const struct in6_addr *loc_addr, 1067 __be16 rmt_port, const struct in6_addr *rmt_addr, 1068 int dif, int sdif) 1069 { 1070 struct udp_table *udptable = net->ipv4.udp_table; 1071 unsigned short hnum = ntohs(loc_port); 1072 unsigned int hash2, slot2; 1073 struct udp_hslot *hslot2; 1074 __portpair ports; 1075 struct sock *sk; 1076 1077 hash2 = ipv6_portaddr_hash(net, loc_addr, hnum); 1078 slot2 = hash2 & udptable->mask; 1079 hslot2 = &udptable->hash2[slot2]; 1080 ports = INET_COMBINED_PORTS(rmt_port, hnum); 1081 1082 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 1083 if (sk->sk_state == TCP_ESTABLISHED && 1084 inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif)) 1085 return sk; 1086 /* Only check first socket in chain */ 1087 break; 1088 } 1089 return NULL; 1090 } 1091 1092 void udp_v6_early_demux(struct sk_buff *skb) 1093 { 1094 struct net *net = dev_net(skb->dev); 1095 const struct udphdr *uh; 1096 struct sock *sk; 1097 struct dst_entry *dst; 1098 int dif = skb->dev->ifindex; 1099 int sdif = inet6_sdif(skb); 1100 1101 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 1102 sizeof(struct udphdr))) 1103 return; 1104 1105 uh = udp_hdr(skb); 1106 1107 if (skb->pkt_type == PACKET_HOST) 1108 sk = __udp6_lib_demux_lookup(net, uh->dest, 1109 &ipv6_hdr(skb)->daddr, 1110 uh->source, &ipv6_hdr(skb)->saddr, 1111 dif, sdif); 1112 else 1113 return; 1114 1115 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 1116 return; 1117 1118 skb->sk = sk; 1119 skb->destructor = sock_efree; 1120 dst = rcu_dereference(sk->sk_rx_dst); 1121 1122 if (dst) 1123 dst = dst_check(dst, sk->sk_rx_dst_cookie); 1124 if (dst) { 1125 /* set noref for now. 1126 * any place which wants to hold dst has to call 1127 * dst_hold_safe() 1128 */ 1129 skb_dst_set_noref(skb, dst); 1130 } 1131 } 1132 1133 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb) 1134 { 1135 return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP); 1136 } 1137 1138 /* 1139 * Throw away all pending data and cancel the corking. Socket is locked. 1140 */ 1141 static void udp_v6_flush_pending_frames(struct sock *sk) 1142 { 1143 struct udp_sock *up = udp_sk(sk); 1144 1145 if (up->pending == AF_INET) 1146 udp_flush_pending_frames(sk); 1147 else if (up->pending) { 1148 up->len = 0; 1149 up->pending = 0; 1150 ip6_flush_pending_frames(sk); 1151 } 1152 } 1153 1154 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, 1155 int addr_len) 1156 { 1157 if (addr_len < offsetofend(struct sockaddr, sa_family)) 1158 return -EINVAL; 1159 /* The following checks are replicated from __ip6_datagram_connect() 1160 * and intended to prevent BPF program called below from accessing 1161 * bytes that are out of the bound specified by user in addr_len. 1162 */ 1163 if (uaddr->sa_family == AF_INET) { 1164 if (ipv6_only_sock(sk)) 1165 return -EAFNOSUPPORT; 1166 return udp_pre_connect(sk, uaddr, addr_len); 1167 } 1168 1169 if (addr_len < SIN6_LEN_RFC2133) 1170 return -EINVAL; 1171 1172 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr); 1173 } 1174 1175 /** 1176 * udp6_hwcsum_outgoing - handle outgoing HW checksumming 1177 * @sk: socket we are sending on 1178 * @skb: sk_buff containing the filled-in UDP header 1179 * (checksum field must be zeroed out) 1180 * @saddr: source address 1181 * @daddr: destination address 1182 * @len: length of packet 1183 */ 1184 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, 1185 const struct in6_addr *saddr, 1186 const struct in6_addr *daddr, int len) 1187 { 1188 unsigned int offset; 1189 struct udphdr *uh = udp_hdr(skb); 1190 struct sk_buff *frags = skb_shinfo(skb)->frag_list; 1191 __wsum csum = 0; 1192 1193 if (!frags) { 1194 /* Only one fragment on the socket. */ 1195 skb->csum_start = skb_transport_header(skb) - skb->head; 1196 skb->csum_offset = offsetof(struct udphdr, check); 1197 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0); 1198 } else { 1199 /* 1200 * HW-checksum won't work as there are two or more 1201 * fragments on the socket so that all csums of sk_buffs 1202 * should be together 1203 */ 1204 offset = skb_transport_offset(skb); 1205 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 1206 csum = skb->csum; 1207 1208 skb->ip_summed = CHECKSUM_NONE; 1209 1210 do { 1211 csum = csum_add(csum, frags->csum); 1212 } while ((frags = frags->next)); 1213 1214 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 1215 csum); 1216 if (uh->check == 0) 1217 uh->check = CSUM_MANGLED_0; 1218 } 1219 } 1220 1221 /* 1222 * Sending 1223 */ 1224 1225 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, 1226 struct inet_cork *cork) 1227 { 1228 struct sock *sk = skb->sk; 1229 struct udphdr *uh; 1230 int err = 0; 1231 int is_udplite = IS_UDPLITE(sk); 1232 __wsum csum = 0; 1233 int offset = skb_transport_offset(skb); 1234 int len = skb->len - offset; 1235 int datalen = len - sizeof(*uh); 1236 1237 /* 1238 * Create a UDP header 1239 */ 1240 uh = udp_hdr(skb); 1241 uh->source = fl6->fl6_sport; 1242 uh->dest = fl6->fl6_dport; 1243 uh->len = htons(len); 1244 uh->check = 0; 1245 1246 if (cork->gso_size) { 1247 const int hlen = skb_network_header_len(skb) + 1248 sizeof(struct udphdr); 1249 1250 if (hlen + cork->gso_size > cork->fragsize) { 1251 kfree_skb(skb); 1252 return -EINVAL; 1253 } 1254 if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { 1255 kfree_skb(skb); 1256 return -EINVAL; 1257 } 1258 if (udp_sk(sk)->no_check6_tx) { 1259 kfree_skb(skb); 1260 return -EINVAL; 1261 } 1262 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 1263 dst_xfrm(skb_dst(skb))) { 1264 kfree_skb(skb); 1265 return -EIO; 1266 } 1267 1268 if (datalen > cork->gso_size) { 1269 skb_shinfo(skb)->gso_size = cork->gso_size; 1270 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 1271 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen, 1272 cork->gso_size); 1273 } 1274 goto csum_partial; 1275 } 1276 1277 if (is_udplite) 1278 csum = udplite_csum(skb); 1279 else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */ 1280 skb->ip_summed = CHECKSUM_NONE; 1281 goto send; 1282 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 1283 csum_partial: 1284 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len); 1285 goto send; 1286 } else 1287 csum = udp_csum(skb); 1288 1289 /* add protocol-dependent pseudo-header */ 1290 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, 1291 len, fl6->flowi6_proto, csum); 1292 if (uh->check == 0) 1293 uh->check = CSUM_MANGLED_0; 1294 1295 send: 1296 err = ip6_send_skb(skb); 1297 if (err) { 1298 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { 1299 UDP6_INC_STATS(sock_net(sk), 1300 UDP_MIB_SNDBUFERRORS, is_udplite); 1301 err = 0; 1302 } 1303 } else { 1304 UDP6_INC_STATS(sock_net(sk), 1305 UDP_MIB_OUTDATAGRAMS, is_udplite); 1306 } 1307 return err; 1308 } 1309 1310 static int udp_v6_push_pending_frames(struct sock *sk) 1311 { 1312 struct sk_buff *skb; 1313 struct udp_sock *up = udp_sk(sk); 1314 int err = 0; 1315 1316 if (up->pending == AF_INET) 1317 return udp_push_pending_frames(sk); 1318 1319 skb = ip6_finish_skb(sk); 1320 if (!skb) 1321 goto out; 1322 1323 err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6, 1324 &inet_sk(sk)->cork.base); 1325 out: 1326 up->len = 0; 1327 up->pending = 0; 1328 return err; 1329 } 1330 1331 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 1332 { 1333 struct ipv6_txoptions opt_space; 1334 struct udp_sock *up = udp_sk(sk); 1335 struct inet_sock *inet = inet_sk(sk); 1336 struct ipv6_pinfo *np = inet6_sk(sk); 1337 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 1338 struct in6_addr *daddr, *final_p, final; 1339 struct ipv6_txoptions *opt = NULL; 1340 struct ipv6_txoptions *opt_to_free = NULL; 1341 struct ip6_flowlabel *flowlabel = NULL; 1342 struct inet_cork_full cork; 1343 struct flowi6 *fl6 = &cork.fl.u.ip6; 1344 struct dst_entry *dst; 1345 struct ipcm6_cookie ipc6; 1346 int addr_len = msg->msg_namelen; 1347 bool connected = false; 1348 int ulen = len; 1349 int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE; 1350 int err; 1351 int is_udplite = IS_UDPLITE(sk); 1352 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 1353 1354 ipcm6_init(&ipc6); 1355 ipc6.gso_size = READ_ONCE(up->gso_size); 1356 ipc6.sockc.tsflags = sk->sk_tsflags; 1357 ipc6.sockc.mark = sk->sk_mark; 1358 1359 /* destination address check */ 1360 if (sin6) { 1361 if (addr_len < offsetof(struct sockaddr, sa_data)) 1362 return -EINVAL; 1363 1364 switch (sin6->sin6_family) { 1365 case AF_INET6: 1366 if (addr_len < SIN6_LEN_RFC2133) 1367 return -EINVAL; 1368 daddr = &sin6->sin6_addr; 1369 if (ipv6_addr_any(daddr) && 1370 ipv6_addr_v4mapped(&np->saddr)) 1371 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), 1372 daddr); 1373 break; 1374 case AF_INET: 1375 goto do_udp_sendmsg; 1376 case AF_UNSPEC: 1377 msg->msg_name = sin6 = NULL; 1378 msg->msg_namelen = addr_len = 0; 1379 daddr = NULL; 1380 break; 1381 default: 1382 return -EINVAL; 1383 } 1384 } else if (!up->pending) { 1385 if (sk->sk_state != TCP_ESTABLISHED) 1386 return -EDESTADDRREQ; 1387 daddr = &sk->sk_v6_daddr; 1388 } else 1389 daddr = NULL; 1390 1391 if (daddr) { 1392 if (ipv6_addr_v4mapped(daddr)) { 1393 struct sockaddr_in sin; 1394 sin.sin_family = AF_INET; 1395 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport; 1396 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 1397 msg->msg_name = &sin; 1398 msg->msg_namelen = sizeof(sin); 1399 do_udp_sendmsg: 1400 err = ipv6_only_sock(sk) ? 1401 -ENETUNREACH : udp_sendmsg(sk, msg, len); 1402 msg->msg_name = sin6; 1403 msg->msg_namelen = addr_len; 1404 return err; 1405 } 1406 } 1407 1408 /* Rough check on arithmetic overflow, 1409 better check is made in ip6_append_data(). 1410 */ 1411 if (len > INT_MAX - sizeof(struct udphdr)) 1412 return -EMSGSIZE; 1413 1414 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 1415 if (up->pending) { 1416 if (up->pending == AF_INET) 1417 return udp_sendmsg(sk, msg, len); 1418 /* 1419 * There are pending frames. 1420 * The socket lock must be held while it's corked. 1421 */ 1422 lock_sock(sk); 1423 if (likely(up->pending)) { 1424 if (unlikely(up->pending != AF_INET6)) { 1425 release_sock(sk); 1426 return -EAFNOSUPPORT; 1427 } 1428 dst = NULL; 1429 goto do_append_data; 1430 } 1431 release_sock(sk); 1432 } 1433 ulen += sizeof(struct udphdr); 1434 1435 memset(fl6, 0, sizeof(*fl6)); 1436 1437 if (sin6) { 1438 if (sin6->sin6_port == 0) 1439 return -EINVAL; 1440 1441 fl6->fl6_dport = sin6->sin6_port; 1442 daddr = &sin6->sin6_addr; 1443 1444 if (np->sndflow) { 1445 fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; 1446 if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) { 1447 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); 1448 if (IS_ERR(flowlabel)) 1449 return -EINVAL; 1450 } 1451 } 1452 1453 /* 1454 * Otherwise it will be difficult to maintain 1455 * sk->sk_dst_cache. 1456 */ 1457 if (sk->sk_state == TCP_ESTABLISHED && 1458 ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) 1459 daddr = &sk->sk_v6_daddr; 1460 1461 if (addr_len >= sizeof(struct sockaddr_in6) && 1462 sin6->sin6_scope_id && 1463 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr))) 1464 fl6->flowi6_oif = sin6->sin6_scope_id; 1465 } else { 1466 if (sk->sk_state != TCP_ESTABLISHED) 1467 return -EDESTADDRREQ; 1468 1469 fl6->fl6_dport = inet->inet_dport; 1470 daddr = &sk->sk_v6_daddr; 1471 fl6->flowlabel = np->flow_label; 1472 connected = true; 1473 } 1474 1475 if (!fl6->flowi6_oif) 1476 fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if); 1477 1478 if (!fl6->flowi6_oif) 1479 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 1480 1481 fl6->flowi6_uid = sk->sk_uid; 1482 1483 if (msg->msg_controllen) { 1484 opt = &opt_space; 1485 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1486 opt->tot_len = sizeof(*opt); 1487 ipc6.opt = opt; 1488 1489 err = udp_cmsg_send(sk, msg, &ipc6.gso_size); 1490 if (err > 0) 1491 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6, 1492 &ipc6); 1493 if (err < 0) { 1494 fl6_sock_release(flowlabel); 1495 return err; 1496 } 1497 if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { 1498 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); 1499 if (IS_ERR(flowlabel)) 1500 return -EINVAL; 1501 } 1502 if (!(opt->opt_nflen|opt->opt_flen)) 1503 opt = NULL; 1504 connected = false; 1505 } 1506 if (!opt) { 1507 opt = txopt_get(np); 1508 opt_to_free = opt; 1509 } 1510 if (flowlabel) 1511 opt = fl6_merge_options(&opt_space, flowlabel, opt); 1512 opt = ipv6_fixup_options(&opt_space, opt); 1513 ipc6.opt = opt; 1514 1515 fl6->flowi6_proto = sk->sk_protocol; 1516 fl6->flowi6_mark = ipc6.sockc.mark; 1517 fl6->daddr = *daddr; 1518 if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr)) 1519 fl6->saddr = np->saddr; 1520 fl6->fl6_sport = inet->inet_sport; 1521 1522 if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) { 1523 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, 1524 (struct sockaddr *)sin6, 1525 &fl6->saddr); 1526 if (err) 1527 goto out_no_dst; 1528 if (sin6) { 1529 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { 1530 /* BPF program rewrote IPv6-only by IPv4-mapped 1531 * IPv6. It's currently unsupported. 1532 */ 1533 err = -ENOTSUPP; 1534 goto out_no_dst; 1535 } 1536 if (sin6->sin6_port == 0) { 1537 /* BPF program set invalid port. Reject it. */ 1538 err = -EINVAL; 1539 goto out_no_dst; 1540 } 1541 fl6->fl6_dport = sin6->sin6_port; 1542 fl6->daddr = sin6->sin6_addr; 1543 } 1544 } 1545 1546 if (ipv6_addr_any(&fl6->daddr)) 1547 fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ 1548 1549 final_p = fl6_update_dst(fl6, opt, &final); 1550 if (final_p) 1551 connected = false; 1552 1553 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) { 1554 fl6->flowi6_oif = np->mcast_oif; 1555 connected = false; 1556 } else if (!fl6->flowi6_oif) 1557 fl6->flowi6_oif = np->ucast_oif; 1558 1559 security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 1560 1561 if (ipc6.tclass < 0) 1562 ipc6.tclass = np->tclass; 1563 1564 fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel); 1565 1566 dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected); 1567 if (IS_ERR(dst)) { 1568 err = PTR_ERR(dst); 1569 dst = NULL; 1570 goto out; 1571 } 1572 1573 if (ipc6.hlimit < 0) 1574 ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst); 1575 1576 if (msg->msg_flags&MSG_CONFIRM) 1577 goto do_confirm; 1578 back_from_confirm: 1579 1580 /* Lockless fast path for the non-corking case */ 1581 if (!corkreq) { 1582 struct sk_buff *skb; 1583 1584 skb = ip6_make_skb(sk, getfrag, msg, ulen, 1585 sizeof(struct udphdr), &ipc6, 1586 (struct rt6_info *)dst, 1587 msg->msg_flags, &cork); 1588 err = PTR_ERR(skb); 1589 if (!IS_ERR_OR_NULL(skb)) 1590 err = udp_v6_send_skb(skb, fl6, &cork.base); 1591 /* ip6_make_skb steals dst reference */ 1592 goto out_no_dst; 1593 } 1594 1595 lock_sock(sk); 1596 if (unlikely(up->pending)) { 1597 /* The socket is already corked while preparing it. */ 1598 /* ... which is an evident application bug. --ANK */ 1599 release_sock(sk); 1600 1601 net_dbg_ratelimited("udp cork app bug 2\n"); 1602 err = -EINVAL; 1603 goto out; 1604 } 1605 1606 up->pending = AF_INET6; 1607 1608 do_append_data: 1609 if (ipc6.dontfrag < 0) 1610 ipc6.dontfrag = np->dontfrag; 1611 up->len += ulen; 1612 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), 1613 &ipc6, fl6, (struct rt6_info *)dst, 1614 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 1615 if (err) 1616 udp_v6_flush_pending_frames(sk); 1617 else if (!corkreq) 1618 err = udp_v6_push_pending_frames(sk); 1619 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) 1620 up->pending = 0; 1621 1622 if (err > 0) 1623 err = np->recverr ? net_xmit_errno(err) : 0; 1624 release_sock(sk); 1625 1626 out: 1627 dst_release(dst); 1628 out_no_dst: 1629 fl6_sock_release(flowlabel); 1630 txopt_put(opt_to_free); 1631 if (!err) 1632 return len; 1633 /* 1634 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting 1635 * ENOBUFS might not be good (it's not tunable per se), but otherwise 1636 * we don't have a good statistic (IpOutDiscards but it can be too many 1637 * things). We could add another new stat but at least for now that 1638 * seems like overkill. 1639 */ 1640 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 1641 UDP6_INC_STATS(sock_net(sk), 1642 UDP_MIB_SNDBUFERRORS, is_udplite); 1643 } 1644 return err; 1645 1646 do_confirm: 1647 if (msg->msg_flags & MSG_PROBE) 1648 dst_confirm_neigh(dst, &fl6->daddr); 1649 if (!(msg->msg_flags&MSG_PROBE) || len) 1650 goto back_from_confirm; 1651 err = 0; 1652 goto out; 1653 } 1654 EXPORT_SYMBOL(udpv6_sendmsg); 1655 1656 static void udpv6_splice_eof(struct socket *sock) 1657 { 1658 struct sock *sk = sock->sk; 1659 struct udp_sock *up = udp_sk(sk); 1660 1661 if (!up->pending || READ_ONCE(up->corkflag)) 1662 return; 1663 1664 lock_sock(sk); 1665 if (up->pending && !READ_ONCE(up->corkflag)) 1666 udp_v6_push_pending_frames(sk); 1667 release_sock(sk); 1668 } 1669 1670 void udpv6_destroy_sock(struct sock *sk) 1671 { 1672 struct udp_sock *up = udp_sk(sk); 1673 lock_sock(sk); 1674 1675 /* protects from races with udp_abort() */ 1676 sock_set_flag(sk, SOCK_DEAD); 1677 udp_v6_flush_pending_frames(sk); 1678 release_sock(sk); 1679 1680 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 1681 if (up->encap_type) { 1682 void (*encap_destroy)(struct sock *sk); 1683 encap_destroy = READ_ONCE(up->encap_destroy); 1684 if (encap_destroy) 1685 encap_destroy(sk); 1686 } 1687 if (up->encap_enabled) { 1688 static_branch_dec(&udpv6_encap_needed_key); 1689 udp_encap_disable(); 1690 } 1691 } 1692 } 1693 1694 /* 1695 * Socket option code for UDP 1696 */ 1697 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 1698 unsigned int optlen) 1699 { 1700 if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET) 1701 return udp_lib_setsockopt(sk, level, optname, 1702 optval, optlen, 1703 udp_v6_push_pending_frames); 1704 return ipv6_setsockopt(sk, level, optname, optval, optlen); 1705 } 1706 1707 int udpv6_getsockopt(struct sock *sk, int level, int optname, 1708 char __user *optval, int __user *optlen) 1709 { 1710 if (level == SOL_UDP || level == SOL_UDPLITE) 1711 return udp_lib_getsockopt(sk, level, optname, optval, optlen); 1712 return ipv6_getsockopt(sk, level, optname, optval, optlen); 1713 } 1714 1715 static const struct inet6_protocol udpv6_protocol = { 1716 .handler = udpv6_rcv, 1717 .err_handler = udpv6_err, 1718 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1719 }; 1720 1721 /* ------------------------------------------------------------------------ */ 1722 #ifdef CONFIG_PROC_FS 1723 int udp6_seq_show(struct seq_file *seq, void *v) 1724 { 1725 if (v == SEQ_START_TOKEN) { 1726 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); 1727 } else { 1728 int bucket = ((struct udp_iter_state *)seq->private)->bucket; 1729 const struct inet_sock *inet = inet_sk((const struct sock *)v); 1730 __u16 srcp = ntohs(inet->inet_sport); 1731 __u16 destp = ntohs(inet->inet_dport); 1732 __ip6_dgram_sock_seq_show(seq, v, srcp, destp, 1733 udp_rqueue_get(v), bucket); 1734 } 1735 return 0; 1736 } 1737 1738 const struct seq_operations udp6_seq_ops = { 1739 .start = udp_seq_start, 1740 .next = udp_seq_next, 1741 .stop = udp_seq_stop, 1742 .show = udp6_seq_show, 1743 }; 1744 EXPORT_SYMBOL(udp6_seq_ops); 1745 1746 static struct udp_seq_afinfo udp6_seq_afinfo = { 1747 .family = AF_INET6, 1748 .udp_table = NULL, 1749 }; 1750 1751 int __net_init udp6_proc_init(struct net *net) 1752 { 1753 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops, 1754 sizeof(struct udp_iter_state), &udp6_seq_afinfo)) 1755 return -ENOMEM; 1756 return 0; 1757 } 1758 1759 void udp6_proc_exit(struct net *net) 1760 { 1761 remove_proc_entry("udp6", net->proc_net); 1762 } 1763 #endif /* CONFIG_PROC_FS */ 1764 1765 /* ------------------------------------------------------------------------ */ 1766 1767 struct proto udpv6_prot = { 1768 .name = "UDPv6", 1769 .owner = THIS_MODULE, 1770 .close = udp_lib_close, 1771 .pre_connect = udpv6_pre_connect, 1772 .connect = ip6_datagram_connect, 1773 .disconnect = udp_disconnect, 1774 .ioctl = udp_ioctl, 1775 .init = udpv6_init_sock, 1776 .destroy = udpv6_destroy_sock, 1777 .setsockopt = udpv6_setsockopt, 1778 .getsockopt = udpv6_getsockopt, 1779 .sendmsg = udpv6_sendmsg, 1780 .recvmsg = udpv6_recvmsg, 1781 .splice_eof = udpv6_splice_eof, 1782 .release_cb = ip6_datagram_release_cb, 1783 .hash = udp_lib_hash, 1784 .unhash = udp_lib_unhash, 1785 .rehash = udp_v6_rehash, 1786 .get_port = udp_v6_get_port, 1787 .put_port = udp_lib_unhash, 1788 #ifdef CONFIG_BPF_SYSCALL 1789 .psock_update_sk_prot = udp_bpf_update_proto, 1790 #endif 1791 1792 .memory_allocated = &udp_memory_allocated, 1793 .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, 1794 1795 .sysctl_mem = sysctl_udp_mem, 1796 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), 1797 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), 1798 .obj_size = sizeof(struct udp6_sock), 1799 .h.udp_table = NULL, 1800 .diag_destroy = udp_abort, 1801 }; 1802 1803 static struct inet_protosw udpv6_protosw = { 1804 .type = SOCK_DGRAM, 1805 .protocol = IPPROTO_UDP, 1806 .prot = &udpv6_prot, 1807 .ops = &inet6_dgram_ops, 1808 .flags = INET_PROTOSW_PERMANENT, 1809 }; 1810 1811 int __init udpv6_init(void) 1812 { 1813 int ret; 1814 1815 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP); 1816 if (ret) 1817 goto out; 1818 1819 ret = inet6_register_protosw(&udpv6_protosw); 1820 if (ret) 1821 goto out_udpv6_protocol; 1822 out: 1823 return ret; 1824 1825 out_udpv6_protocol: 1826 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); 1827 goto out; 1828 } 1829 1830 void udpv6_exit(void) 1831 { 1832 inet6_unregister_protosw(&udpv6_protosw); 1833 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); 1834 } 1835