// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

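	/* If the socket is connected, the packet's source address must also
	 * match the peer address: a connected socket is more specific, so a
	 * match raises its score.
	 */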
	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     const struct in6_addr *saddr,
				     __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk, false))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}

static inline struct sock *udp6_lookup_run_bpf(struct net *net,
					       struct udp_table *udptable,
					       struct sk_buff *skb,
					       const struct in6_addr *saddr,
					       __be16 sport,
					       const struct in6_addr *daddr,
					       u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp6_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum, dif);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

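	/* Nothing matched so far: sockets bound to the unspecified address
	 * hash under in6addr_any, so the secondary hash has to be recomputed
	 * for the wildcard slot.
	 */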
	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbogram: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

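/* Encapsulation sockets are rare: this static key keeps the extra tunnel
 * handling out of the receive fast path until the first encap socket is
 * enabled.
 */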
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

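/* ICMPv6 error handler for UDP and UDP-Lite: locate the local socket that
 * sent the offending packet (trying tunnel lookups if no plain socket
 * matches), update PMTU or redirect state, and queue the error on the
 * socket when the application asked for it.
 */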
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		else
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

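/* Per-socket receive entry point: a GSO packet that the socket cannot
 * accept as-is is segmented here, and each resulting skb is queued
 * individually.
 */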
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

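/* Decide whether a multicast-bound socket should see this packet: the
 * namespace, hashed port, family, connected peer, bound device and group
 * membership must all match.
 */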
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

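/* Cache the route on the socket, along with its cookie, so that early
 * demux can validate the cached dst on later packets with dst_check().
 */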
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

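/* Main receive routine, shared by UDP and UDP-Lite: validate the length
 * and checksum, then hand the packet to an early-demuxed, multicast or
 * freshly looked-up socket.
 */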
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb, &refcounted);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb_reason(skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb_reason(skb, reason);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

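/* Early demux: for packets delivered to this host, try to match an
 * established (connected) socket before routing, so its cached dst can
 * be reused.
 */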
INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW checksum won't work since there are two or more
		 * fragments on the socket, so the checksums of all the
		 * sk_buffs have to be added up together.
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

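/* sendmsg() for UDPv6: v4-mapped destinations are handed over to
 * udp_sendmsg(), corked sockets keep appending data under the socket
 * lock, and a single uncorked datagram takes the lockless fast path.
 */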
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = sk->sk_mark;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

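	/* UDP-Lite needs its own getfrag: partial checksum coverage has to
	 * be computed while the payload is copied in from userspace.
	 */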
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		if (up->pending == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

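	/* Resolve the route; for a connected socket the lookup can reuse the
	 * destination entry cached on the socket.
	 */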
	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}

	inet6_destroy_sock(sk);
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol udpv6_protocol = {
	.early_demux		= udp_v6_early_demux,
	.early_demux_handler	= udp_v6_early_demux,
	.handler		= udpv6_rcv,
	.err_handler		= udpv6_err,
	.flags			= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}