// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if defined(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
		return true;
#endif
	return false;
}

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

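/* Socket lookup scoring: a negative return means "no match". The network
 * namespace, local port, address family and bound local address must match
 * exactly; every additional attribute that matches (connected remote port
 * and address, bound device, receiving CPU) bumps the score so that
 * udp6_lib_lookup2() can pick the most specific socket.
 */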
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	score++;

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;

	return score;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, bool exact_dif,
		struct udp_hslot *hslot2, struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);

				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
			}
			result = sk;
			badness = score;
		}
	}
	return result;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result;
	bool exact_dif = udp6_lib_exact_dif_match(net, skb);

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif, exact_dif,
				  hslot2, skb);
	if (!result) {
		hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
		slot2 = hash2 & udptable->mask;

		hslot2 = &udptable->hash2[slot2];

		result = udp6_lib_lookup2(net, saddr, sport,
					  &in6addr_any, hnum, dif, sdif,
					  exact_dif, hslot2,
					  skb);
	}
	if (unlikely(IS_ERR(result)))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, skb);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, skb);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

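/* Enqueue one skb on the socket receive queue. For connected sockets the
 * RPS rxhash, NAPI id and incoming CPU are refreshed first; unconnected
 * sockets only record the NAPI id once. Returns 0 on success or -1 after
 * freeing the skb and updating the error counters.
 */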
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

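/* Multicast delivery clones the skb for every matching socket except the
 * last one, which consumes the original. When the primary port hash chain
 * holds more than 10 sockets, the walk switches to the address+port hash
 * (hash2) and scans both the daddr:port and the *:port chains.
 */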
/*
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					 ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

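/* Main receive routine, shared by UDP and UDP-Lite. It validates the UDP
 * length and checksum setup, then delivers the packet to a socket found by
 * early demux, by multicast distribution, or by a regular unicast lookup.
 * Packets with no matching socket trigger an ICMPv6 port unreachable.
 */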
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

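/* Early demux helper: only connected (TCP_ESTABLISHED) sockets with an exact
 * four-tuple match are eligible, and only the first socket in the hash2
 * chain is examined, keeping the lookup cheap on the fast path.
 */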
static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr:	source address
 *	@daddr:	destination address
 *	@len:	length of packet (UDP header plus payload)
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

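/* Fill in the UDP header of a fully built skb and hand it to the IPv6
 * output path. Performs the UDP_SEGMENT (GSO) sanity checks and selects
 * the checksum method: UDP-Lite, disabled (no_check6_tx), hardware offload
 * or a software checksum over the pseudo-header.
 */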
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}

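/* sendmsg() for UDPv6. IPv4 and v4-mapped destinations are handed off to
 * udp_sendmsg(). Otherwise the flow (addresses, interface, flow label and
 * cmsg options) is assembled and, unless the socket is corked, the datagram
 * is built and sent on a lockless fast path via ip6_make_skb().
 */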
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = up->gso_size;
	ipc6.sockc.tsflags = sk->sk_tsflags;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (!flowlabel)
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = sk->sk_mark;
	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

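/* Socket teardown: flush any pending (corked) frames, run the tunnel's
 * encap_destroy hook if one is installed, and drop the encap static-key
 * reference before the generic IPv6 socket destruction.
 */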
void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udpv6_encap_needed_key);
	}

	inet6_destroy_sock(sk);
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol udpv6_protocol = {
	.early_demux		= udp_v6_early_demux,
	.early_demux_handler	= udp_v6_early_demux,
	.handler		= udpv6_rcv,
	.err_handler		= udpv6_err,
	.flags			= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udpv6_setsockopt,
	.compat_getsockopt	= compat_udpv6_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}