/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if defined(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
		return true;
#endif
	return false;
}

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp_ipv6_hash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

static void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

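/* Score how well a bound socket matches an incoming packet: mandatory
 * fields (netns, local port, local address, family, bound device) must
 * match, and each matching optional field (connected peer port/address,
 * incoming CPU) adds one point. Returns -1 if the socket cannot receive
 * the packet at all.
 */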
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	score++;

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;

	return score;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, bool exact_dif,
		struct udp_hslot *hslot2, struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);

				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
			}
			result = sk;
			badness = score;
		}
	}
	return result;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result;
	bool exact_dif = udp6_lib_exact_dif_match(net, skb);

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif, exact_dif,
				  hslot2, skb);
	if (!result) {
		hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
		slot2 = hash2 & udptable->mask;

		hslot2 = &udptable->hash2[slot2];

		result = udp6_lib_lookup2(net, saddr, sport,
					  &in6addr_any, hnum, dif, sdif,
					  exact_dif, hslot2,
					  skb);
	}
	if (unlikely(IS_ERR(result)))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, skb);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbogram: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;
	struct udp_mib *mib;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	peeking = flags & MSG_PEEK;
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeked)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, u32 info);

		if (!ip6tun_encaps[i])
			continue;
		handler = rcu_dereference(ip6tun_encaps[i]->err_handler);
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, skb);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

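/* Charge one skb to the socket receive queue. RPS hash, NAPI id and
 * incoming-CPU hints are refreshed only for connected sockets; unconnected
 * sockets just record the NAPI id once.
 */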
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {		/* full coverage was set */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
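/* Deliver one skb to every matching multicast socket: the skb is cloned for
 * each listener after the first, and the address+port (hash2) slots are
 * scanned instead of the port-only slot once that slot gets crowded.
 */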
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					 ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

static void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing - handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW checksumming won't work, as there are two or more
		 * fragments on the socket; the checksums of all the
		 * fragments have to be folded together here
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize)
			return -EINVAL;
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
			return -EINVAL;
		if (udp_sk(sk)->no_check6_tx)
			return -EINVAL;
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb)))
			return -EIO;

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {	/* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = up->gso_size;
	ipc6.sockc.tsflags = sk->sk_tsflags;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow;
	 * a better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (!flowlabel)
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = sk->sk_mark;
	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote the IPv6-only address
				 * to an IPv4-mapped one; this is currently
				 * unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udpv6_encap_needed_key);
	}

	inet6_destroy_sock(sk);
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol udpv6_protocol = {
	.early_demux		= udp_v6_early_demux,
	.early_demux_handler	= udp_v6_early_demux,
	.handler		= udpv6_rcv,
	.err_handler		= udpv6_err,
	.flags			= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udpv6_setsockopt,
	.compat_getsockopt	= compat_udpv6_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}