// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <trace/events/udp.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
#include <net/gro.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			badness = compute_score(sk, net, saddr, sport,
						daddr, hnum, dif, sdif);
		}
	}
	return result;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ?
skb->len : udp_skb_len(skb); 315 } 316 317 /* 318 * This should be easy, if there is something there we 319 * return it, otherwise we block. 320 */ 321 322 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 323 int flags, int *addr_len) 324 { 325 struct ipv6_pinfo *np = inet6_sk(sk); 326 struct inet_sock *inet = inet_sk(sk); 327 struct sk_buff *skb; 328 unsigned int ulen, copied; 329 int off, err, peeking = flags & MSG_PEEK; 330 int is_udplite = IS_UDPLITE(sk); 331 struct udp_mib __percpu *mib; 332 bool checksum_valid = false; 333 int is_udp4; 334 335 if (flags & MSG_ERRQUEUE) 336 return ipv6_recv_error(sk, msg, len, addr_len); 337 338 if (np->rxpmtu && np->rxopt.bits.rxpmtu) 339 return ipv6_recv_rxpmtu(sk, msg, len, addr_len); 340 341 try_again: 342 off = sk_peek_offset(sk, flags); 343 skb = __skb_recv_udp(sk, flags, &off, &err); 344 if (!skb) 345 return err; 346 347 ulen = udp6_skb_len(skb); 348 copied = len; 349 if (copied > ulen - off) 350 copied = ulen - off; 351 else if (copied < ulen) 352 msg->msg_flags |= MSG_TRUNC; 353 354 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 355 mib = __UDPX_MIB(sk, is_udp4); 356 357 /* 358 * If checksum is needed at all, try to do it while copying the 359 * data. If the data is truncated, or if we only want a partial 360 * coverage checksum (UDP-Lite), do it before the copy. 361 */ 362 363 if (copied < ulen || peeking || 364 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { 365 checksum_valid = udp_skb_csum_unnecessary(skb) || 366 !__udp_lib_checksum_complete(skb); 367 if (!checksum_valid) 368 goto csum_copy_err; 369 } 370 371 if (checksum_valid || udp_skb_csum_unnecessary(skb)) { 372 if (udp_skb_is_linear(skb)) 373 err = copy_linear_skb(skb, copied, off, &msg->msg_iter); 374 else 375 err = skb_copy_datagram_msg(skb, off, msg, copied); 376 } else { 377 err = skb_copy_and_csum_datagram_msg(skb, off, msg); 378 if (err == -EINVAL) 379 goto csum_copy_err; 380 } 381 if (unlikely(err)) { 382 if (!peeking) { 383 atomic_inc(&sk->sk_drops); 384 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 385 } 386 kfree_skb(skb); 387 return err; 388 } 389 if (!peeking) 390 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS); 391 392 sock_recv_cmsgs(msg, sk, skb); 393 394 /* Copy the address. */ 395 if (msg->msg_name) { 396 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 397 sin6->sin6_family = AF_INET6; 398 sin6->sin6_port = udp_hdr(skb)->source; 399 sin6->sin6_flowinfo = 0; 400 401 if (is_udp4) { 402 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, 403 &sin6->sin6_addr); 404 sin6->sin6_scope_id = 0; 405 } else { 406 sin6->sin6_addr = ipv6_hdr(skb)->saddr; 407 sin6->sin6_scope_id = 408 ipv6_iface_scope_id(&sin6->sin6_addr, 409 inet6_iif(skb)); 410 } 411 *addr_len = sizeof(*sin6); 412 413 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, 414 (struct sockaddr *)sin6, 415 addr_len); 416 } 417 418 if (udp_test_bit(GRO_ENABLED, sk)) 419 udp_cmsg_recv(msg, sk, skb); 420 421 if (np->rxopt.all) 422 ip6_datagram_recv_common_ctl(sk, msg, skb); 423 424 if (is_udp4) { 425 if (inet_cmsg_flags(inet)) 426 ip_cmsg_recv_offset(msg, sk, skb, 427 sizeof(struct udphdr), off); 428 } else { 429 if (np->rxopt.all) 430 ip6_datagram_recv_specific_ctl(sk, msg, skb); 431 } 432 433 err = copied; 434 if (flags & MSG_TRUNC) 435 err = ulen; 436 437 skb_consume_udp(sk, skb, peeking ? 
-err : err); 438 return err; 439 440 csum_copy_err: 441 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, 442 udp_skb_destructor)) { 443 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS); 444 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 445 } 446 kfree_skb(skb); 447 448 /* starting over for a new packet, but check if we need to yield */ 449 cond_resched(); 450 msg->msg_flags &= ~MSG_TRUNC; 451 goto try_again; 452 } 453 454 DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key); 455 void udpv6_encap_enable(void) 456 { 457 static_branch_inc(&udpv6_encap_needed_key); 458 } 459 EXPORT_SYMBOL(udpv6_encap_enable); 460 461 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go 462 * through error handlers in encapsulations looking for a match. 463 */ 464 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, 465 struct inet6_skb_parm *opt, 466 u8 type, u8 code, int offset, __be32 info) 467 { 468 int i; 469 470 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 471 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 472 u8 type, u8 code, int offset, __be32 info); 473 const struct ip6_tnl_encap_ops *encap; 474 475 encap = rcu_dereference(ip6tun_encaps[i]); 476 if (!encap) 477 continue; 478 handler = encap->err_handler; 479 if (handler && !handler(skb, opt, type, code, offset, info)) 480 return 0; 481 } 482 483 return -ENOENT; 484 } 485 486 /* Try to match ICMP errors to UDP tunnels by looking up a socket without 487 * reversing source and destination port: this will match tunnels that force the 488 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that 489 * lwtunnels might actually break this assumption by being configured with 490 * different destination ports on endpoints, in this case we won't be able to 491 * trace ICMP messages back to them. 492 * 493 * If this doesn't match any socket, probe tunnels with arbitrary destination 494 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port 495 * we've sent packets to won't necessarily match the local destination port. 496 * 497 * Then ask the tunnel implementation to match the error against a valid 498 * association. 499 * 500 * Return an error if we can't find a match, the socket if we need further 501 * processing, zero otherwise. 
502 */ 503 static struct sock *__udp6_lib_err_encap(struct net *net, 504 const struct ipv6hdr *hdr, int offset, 505 struct udphdr *uh, 506 struct udp_table *udptable, 507 struct sock *sk, 508 struct sk_buff *skb, 509 struct inet6_skb_parm *opt, 510 u8 type, u8 code, __be32 info) 511 { 512 int (*lookup)(struct sock *sk, struct sk_buff *skb); 513 int network_offset, transport_offset; 514 struct udp_sock *up; 515 516 network_offset = skb_network_offset(skb); 517 transport_offset = skb_transport_offset(skb); 518 519 /* Network header needs to point to the outer IPv6 header inside ICMP */ 520 skb_reset_network_header(skb); 521 522 /* Transport header needs to point to the UDP header */ 523 skb_set_transport_header(skb, offset); 524 525 if (sk) { 526 up = udp_sk(sk); 527 528 lookup = READ_ONCE(up->encap_err_lookup); 529 if (lookup && lookup(sk, skb)) 530 sk = NULL; 531 532 goto out; 533 } 534 535 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source, 536 &hdr->saddr, uh->dest, 537 inet6_iif(skb), 0, udptable, skb); 538 if (sk) { 539 up = udp_sk(sk); 540 541 lookup = READ_ONCE(up->encap_err_lookup); 542 if (!lookup || lookup(sk, skb)) 543 sk = NULL; 544 } 545 546 out: 547 if (!sk) { 548 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code, 549 offset, info)); 550 } 551 552 skb_set_transport_header(skb, transport_offset); 553 skb_set_network_header(skb, network_offset); 554 555 return sk; 556 } 557 558 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 559 u8 type, u8 code, int offset, __be32 info, 560 struct udp_table *udptable) 561 { 562 struct ipv6_pinfo *np; 563 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; 564 const struct in6_addr *saddr = &hdr->saddr; 565 const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? 
: &hdr->daddr; 566 struct udphdr *uh = (struct udphdr *)(skb->data+offset); 567 bool tunnel = false; 568 struct sock *sk; 569 int harderr; 570 int err; 571 struct net *net = dev_net(skb->dev); 572 573 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, 574 inet6_iif(skb), inet6_sdif(skb), udptable, NULL); 575 576 if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) { 577 /* No socket for error: try tunnels before discarding */ 578 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 579 sk = __udp6_lib_err_encap(net, hdr, offset, uh, 580 udptable, sk, skb, 581 opt, type, code, info); 582 if (!sk) 583 return 0; 584 } else 585 sk = ERR_PTR(-ENOENT); 586 587 if (IS_ERR(sk)) { 588 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), 589 ICMP6_MIB_INERRORS); 590 return PTR_ERR(sk); 591 } 592 593 tunnel = true; 594 } 595 596 harderr = icmpv6_err_convert(type, code, &err); 597 np = inet6_sk(sk); 598 599 if (type == ICMPV6_PKT_TOOBIG) { 600 if (!ip6_sk_accept_pmtu(sk)) 601 goto out; 602 ip6_sk_update_pmtu(skb, sk, info); 603 if (np->pmtudisc != IPV6_PMTUDISC_DONT) 604 harderr = 1; 605 } 606 if (type == NDISC_REDIRECT) { 607 if (tunnel) { 608 ip6_redirect(skb, sock_net(sk), inet6_iif(skb), 609 READ_ONCE(sk->sk_mark), sk->sk_uid); 610 } else { 611 ip6_sk_redirect(skb, sk); 612 } 613 goto out; 614 } 615 616 /* Tunnels don't have an application socket: don't pass errors back */ 617 if (tunnel) { 618 if (udp_sk(sk)->encap_err_rcv) 619 udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, 620 ntohl(info), (u8 *)(uh+1)); 621 goto out; 622 } 623 624 if (!np->recverr) { 625 if (!harderr || sk->sk_state != TCP_ESTABLISHED) 626 goto out; 627 } else { 628 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); 629 } 630 631 sk->sk_err = err; 632 sk_error_report(sk); 633 out: 634 return 0; 635 } 636 637 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 638 { 639 int rc; 640 641 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { 642 sock_rps_save_rxhash(sk, skb); 643 sk_mark_napi_id(sk, skb); 644 sk_incoming_cpu_update(sk); 645 } else { 646 sk_mark_napi_id_once(sk, skb); 647 } 648 649 rc = __udp_enqueue_schedule_skb(sk, skb); 650 if (rc < 0) { 651 int is_udplite = IS_UDPLITE(sk); 652 enum skb_drop_reason drop_reason; 653 654 /* Note that an ENOMEM error is charged twice */ 655 if (rc == -ENOMEM) { 656 UDP6_INC_STATS(sock_net(sk), 657 UDP_MIB_RCVBUFERRORS, is_udplite); 658 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 659 } else { 660 UDP6_INC_STATS(sock_net(sk), 661 UDP_MIB_MEMERRORS, is_udplite); 662 drop_reason = SKB_DROP_REASON_PROTO_MEM; 663 } 664 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 665 kfree_skb_reason(skb, drop_reason); 666 trace_udp_fail_queue_rcv_skb(rc, sk); 667 return -1; 668 } 669 670 return 0; 671 } 672 673 static __inline__ int udpv6_err(struct sk_buff *skb, 674 struct inet6_skb_parm *opt, u8 type, 675 u8 code, int offset, __be32 info) 676 { 677 return __udp6_lib_err(skb, opt, type, code, offset, info, 678 dev_net(skb->dev)->ipv4.udp_table); 679 } 680 681 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) 682 { 683 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 684 struct udp_sock *up = udp_sk(sk); 685 int is_udplite = IS_UDPLITE(sk); 686 687 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { 688 drop_reason = SKB_DROP_REASON_XFRM_POLICY; 689 goto drop; 690 } 691 nf_reset_ct(skb); 692 693 if (static_branch_unlikely(&udpv6_encap_needed_key) && 694 READ_ONCE(up->encap_type)) { 695 int (*encap_rcv)(struct sock *sk, 
struct sk_buff *skb); 696 697 /* 698 * This is an encapsulation socket so pass the skb to 699 * the socket's udp_encap_rcv() hook. Otherwise, just 700 * fall through and pass this up the UDP socket. 701 * up->encap_rcv() returns the following value: 702 * =0 if skb was successfully passed to the encap 703 * handler or was discarded by it. 704 * >0 if skb should be passed on to UDP. 705 * <0 if skb should be resubmitted as proto -N 706 */ 707 708 /* if we're overly short, let UDP handle it */ 709 encap_rcv = READ_ONCE(up->encap_rcv); 710 if (encap_rcv) { 711 int ret; 712 713 /* Verify checksum before giving to encap */ 714 if (udp_lib_checksum_complete(skb)) 715 goto csum_error; 716 717 ret = encap_rcv(sk, skb); 718 if (ret <= 0) { 719 __UDP6_INC_STATS(sock_net(sk), 720 UDP_MIB_INDATAGRAMS, 721 is_udplite); 722 return -ret; 723 } 724 } 725 726 /* FALLTHROUGH -- it's a UDP Packet */ 727 } 728 729 /* 730 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 731 */ 732 if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) { 733 u16 pcrlen = READ_ONCE(up->pcrlen); 734 735 if (pcrlen == 0) { /* full coverage was set */ 736 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", 737 UDP_SKB_CB(skb)->cscov, skb->len); 738 goto drop; 739 } 740 if (UDP_SKB_CB(skb)->cscov < pcrlen) { 741 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n", 742 UDP_SKB_CB(skb)->cscov, pcrlen); 743 goto drop; 744 } 745 } 746 747 prefetch(&sk->sk_rmem_alloc); 748 if (rcu_access_pointer(sk->sk_filter) && 749 udp_lib_checksum_complete(skb)) 750 goto csum_error; 751 752 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) { 753 drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 754 goto drop; 755 } 756 757 udp_csum_pull_header(skb); 758 759 skb_dst_drop(skb); 760 761 return __udpv6_queue_rcv_skb(sk, skb); 762 763 csum_error: 764 drop_reason = SKB_DROP_REASON_UDP_CSUM; 765 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 766 drop: 767 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 768 atomic_inc(&sk->sk_drops); 769 kfree_skb_reason(skb, drop_reason); 770 return -1; 771 } 772 773 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 774 { 775 struct sk_buff *next, *segs; 776 int ret; 777 778 if (likely(!udp_unexpected_gso(sk, skb))) 779 return udpv6_queue_rcv_one_skb(sk, skb); 780 781 __skb_push(skb, -skb_mac_offset(skb)); 782 segs = udp_rcv_segment(sk, skb, false); 783 skb_list_walk_safe(segs, skb, next) { 784 __skb_pull(skb, skb_transport_offset(skb)); 785 786 udp_post_segment_fix_csum(skb); 787 ret = udpv6_queue_rcv_one_skb(sk, skb); 788 if (ret > 0) 789 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret, 790 true); 791 } 792 return 0; 793 } 794 795 static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk, 796 __be16 loc_port, const struct in6_addr *loc_addr, 797 __be16 rmt_port, const struct in6_addr *rmt_addr, 798 int dif, int sdif, unsigned short hnum) 799 { 800 const struct inet_sock *inet = inet_sk(sk); 801 802 if (!net_eq(sock_net(sk), net)) 803 return false; 804 805 if (udp_sk(sk)->udp_port_hash != hnum || 806 sk->sk_family != PF_INET6 || 807 (inet->inet_dport && inet->inet_dport != rmt_port) || 808 (!ipv6_addr_any(&sk->sk_v6_daddr) && 809 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) || 810 !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) || 811 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && 812 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))) 813 return 
false; 814 if (!inet6_mc_check(sk, loc_addr, rmt_addr)) 815 return false; 816 return true; 817 } 818 819 static void udp6_csum_zero_error(struct sk_buff *skb) 820 { 821 /* RFC 2460 section 8.1 says that we SHOULD log 822 * this error. Well, it is reasonable. 823 */ 824 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", 825 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), 826 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); 827 } 828 829 /* 830 * Note: called only from the BH handler context, 831 * so we don't need to lock the hashes. 832 */ 833 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 834 const struct in6_addr *saddr, const struct in6_addr *daddr, 835 struct udp_table *udptable, int proto) 836 { 837 struct sock *sk, *first = NULL; 838 const struct udphdr *uh = udp_hdr(skb); 839 unsigned short hnum = ntohs(uh->dest); 840 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); 841 unsigned int offset = offsetof(typeof(*sk), sk_node); 842 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); 843 int dif = inet6_iif(skb); 844 int sdif = inet6_sdif(skb); 845 struct hlist_node *node; 846 struct sk_buff *nskb; 847 848 if (use_hash2) { 849 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) & 850 udptable->mask; 851 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask; 852 start_lookup: 853 hslot = &udptable->hash2[hash2]; 854 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 855 } 856 857 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { 858 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr, 859 uh->source, saddr, dif, sdif, 860 hnum)) 861 continue; 862 /* If zero checksum and no_check is not on for 863 * the socket then skip it. 864 */ 865 if (!uh->check && !udp_get_no_check6_rx(sk)) 866 continue; 867 if (!first) { 868 first = sk; 869 continue; 870 } 871 nskb = skb_clone(skb, GFP_ATOMIC); 872 if (unlikely(!nskb)) { 873 atomic_inc(&sk->sk_drops); 874 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 875 IS_UDPLITE(sk)); 876 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, 877 IS_UDPLITE(sk)); 878 continue; 879 } 880 881 if (udpv6_queue_rcv_skb(sk, nskb) > 0) 882 consume_skb(nskb); 883 } 884 885 /* Also lookup *:port if we are using hash2 and haven't done so yet. 
*/ 886 if (use_hash2 && hash2 != hash2_any) { 887 hash2 = hash2_any; 888 goto start_lookup; 889 } 890 891 if (first) { 892 if (udpv6_queue_rcv_skb(first, skb) > 0) 893 consume_skb(skb); 894 } else { 895 kfree_skb(skb); 896 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI, 897 proto == IPPROTO_UDPLITE); 898 } 899 return 0; 900 } 901 902 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) 903 { 904 if (udp_sk_rx_dst_set(sk, dst)) { 905 const struct rt6_info *rt = (const struct rt6_info *)dst; 906 907 sk->sk_rx_dst_cookie = rt6_get_cookie(rt); 908 } 909 } 910 911 /* wrapper for udp_queue_rcv_skb tacking care of csum conversion and 912 * return code conversion for ip layer consumption 913 */ 914 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, 915 struct udphdr *uh) 916 { 917 int ret; 918 919 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) 920 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo); 921 922 ret = udpv6_queue_rcv_skb(sk, skb); 923 924 /* a return value > 0 means to resubmit the input */ 925 if (ret > 0) 926 return ret; 927 return 0; 928 } 929 930 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 931 int proto) 932 { 933 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; 934 const struct in6_addr *saddr, *daddr; 935 struct net *net = dev_net(skb->dev); 936 struct udphdr *uh; 937 struct sock *sk; 938 bool refcounted; 939 u32 ulen = 0; 940 941 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 942 goto discard; 943 944 saddr = &ipv6_hdr(skb)->saddr; 945 daddr = &ipv6_hdr(skb)->daddr; 946 uh = udp_hdr(skb); 947 948 ulen = ntohs(uh->len); 949 if (ulen > skb->len) 950 goto short_packet; 951 952 if (proto == IPPROTO_UDP) { 953 /* UDP validates ulen. */ 954 955 /* Check for jumbo payload */ 956 if (ulen == 0) 957 ulen = skb->len; 958 959 if (ulen < sizeof(*uh)) 960 goto short_packet; 961 962 if (ulen < skb->len) { 963 if (pskb_trim_rcsum(skb, ulen)) 964 goto short_packet; 965 saddr = &ipv6_hdr(skb)->saddr; 966 daddr = &ipv6_hdr(skb)->daddr; 967 uh = udp_hdr(skb); 968 } 969 } 970 971 if (udp6_csum_init(skb, uh, proto)) 972 goto csum_error; 973 974 /* Check if the socket is already available, e.g. 
due to early demux */ 975 sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest, 976 &refcounted, udp6_ehashfn); 977 if (IS_ERR(sk)) 978 goto no_sk; 979 980 if (sk) { 981 struct dst_entry *dst = skb_dst(skb); 982 int ret; 983 984 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) 985 udp6_sk_rx_dst_set(sk, dst); 986 987 if (!uh->check && !udp_get_no_check6_rx(sk)) { 988 if (refcounted) 989 sock_put(sk); 990 goto report_csum_error; 991 } 992 993 ret = udp6_unicast_rcv_skb(sk, skb, uh); 994 if (refcounted) 995 sock_put(sk); 996 return ret; 997 } 998 999 /* 1000 * Multicast receive code 1001 */ 1002 if (ipv6_addr_is_multicast(daddr)) 1003 return __udp6_lib_mcast_deliver(net, skb, 1004 saddr, daddr, udptable, proto); 1005 1006 /* Unicast */ 1007 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 1008 if (sk) { 1009 if (!uh->check && !udp_get_no_check6_rx(sk)) 1010 goto report_csum_error; 1011 return udp6_unicast_rcv_skb(sk, skb, uh); 1012 } 1013 no_sk: 1014 reason = SKB_DROP_REASON_NO_SOCKET; 1015 1016 if (!uh->check) 1017 goto report_csum_error; 1018 1019 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 1020 goto discard; 1021 nf_reset_ct(skb); 1022 1023 if (udp_lib_checksum_complete(skb)) 1024 goto csum_error; 1025 1026 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 1027 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); 1028 1029 kfree_skb_reason(skb, reason); 1030 return 0; 1031 1032 short_packet: 1033 if (reason == SKB_DROP_REASON_NOT_SPECIFIED) 1034 reason = SKB_DROP_REASON_PKT_TOO_SMALL; 1035 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", 1036 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1037 saddr, ntohs(uh->source), 1038 ulen, skb->len, 1039 daddr, ntohs(uh->dest)); 1040 goto discard; 1041 1042 report_csum_error: 1043 udp6_csum_zero_error(skb); 1044 csum_error: 1045 if (reason == SKB_DROP_REASON_NOT_SPECIFIED) 1046 reason = SKB_DROP_REASON_UDP_CSUM; 1047 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 1048 discard: 1049 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1050 kfree_skb_reason(skb, reason); 1051 return 0; 1052 } 1053 1054 1055 static struct sock *__udp6_lib_demux_lookup(struct net *net, 1056 __be16 loc_port, const struct in6_addr *loc_addr, 1057 __be16 rmt_port, const struct in6_addr *rmt_addr, 1058 int dif, int sdif) 1059 { 1060 struct udp_table *udptable = net->ipv4.udp_table; 1061 unsigned short hnum = ntohs(loc_port); 1062 unsigned int hash2, slot2; 1063 struct udp_hslot *hslot2; 1064 __portpair ports; 1065 struct sock *sk; 1066 1067 hash2 = ipv6_portaddr_hash(net, loc_addr, hnum); 1068 slot2 = hash2 & udptable->mask; 1069 hslot2 = &udptable->hash2[slot2]; 1070 ports = INET_COMBINED_PORTS(rmt_port, hnum); 1071 1072 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 1073 if (sk->sk_state == TCP_ESTABLISHED && 1074 inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif)) 1075 return sk; 1076 /* Only check first socket in chain */ 1077 break; 1078 } 1079 return NULL; 1080 } 1081 1082 void udp_v6_early_demux(struct sk_buff *skb) 1083 { 1084 struct net *net = dev_net(skb->dev); 1085 const struct udphdr *uh; 1086 struct sock *sk; 1087 struct dst_entry *dst; 1088 int dif = skb->dev->ifindex; 1089 int sdif = inet6_sdif(skb); 1090 1091 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 1092 sizeof(struct udphdr))) 1093 return; 1094 1095 uh = udp_hdr(skb); 1096 1097 if (skb->pkt_type == PACKET_HOST) 1098 sk = 
__udp6_lib_demux_lookup(net, uh->dest, 1099 &ipv6_hdr(skb)->daddr, 1100 uh->source, &ipv6_hdr(skb)->saddr, 1101 dif, sdif); 1102 else 1103 return; 1104 1105 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 1106 return; 1107 1108 skb->sk = sk; 1109 skb->destructor = sock_efree; 1110 dst = rcu_dereference(sk->sk_rx_dst); 1111 1112 if (dst) 1113 dst = dst_check(dst, sk->sk_rx_dst_cookie); 1114 if (dst) { 1115 /* set noref for now. 1116 * any place which wants to hold dst has to call 1117 * dst_hold_safe() 1118 */ 1119 skb_dst_set_noref(skb, dst); 1120 } 1121 } 1122 1123 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb) 1124 { 1125 return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP); 1126 } 1127 1128 /* 1129 * Throw away all pending data and cancel the corking. Socket is locked. 1130 */ 1131 static void udp_v6_flush_pending_frames(struct sock *sk) 1132 { 1133 struct udp_sock *up = udp_sk(sk); 1134 1135 if (up->pending == AF_INET) 1136 udp_flush_pending_frames(sk); 1137 else if (up->pending) { 1138 up->len = 0; 1139 WRITE_ONCE(up->pending, 0); 1140 ip6_flush_pending_frames(sk); 1141 } 1142 } 1143 1144 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, 1145 int addr_len) 1146 { 1147 if (addr_len < offsetofend(struct sockaddr, sa_family)) 1148 return -EINVAL; 1149 /* The following checks are replicated from __ip6_datagram_connect() 1150 * and intended to prevent BPF program called below from accessing 1151 * bytes that are out of the bound specified by user in addr_len. 1152 */ 1153 if (uaddr->sa_family == AF_INET) { 1154 if (ipv6_only_sock(sk)) 1155 return -EAFNOSUPPORT; 1156 return udp_pre_connect(sk, uaddr, addr_len); 1157 } 1158 1159 if (addr_len < SIN6_LEN_RFC2133) 1160 return -EINVAL; 1161 1162 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len); 1163 } 1164 1165 /** 1166 * udp6_hwcsum_outgoing - handle outgoing HW checksumming 1167 * @sk: socket we are sending on 1168 * @skb: sk_buff containing the filled-in UDP header 1169 * (checksum field must be zeroed out) 1170 * @saddr: source address 1171 * @daddr: destination address 1172 * @len: length of packet 1173 */ 1174 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, 1175 const struct in6_addr *saddr, 1176 const struct in6_addr *daddr, int len) 1177 { 1178 unsigned int offset; 1179 struct udphdr *uh = udp_hdr(skb); 1180 struct sk_buff *frags = skb_shinfo(skb)->frag_list; 1181 __wsum csum = 0; 1182 1183 if (!frags) { 1184 /* Only one fragment on the socket. 
*/ 1185 skb->csum_start = skb_transport_header(skb) - skb->head; 1186 skb->csum_offset = offsetof(struct udphdr, check); 1187 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0); 1188 } else { 1189 /* 1190 * HW-checksum won't work as there are two or more 1191 * fragments on the socket so that all csums of sk_buffs 1192 * should be together 1193 */ 1194 offset = skb_transport_offset(skb); 1195 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 1196 csum = skb->csum; 1197 1198 skb->ip_summed = CHECKSUM_NONE; 1199 1200 do { 1201 csum = csum_add(csum, frags->csum); 1202 } while ((frags = frags->next)); 1203 1204 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 1205 csum); 1206 if (uh->check == 0) 1207 uh->check = CSUM_MANGLED_0; 1208 } 1209 } 1210 1211 /* 1212 * Sending 1213 */ 1214 1215 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, 1216 struct inet_cork *cork) 1217 { 1218 struct sock *sk = skb->sk; 1219 struct udphdr *uh; 1220 int err = 0; 1221 int is_udplite = IS_UDPLITE(sk); 1222 __wsum csum = 0; 1223 int offset = skb_transport_offset(skb); 1224 int len = skb->len - offset; 1225 int datalen = len - sizeof(*uh); 1226 1227 /* 1228 * Create a UDP header 1229 */ 1230 uh = udp_hdr(skb); 1231 uh->source = fl6->fl6_sport; 1232 uh->dest = fl6->fl6_dport; 1233 uh->len = htons(len); 1234 uh->check = 0; 1235 1236 if (cork->gso_size) { 1237 const int hlen = skb_network_header_len(skb) + 1238 sizeof(struct udphdr); 1239 1240 if (hlen + cork->gso_size > cork->fragsize) { 1241 kfree_skb(skb); 1242 return -EINVAL; 1243 } 1244 if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { 1245 kfree_skb(skb); 1246 return -EINVAL; 1247 } 1248 if (udp_get_no_check6_tx(sk)) { 1249 kfree_skb(skb); 1250 return -EINVAL; 1251 } 1252 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 1253 dst_xfrm(skb_dst(skb))) { 1254 kfree_skb(skb); 1255 return -EIO; 1256 } 1257 1258 if (datalen > cork->gso_size) { 1259 skb_shinfo(skb)->gso_size = cork->gso_size; 1260 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 1261 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen, 1262 cork->gso_size); 1263 } 1264 goto csum_partial; 1265 } 1266 1267 if (is_udplite) 1268 csum = udplite_csum(skb); 1269 else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */ 1270 skb->ip_summed = CHECKSUM_NONE; 1271 goto send; 1272 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 1273 csum_partial: 1274 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len); 1275 goto send; 1276 } else 1277 csum = udp_csum(skb); 1278 1279 /* add protocol-dependent pseudo-header */ 1280 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, 1281 len, fl6->flowi6_proto, csum); 1282 if (uh->check == 0) 1283 uh->check = CSUM_MANGLED_0; 1284 1285 send: 1286 err = ip6_send_skb(skb); 1287 if (err) { 1288 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { 1289 UDP6_INC_STATS(sock_net(sk), 1290 UDP_MIB_SNDBUFERRORS, is_udplite); 1291 err = 0; 1292 } 1293 } else { 1294 UDP6_INC_STATS(sock_net(sk), 1295 UDP_MIB_OUTDATAGRAMS, is_udplite); 1296 } 1297 return err; 1298 } 1299 1300 static int udp_v6_push_pending_frames(struct sock *sk) 1301 { 1302 struct sk_buff *skb; 1303 struct udp_sock *up = udp_sk(sk); 1304 int err = 0; 1305 1306 if (up->pending == AF_INET) 1307 return udp_push_pending_frames(sk); 1308 1309 skb = ip6_finish_skb(sk); 1310 if (!skb) 1311 goto out; 1312 1313 err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6, 1314 &inet_sk(sk)->cork.base); 1315 out: 1316 up->len = 0; 1317 WRITE_ONCE(up->pending, 
0); 1318 return err; 1319 } 1320 1321 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 1322 { 1323 struct ipv6_txoptions opt_space; 1324 struct udp_sock *up = udp_sk(sk); 1325 struct inet_sock *inet = inet_sk(sk); 1326 struct ipv6_pinfo *np = inet6_sk(sk); 1327 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 1328 struct in6_addr *daddr, *final_p, final; 1329 struct ipv6_txoptions *opt = NULL; 1330 struct ipv6_txoptions *opt_to_free = NULL; 1331 struct ip6_flowlabel *flowlabel = NULL; 1332 struct inet_cork_full cork; 1333 struct flowi6 *fl6 = &cork.fl.u.ip6; 1334 struct dst_entry *dst; 1335 struct ipcm6_cookie ipc6; 1336 int addr_len = msg->msg_namelen; 1337 bool connected = false; 1338 int ulen = len; 1339 int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE; 1340 int err; 1341 int is_udplite = IS_UDPLITE(sk); 1342 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 1343 1344 ipcm6_init(&ipc6); 1345 ipc6.gso_size = READ_ONCE(up->gso_size); 1346 ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags); 1347 ipc6.sockc.mark = READ_ONCE(sk->sk_mark); 1348 1349 /* destination address check */ 1350 if (sin6) { 1351 if (addr_len < offsetof(struct sockaddr, sa_data)) 1352 return -EINVAL; 1353 1354 switch (sin6->sin6_family) { 1355 case AF_INET6: 1356 if (addr_len < SIN6_LEN_RFC2133) 1357 return -EINVAL; 1358 daddr = &sin6->sin6_addr; 1359 if (ipv6_addr_any(daddr) && 1360 ipv6_addr_v4mapped(&np->saddr)) 1361 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), 1362 daddr); 1363 break; 1364 case AF_INET: 1365 goto do_udp_sendmsg; 1366 case AF_UNSPEC: 1367 msg->msg_name = sin6 = NULL; 1368 msg->msg_namelen = addr_len = 0; 1369 daddr = NULL; 1370 break; 1371 default: 1372 return -EINVAL; 1373 } 1374 } else if (!READ_ONCE(up->pending)) { 1375 if (sk->sk_state != TCP_ESTABLISHED) 1376 return -EDESTADDRREQ; 1377 daddr = &sk->sk_v6_daddr; 1378 } else 1379 daddr = NULL; 1380 1381 if (daddr) { 1382 if (ipv6_addr_v4mapped(daddr)) { 1383 struct sockaddr_in sin; 1384 sin.sin_family = AF_INET; 1385 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport; 1386 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 1387 msg->msg_name = &sin; 1388 msg->msg_namelen = sizeof(sin); 1389 do_udp_sendmsg: 1390 err = ipv6_only_sock(sk) ? 1391 -ENETUNREACH : udp_sendmsg(sk, msg, len); 1392 msg->msg_name = sin6; 1393 msg->msg_namelen = addr_len; 1394 return err; 1395 } 1396 } 1397 1398 /* Rough check on arithmetic overflow, 1399 better check is made in ip6_append_data(). 1400 */ 1401 if (len > INT_MAX - sizeof(struct udphdr)) 1402 return -EMSGSIZE; 1403 1404 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 1405 if (READ_ONCE(up->pending)) { 1406 if (READ_ONCE(up->pending) == AF_INET) 1407 return udp_sendmsg(sk, msg, len); 1408 /* 1409 * There are pending frames. 1410 * The socket lock must be held while it's corked. 
1411 */ 1412 lock_sock(sk); 1413 if (likely(up->pending)) { 1414 if (unlikely(up->pending != AF_INET6)) { 1415 release_sock(sk); 1416 return -EAFNOSUPPORT; 1417 } 1418 dst = NULL; 1419 goto do_append_data; 1420 } 1421 release_sock(sk); 1422 } 1423 ulen += sizeof(struct udphdr); 1424 1425 memset(fl6, 0, sizeof(*fl6)); 1426 1427 if (sin6) { 1428 if (sin6->sin6_port == 0) 1429 return -EINVAL; 1430 1431 fl6->fl6_dport = sin6->sin6_port; 1432 daddr = &sin6->sin6_addr; 1433 1434 if (np->sndflow) { 1435 fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; 1436 if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) { 1437 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); 1438 if (IS_ERR(flowlabel)) 1439 return -EINVAL; 1440 } 1441 } 1442 1443 /* 1444 * Otherwise it will be difficult to maintain 1445 * sk->sk_dst_cache. 1446 */ 1447 if (sk->sk_state == TCP_ESTABLISHED && 1448 ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) 1449 daddr = &sk->sk_v6_daddr; 1450 1451 if (addr_len >= sizeof(struct sockaddr_in6) && 1452 sin6->sin6_scope_id && 1453 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr))) 1454 fl6->flowi6_oif = sin6->sin6_scope_id; 1455 } else { 1456 if (sk->sk_state != TCP_ESTABLISHED) 1457 return -EDESTADDRREQ; 1458 1459 fl6->fl6_dport = inet->inet_dport; 1460 daddr = &sk->sk_v6_daddr; 1461 fl6->flowlabel = np->flow_label; 1462 connected = true; 1463 } 1464 1465 if (!fl6->flowi6_oif) 1466 fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if); 1467 1468 if (!fl6->flowi6_oif) 1469 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 1470 1471 fl6->flowi6_uid = sk->sk_uid; 1472 1473 if (msg->msg_controllen) { 1474 opt = &opt_space; 1475 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1476 opt->tot_len = sizeof(*opt); 1477 ipc6.opt = opt; 1478 1479 err = udp_cmsg_send(sk, msg, &ipc6.gso_size); 1480 if (err > 0) { 1481 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6, 1482 &ipc6); 1483 connected = false; 1484 } 1485 if (err < 0) { 1486 fl6_sock_release(flowlabel); 1487 return err; 1488 } 1489 if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { 1490 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); 1491 if (IS_ERR(flowlabel)) 1492 return -EINVAL; 1493 } 1494 if (!(opt->opt_nflen|opt->opt_flen)) 1495 opt = NULL; 1496 } 1497 if (!opt) { 1498 opt = txopt_get(np); 1499 opt_to_free = opt; 1500 } 1501 if (flowlabel) 1502 opt = fl6_merge_options(&opt_space, flowlabel, opt); 1503 opt = ipv6_fixup_options(&opt_space, opt); 1504 ipc6.opt = opt; 1505 1506 fl6->flowi6_proto = sk->sk_protocol; 1507 fl6->flowi6_mark = ipc6.sockc.mark; 1508 fl6->daddr = *daddr; 1509 if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr)) 1510 fl6->saddr = np->saddr; 1511 fl6->fl6_sport = inet->inet_sport; 1512 1513 if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) { 1514 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, 1515 (struct sockaddr *)sin6, 1516 &addr_len, 1517 &fl6->saddr); 1518 if (err) 1519 goto out_no_dst; 1520 if (sin6) { 1521 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { 1522 /* BPF program rewrote IPv6-only by IPv4-mapped 1523 * IPv6. It's currently unsupported. 1524 */ 1525 err = -ENOTSUPP; 1526 goto out_no_dst; 1527 } 1528 if (sin6->sin6_port == 0) { 1529 /* BPF program set invalid port. Reject it. 
*/ 1530 err = -EINVAL; 1531 goto out_no_dst; 1532 } 1533 fl6->fl6_dport = sin6->sin6_port; 1534 fl6->daddr = sin6->sin6_addr; 1535 } 1536 } 1537 1538 if (ipv6_addr_any(&fl6->daddr)) 1539 fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ 1540 1541 final_p = fl6_update_dst(fl6, opt, &final); 1542 if (final_p) 1543 connected = false; 1544 1545 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) { 1546 fl6->flowi6_oif = np->mcast_oif; 1547 connected = false; 1548 } else if (!fl6->flowi6_oif) 1549 fl6->flowi6_oif = np->ucast_oif; 1550 1551 security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 1552 1553 if (ipc6.tclass < 0) 1554 ipc6.tclass = np->tclass; 1555 1556 fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel); 1557 1558 dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected); 1559 if (IS_ERR(dst)) { 1560 err = PTR_ERR(dst); 1561 dst = NULL; 1562 goto out; 1563 } 1564 1565 if (ipc6.hlimit < 0) 1566 ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst); 1567 1568 if (msg->msg_flags&MSG_CONFIRM) 1569 goto do_confirm; 1570 back_from_confirm: 1571 1572 /* Lockless fast path for the non-corking case */ 1573 if (!corkreq) { 1574 struct sk_buff *skb; 1575 1576 skb = ip6_make_skb(sk, getfrag, msg, ulen, 1577 sizeof(struct udphdr), &ipc6, 1578 (struct rt6_info *)dst, 1579 msg->msg_flags, &cork); 1580 err = PTR_ERR(skb); 1581 if (!IS_ERR_OR_NULL(skb)) 1582 err = udp_v6_send_skb(skb, fl6, &cork.base); 1583 /* ip6_make_skb steals dst reference */ 1584 goto out_no_dst; 1585 } 1586 1587 lock_sock(sk); 1588 if (unlikely(up->pending)) { 1589 /* The socket is already corked while preparing it. */ 1590 /* ... which is an evident application bug. --ANK */ 1591 release_sock(sk); 1592 1593 net_dbg_ratelimited("udp cork app bug 2\n"); 1594 err = -EINVAL; 1595 goto out; 1596 } 1597 1598 WRITE_ONCE(up->pending, AF_INET6); 1599 1600 do_append_data: 1601 if (ipc6.dontfrag < 0) 1602 ipc6.dontfrag = np->dontfrag; 1603 up->len += ulen; 1604 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), 1605 &ipc6, fl6, (struct rt6_info *)dst, 1606 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 1607 if (err) 1608 udp_v6_flush_pending_frames(sk); 1609 else if (!corkreq) 1610 err = udp_v6_push_pending_frames(sk); 1611 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) 1612 WRITE_ONCE(up->pending, 0); 1613 1614 if (err > 0) 1615 err = np->recverr ? net_xmit_errno(err) : 0; 1616 release_sock(sk); 1617 1618 out: 1619 dst_release(dst); 1620 out_no_dst: 1621 fl6_sock_release(flowlabel); 1622 txopt_put(opt_to_free); 1623 if (!err) 1624 return len; 1625 /* 1626 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting 1627 * ENOBUFS might not be good (it's not tunable per se), but otherwise 1628 * we don't have a good statistic (IpOutDiscards but it can be too many 1629 * things). We could add another new stat but at least for now that 1630 * seems like overkill. 
1631 */ 1632 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 1633 UDP6_INC_STATS(sock_net(sk), 1634 UDP_MIB_SNDBUFERRORS, is_udplite); 1635 } 1636 return err; 1637 1638 do_confirm: 1639 if (msg->msg_flags & MSG_PROBE) 1640 dst_confirm_neigh(dst, &fl6->daddr); 1641 if (!(msg->msg_flags&MSG_PROBE) || len) 1642 goto back_from_confirm; 1643 err = 0; 1644 goto out; 1645 } 1646 EXPORT_SYMBOL(udpv6_sendmsg); 1647 1648 static void udpv6_splice_eof(struct socket *sock) 1649 { 1650 struct sock *sk = sock->sk; 1651 struct udp_sock *up = udp_sk(sk); 1652 1653 if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk)) 1654 return; 1655 1656 lock_sock(sk); 1657 if (up->pending && !udp_test_bit(CORK, sk)) 1658 udp_v6_push_pending_frames(sk); 1659 release_sock(sk); 1660 } 1661 1662 void udpv6_destroy_sock(struct sock *sk) 1663 { 1664 struct udp_sock *up = udp_sk(sk); 1665 lock_sock(sk); 1666 1667 /* protects from races with udp_abort() */ 1668 sock_set_flag(sk, SOCK_DEAD); 1669 udp_v6_flush_pending_frames(sk); 1670 release_sock(sk); 1671 1672 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 1673 if (up->encap_type) { 1674 void (*encap_destroy)(struct sock *sk); 1675 encap_destroy = READ_ONCE(up->encap_destroy); 1676 if (encap_destroy) 1677 encap_destroy(sk); 1678 } 1679 if (udp_test_bit(ENCAP_ENABLED, sk)) { 1680 static_branch_dec(&udpv6_encap_needed_key); 1681 udp_encap_disable(); 1682 } 1683 } 1684 } 1685 1686 /* 1687 * Socket option code for UDP 1688 */ 1689 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 1690 unsigned int optlen) 1691 { 1692 if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET) 1693 return udp_lib_setsockopt(sk, level, optname, 1694 optval, optlen, 1695 udp_v6_push_pending_frames); 1696 return ipv6_setsockopt(sk, level, optname, optval, optlen); 1697 } 1698 1699 int udpv6_getsockopt(struct sock *sk, int level, int optname, 1700 char __user *optval, int __user *optlen) 1701 { 1702 if (level == SOL_UDP || level == SOL_UDPLITE) 1703 return udp_lib_getsockopt(sk, level, optname, optval, optlen); 1704 return ipv6_getsockopt(sk, level, optname, optval, optlen); 1705 } 1706 1707 static const struct inet6_protocol udpv6_protocol = { 1708 .handler = udpv6_rcv, 1709 .err_handler = udpv6_err, 1710 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1711 }; 1712 1713 /* ------------------------------------------------------------------------ */ 1714 #ifdef CONFIG_PROC_FS 1715 int udp6_seq_show(struct seq_file *seq, void *v) 1716 { 1717 if (v == SEQ_START_TOKEN) { 1718 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); 1719 } else { 1720 int bucket = ((struct udp_iter_state *)seq->private)->bucket; 1721 const struct inet_sock *inet = inet_sk((const struct sock *)v); 1722 __u16 srcp = ntohs(inet->inet_sport); 1723 __u16 destp = ntohs(inet->inet_dport); 1724 __ip6_dgram_sock_seq_show(seq, v, srcp, destp, 1725 udp_rqueue_get(v), bucket); 1726 } 1727 return 0; 1728 } 1729 1730 const struct seq_operations udp6_seq_ops = { 1731 .start = udp_seq_start, 1732 .next = udp_seq_next, 1733 .stop = udp_seq_stop, 1734 .show = udp6_seq_show, 1735 }; 1736 EXPORT_SYMBOL(udp6_seq_ops); 1737 1738 static struct udp_seq_afinfo udp6_seq_afinfo = { 1739 .family = AF_INET6, 1740 .udp_table = NULL, 1741 }; 1742 1743 int __net_init udp6_proc_init(struct net *net) 1744 { 1745 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops, 1746 sizeof(struct udp_iter_state), &udp6_seq_afinfo)) 1747 return -ENOMEM; 1748 return 0; 1749 } 1750 1751 void 
udp6_proc_exit(struct net *net) 1752 { 1753 remove_proc_entry("udp6", net->proc_net); 1754 } 1755 #endif /* CONFIG_PROC_FS */ 1756 1757 /* ------------------------------------------------------------------------ */ 1758 1759 struct proto udpv6_prot = { 1760 .name = "UDPv6", 1761 .owner = THIS_MODULE, 1762 .close = udp_lib_close, 1763 .pre_connect = udpv6_pre_connect, 1764 .connect = ip6_datagram_connect, 1765 .disconnect = udp_disconnect, 1766 .ioctl = udp_ioctl, 1767 .init = udpv6_init_sock, 1768 .destroy = udpv6_destroy_sock, 1769 .setsockopt = udpv6_setsockopt, 1770 .getsockopt = udpv6_getsockopt, 1771 .sendmsg = udpv6_sendmsg, 1772 .recvmsg = udpv6_recvmsg, 1773 .splice_eof = udpv6_splice_eof, 1774 .release_cb = ip6_datagram_release_cb, 1775 .hash = udp_lib_hash, 1776 .unhash = udp_lib_unhash, 1777 .rehash = udp_v6_rehash, 1778 .get_port = udp_v6_get_port, 1779 .put_port = udp_lib_unhash, 1780 #ifdef CONFIG_BPF_SYSCALL 1781 .psock_update_sk_prot = udp_bpf_update_proto, 1782 #endif 1783 1784 .memory_allocated = &udp_memory_allocated, 1785 .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, 1786 1787 .sysctl_mem = sysctl_udp_mem, 1788 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), 1789 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), 1790 .obj_size = sizeof(struct udp6_sock), 1791 .ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6), 1792 .h.udp_table = NULL, 1793 .diag_destroy = udp_abort, 1794 }; 1795 1796 static struct inet_protosw udpv6_protosw = { 1797 .type = SOCK_DGRAM, 1798 .protocol = IPPROTO_UDP, 1799 .prot = &udpv6_prot, 1800 .ops = &inet6_dgram_ops, 1801 .flags = INET_PROTOSW_PERMANENT, 1802 }; 1803 1804 int __init udpv6_init(void) 1805 { 1806 int ret; 1807 1808 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP); 1809 if (ret) 1810 goto out; 1811 1812 ret = inet6_register_protosw(&udpv6_protosw); 1813 if (ret) 1814 goto out_udpv6_protocol; 1815 out: 1816 return ret; 1817 1818 out_udpv6_protocol: 1819 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); 1820 goto out; 1821 } 1822 1823 void udpv6_exit(void) 1824 { 1825 inet6_unregister_protosw(&udpv6_protosw); 1826 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); 1827 } 1828
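/*
 * Editor's illustrative sketch (not part of this file, not built): how a
 * caller might use the exported udp6_lib_lookup() defined above. Unlike
 * __udp6_lib_lookup(), which runs under rcu_read_lock() and takes no
 * reference, udp6_lib_lookup() bumps sk_refcnt on success, so the caller
 * owns a sock_put(). The example_resolve() name is hypothetical.
 *
 *	static void example_resolve(struct net *net, struct sk_buff *skb)
 *	{
 *		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 *		const struct udphdr *uh = udp_hdr(skb);
 *		struct sock *sk;
 *
 *		rcu_read_lock();
 *		sk = udp6_lib_lookup(net, &ip6h->saddr, uh->source,
 *				     &ip6h->daddr, uh->dest, inet6_iif(skb));
 *		rcu_read_unlock();
 *		if (!sk)
 *			return;
 *		// ... inspect sk ...
 *		sock_put(sk);	// drop the reference taken by the lookup
 *	}
 */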
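/*
 * Editor's illustrative sketch (not part of this file, not built): the
 * return-value contract expected from an up->encap_rcv() hook, as consumed
 * by udpv6_queue_rcv_one_skb() above (0 = skb consumed or dropped by the
 * encap handler, >0 = hand the skb back to plain UDP, <0 = resubmit as
 * protocol -N). The example_* names and header layout are hypothetical;
 * only the return values follow the documented contract.
 *
 *	static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		struct example_hdr *hdr;
 *
 *		if (!pskb_may_pull(skb, sizeof(struct udphdr) + sizeof(*hdr)))
 *			return 1;	// too short: let plain UDP handle it
 *
 *		hdr = (struct example_hdr *)(udp_hdr(skb) + 1);
 *		if (!example_hdr_is_valid(hdr))
 *			return 1;	// not for this tunnel: pass up as UDP
 *
 *		example_deliver(sk, skb);
 *		return 0;		// consumed by the encapsulation
 *	}
 */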
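/*
 * Editor's illustrative sketch (not part of this file, not built): the
 * cork->gso_size handling in udp_v6_send_skb() above is driven from user
 * space through the UDP_SEGMENT socket option (or a per-call UDP_SEGMENT
 * cmsg parsed by udp_cmsg_send()). A minimal user-space use on a connected
 * IPv6 UDP socket is sketched below; fd and buf are assumed to exist and
 * UDP_SEGMENT/SOL_UDP to come from the uapi UDP header.
 *
 *	int gso_size = 1400;	// payload bytes per segment
 *
 *	if (setsockopt(fd, SOL_UDP, UDP_SEGMENT,
 *		       &gso_size, sizeof(gso_size)) < 0)
 *		perror("setsockopt(UDP_SEGMENT)");
 *
 *	// one large send() is now segmented into 1400-byte datagrams
 *	send(fd, buf, 16 * 1400, 0);
 */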