1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * UDP over IPv6 4 * Linux INET6 implementation 5 * 6 * Authors: 7 * Pedro Roque <roque@di.fc.ul.pt> 8 * 9 * Based on linux/ipv4/udp.c 10 * 11 * Fixes: 12 * Hideaki YOSHIFUJI : sin6_scope_id support 13 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which 14 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind 15 * a single port at the same time. 16 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data 17 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file. 18 */ 19 20 #include <linux/errno.h> 21 #include <linux/types.h> 22 #include <linux/socket.h> 23 #include <linux/sockios.h> 24 #include <linux/net.h> 25 #include <linux/in6.h> 26 #include <linux/netdevice.h> 27 #include <linux/if_arp.h> 28 #include <linux/ipv6.h> 29 #include <linux/icmpv6.h> 30 #include <linux/init.h> 31 #include <linux/module.h> 32 #include <linux/skbuff.h> 33 #include <linux/slab.h> 34 #include <linux/uaccess.h> 35 #include <linux/indirect_call_wrapper.h> 36 37 #include <net/addrconf.h> 38 #include <net/ndisc.h> 39 #include <net/protocol.h> 40 #include <net/transp_v6.h> 41 #include <net/ip6_route.h> 42 #include <net/raw.h> 43 #include <net/tcp_states.h> 44 #include <net/ip6_checksum.h> 45 #include <net/ip6_tunnel.h> 46 #include <net/xfrm.h> 47 #include <net/inet_hashtables.h> 48 #include <net/inet6_hashtables.h> 49 #include <net/busy_poll.h> 50 #include <net/sock_reuseport.h> 51 52 #include <linux/proc_fs.h> 53 #include <linux/seq_file.h> 54 #include <trace/events/skb.h> 55 #include "udp_impl.h" 56 57 static u32 udp6_ehashfn(const struct net *net, 58 const struct in6_addr *laddr, 59 const u16 lport, 60 const struct in6_addr *faddr, 61 const __be16 fport) 62 { 63 static u32 udp6_ehash_secret __read_mostly; 64 static u32 udp_ipv6_hash_secret __read_mostly; 65 66 u32 lhash, fhash; 67 68 net_get_random_once(&udp6_ehash_secret, 69 sizeof(udp6_ehash_secret)); 70 net_get_random_once(&udp_ipv6_hash_secret, 71 sizeof(udp_ipv6_hash_secret)); 72 73 lhash = (__force u32)laddr->s6_addr32[3]; 74 fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret); 75 76 return __inet6_ehashfn(lhash, lport, fhash, fport, 77 udp_ipv6_hash_secret + net_hash_mix(net)); 78 } 79 80 int udp_v6_get_port(struct sock *sk, unsigned short snum) 81 { 82 unsigned int hash2_nulladdr = 83 ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum); 84 unsigned int hash2_partial = 85 ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0); 86 87 /* precompute partial secondary hash */ 88 udp_sk(sk)->udp_portaddr_hash = hash2_partial; 89 return udp_lib_get_port(sk, snum, hash2_nulladdr); 90 } 91 92 void udp_v6_rehash(struct sock *sk) 93 { 94 u16 new_hash = ipv6_portaddr_hash(sock_net(sk), 95 &sk->sk_v6_rcv_saddr, 96 inet_sk(sk)->inet_num); 97 98 udp_lib_rehash(sk, new_hash); 99 } 100 101 static int compute_score(struct sock *sk, struct net *net, 102 const struct in6_addr *saddr, __be16 sport, 103 const struct in6_addr *daddr, unsigned short hnum, 104 int dif, int sdif) 105 { 106 int score; 107 struct inet_sock *inet; 108 bool dev_match; 109 110 if (!net_eq(sock_net(sk), net) || 111 udp_sk(sk)->udp_port_hash != hnum || 112 sk->sk_family != PF_INET6) 113 return -1; 114 115 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) 116 return -1; 117 118 score = 0; 119 inet = inet_sk(sk); 120 121 if (inet->inet_dport) { 122 if (inet->inet_dport != sport) 123 return -1; 124 score++; 125 } 126 127 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { 128 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) 129 return -1; 130 score++; 131 } 132 133 dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif); 134 if (!dev_match) 135 return -1; 136 if (sk->sk_bound_dev_if) 137 score++; 138 139 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) 140 score++; 141 142 return score; 143 } 144 145 static struct sock *lookup_reuseport(struct net *net, struct sock *sk, 146 struct sk_buff *skb, 147 const struct in6_addr *saddr, 148 __be16 sport, 149 const struct in6_addr *daddr, 150 unsigned int hnum) 151 { 152 struct sock *reuse_sk = NULL; 153 u32 hash; 154 155 if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { 156 hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); 157 reuse_sk = reuseport_select_sock(sk, hash, skb, 158 sizeof(struct udphdr)); 159 } 160 return reuse_sk; 161 } 162 163 /* called with rcu_read_lock() */ 164 static struct sock *udp6_lib_lookup2(struct net *net, 165 const struct in6_addr *saddr, __be16 sport, 166 const struct in6_addr *daddr, unsigned int hnum, 167 int dif, int sdif, struct udp_hslot *hslot2, 168 struct sk_buff *skb) 169 { 170 struct sock *sk, *result; 171 int score, badness; 172 173 result = NULL; 174 badness = -1; 175 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 176 score = compute_score(sk, net, saddr, sport, 177 daddr, hnum, dif, sdif); 178 if (score > badness) { 179 result = lookup_reuseport(net, sk, skb, 180 saddr, sport, daddr, hnum); 181 /* Fall back to scoring if group has connections */ 182 if (result && !reuseport_has_conns(sk, false)) 183 return result; 184 185 result = result ? : sk; 186 badness = score; 187 } 188 } 189 return result; 190 } 191 192 static inline struct sock *udp6_lookup_run_bpf(struct net *net, 193 struct udp_table *udptable, 194 struct sk_buff *skb, 195 const struct in6_addr *saddr, 196 __be16 sport, 197 const struct in6_addr *daddr, 198 u16 hnum) 199 { 200 struct sock *sk, *reuse_sk; 201 bool no_reuseport; 202 203 if (udptable != &udp_table) 204 return NULL; /* only UDP is supported */ 205 206 no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, 207 saddr, sport, daddr, hnum, &sk); 208 if (no_reuseport || IS_ERR_OR_NULL(sk)) 209 return sk; 210 211 reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); 212 if (reuse_sk) 213 sk = reuse_sk; 214 return sk; 215 } 216 217 /* rcu_read_lock() must be held */ 218 struct sock *__udp6_lib_lookup(struct net *net, 219 const struct in6_addr *saddr, __be16 sport, 220 const struct in6_addr *daddr, __be16 dport, 221 int dif, int sdif, struct udp_table *udptable, 222 struct sk_buff *skb) 223 { 224 unsigned short hnum = ntohs(dport); 225 unsigned int hash2, slot2; 226 struct udp_hslot *hslot2; 227 struct sock *result, *sk; 228 229 hash2 = ipv6_portaddr_hash(net, daddr, hnum); 230 slot2 = hash2 & udptable->mask; 231 hslot2 = &udptable->hash2[slot2]; 232 233 /* Lookup connected or non-wildcard sockets */ 234 result = udp6_lib_lookup2(net, saddr, sport, 235 daddr, hnum, dif, sdif, 236 hslot2, skb); 237 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED) 238 goto done; 239 240 /* Lookup redirect from BPF */ 241 if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { 242 sk = udp6_lookup_run_bpf(net, udptable, skb, 243 saddr, sport, daddr, hnum); 244 if (sk) { 245 result = sk; 246 goto done; 247 } 248 } 249 250 /* Got non-wildcard socket or error on first lookup */ 251 if (result) 252 goto done; 253 254 /* Lookup wildcard sockets */ 255 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); 256 slot2 = hash2 & udptable->mask; 257 hslot2 = &udptable->hash2[slot2]; 258 259 result = udp6_lib_lookup2(net, saddr, sport, 260 &in6addr_any, hnum, dif, sdif, 261 hslot2, skb); 262 done: 263 if (IS_ERR(result)) 264 return NULL; 265 return result; 266 } 267 EXPORT_SYMBOL_GPL(__udp6_lib_lookup); 268 269 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, 270 __be16 sport, __be16 dport, 271 struct udp_table *udptable) 272 { 273 const struct ipv6hdr *iph = ipv6_hdr(skb); 274 275 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, 276 &iph->daddr, dport, inet6_iif(skb), 277 inet6_sdif(skb), udptable, skb); 278 } 279 280 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb, 281 __be16 sport, __be16 dport) 282 { 283 const struct ipv6hdr *iph = ipv6_hdr(skb); 284 285 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, 286 &iph->daddr, dport, inet6_iif(skb), 287 inet6_sdif(skb), &udp_table, NULL); 288 } 289 290 /* Must be called under rcu_read_lock(). 291 * Does increment socket refcount. 292 */ 293 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6) 294 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, 295 const struct in6_addr *daddr, __be16 dport, int dif) 296 { 297 struct sock *sk; 298 299 sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport, 300 dif, 0, &udp_table, NULL); 301 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) 302 sk = NULL; 303 return sk; 304 } 305 EXPORT_SYMBOL_GPL(udp6_lib_lookup); 306 #endif 307 308 /* do not use the scratch area len for jumbogram: their length execeeds the 309 * scratch area space; note that the IP6CB flags is still in the first 310 * cacheline, so checking for jumbograms is cheap 311 */ 312 static int udp6_skb_len(struct sk_buff *skb) 313 { 314 return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb); 315 } 316 317 /* 318 * This should be easy, if there is something there we 319 * return it, otherwise we block. 320 */ 321 322 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 323 int noblock, int flags, int *addr_len) 324 { 325 struct ipv6_pinfo *np = inet6_sk(sk); 326 struct inet_sock *inet = inet_sk(sk); 327 struct sk_buff *skb; 328 unsigned int ulen, copied; 329 int off, err, peeking = flags & MSG_PEEK; 330 int is_udplite = IS_UDPLITE(sk); 331 struct udp_mib __percpu *mib; 332 bool checksum_valid = false; 333 int is_udp4; 334 335 if (flags & MSG_ERRQUEUE) 336 return ipv6_recv_error(sk, msg, len, addr_len); 337 338 if (np->rxpmtu && np->rxopt.bits.rxpmtu) 339 return ipv6_recv_rxpmtu(sk, msg, len, addr_len); 340 341 try_again: 342 off = sk_peek_offset(sk, flags); 343 skb = __skb_recv_udp(sk, flags, noblock, &off, &err); 344 if (!skb) 345 return err; 346 347 ulen = udp6_skb_len(skb); 348 copied = len; 349 if (copied > ulen - off) 350 copied = ulen - off; 351 else if (copied < ulen) 352 msg->msg_flags |= MSG_TRUNC; 353 354 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 355 mib = __UDPX_MIB(sk, is_udp4); 356 357 /* 358 * If checksum is needed at all, try to do it while copying the 359 * data. If the data is truncated, or if we only want a partial 360 * coverage checksum (UDP-Lite), do it before the copy. 361 */ 362 363 if (copied < ulen || peeking || 364 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { 365 checksum_valid = udp_skb_csum_unnecessary(skb) || 366 !__udp_lib_checksum_complete(skb); 367 if (!checksum_valid) 368 goto csum_copy_err; 369 } 370 371 if (checksum_valid || udp_skb_csum_unnecessary(skb)) { 372 if (udp_skb_is_linear(skb)) 373 err = copy_linear_skb(skb, copied, off, &msg->msg_iter); 374 else 375 err = skb_copy_datagram_msg(skb, off, msg, copied); 376 } else { 377 err = skb_copy_and_csum_datagram_msg(skb, off, msg); 378 if (err == -EINVAL) 379 goto csum_copy_err; 380 } 381 if (unlikely(err)) { 382 if (!peeking) { 383 atomic_inc(&sk->sk_drops); 384 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 385 } 386 kfree_skb(skb); 387 return err; 388 } 389 if (!peeking) 390 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS); 391 392 sock_recv_ts_and_drops(msg, sk, skb); 393 394 /* Copy the address. */ 395 if (msg->msg_name) { 396 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 397 sin6->sin6_family = AF_INET6; 398 sin6->sin6_port = udp_hdr(skb)->source; 399 sin6->sin6_flowinfo = 0; 400 401 if (is_udp4) { 402 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, 403 &sin6->sin6_addr); 404 sin6->sin6_scope_id = 0; 405 } else { 406 sin6->sin6_addr = ipv6_hdr(skb)->saddr; 407 sin6->sin6_scope_id = 408 ipv6_iface_scope_id(&sin6->sin6_addr, 409 inet6_iif(skb)); 410 } 411 *addr_len = sizeof(*sin6); 412 413 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, 414 (struct sockaddr *)sin6); 415 } 416 417 if (udp_sk(sk)->gro_enabled) 418 udp_cmsg_recv(msg, sk, skb); 419 420 if (np->rxopt.all) 421 ip6_datagram_recv_common_ctl(sk, msg, skb); 422 423 if (is_udp4) { 424 if (inet->cmsg_flags) 425 ip_cmsg_recv_offset(msg, sk, skb, 426 sizeof(struct udphdr), off); 427 } else { 428 if (np->rxopt.all) 429 ip6_datagram_recv_specific_ctl(sk, msg, skb); 430 } 431 432 err = copied; 433 if (flags & MSG_TRUNC) 434 err = ulen; 435 436 skb_consume_udp(sk, skb, peeking ? -err : err); 437 return err; 438 439 csum_copy_err: 440 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, 441 udp_skb_destructor)) { 442 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS); 443 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 444 } 445 kfree_skb(skb); 446 447 /* starting over for a new packet, but check if we need to yield */ 448 cond_resched(); 449 msg->msg_flags &= ~MSG_TRUNC; 450 goto try_again; 451 } 452 453 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key); 454 void udpv6_encap_enable(void) 455 { 456 static_branch_inc(&udpv6_encap_needed_key); 457 } 458 EXPORT_SYMBOL(udpv6_encap_enable); 459 460 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go 461 * through error handlers in encapsulations looking for a match. 462 */ 463 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, 464 struct inet6_skb_parm *opt, 465 u8 type, u8 code, int offset, __be32 info) 466 { 467 int i; 468 469 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 470 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 471 u8 type, u8 code, int offset, __be32 info); 472 const struct ip6_tnl_encap_ops *encap; 473 474 encap = rcu_dereference(ip6tun_encaps[i]); 475 if (!encap) 476 continue; 477 handler = encap->err_handler; 478 if (handler && !handler(skb, opt, type, code, offset, info)) 479 return 0; 480 } 481 482 return -ENOENT; 483 } 484 485 /* Try to match ICMP errors to UDP tunnels by looking up a socket without 486 * reversing source and destination port: this will match tunnels that force the 487 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that 488 * lwtunnels might actually break this assumption by being configured with 489 * different destination ports on endpoints, in this case we won't be able to 490 * trace ICMP messages back to them. 491 * 492 * If this doesn't match any socket, probe tunnels with arbitrary destination 493 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port 494 * we've sent packets to won't necessarily match the local destination port. 495 * 496 * Then ask the tunnel implementation to match the error against a valid 497 * association. 498 * 499 * Return an error if we can't find a match, the socket if we need further 500 * processing, zero otherwise. 501 */ 502 static struct sock *__udp6_lib_err_encap(struct net *net, 503 const struct ipv6hdr *hdr, int offset, 504 struct udphdr *uh, 505 struct udp_table *udptable, 506 struct sock *sk, 507 struct sk_buff *skb, 508 struct inet6_skb_parm *opt, 509 u8 type, u8 code, __be32 info) 510 { 511 int (*lookup)(struct sock *sk, struct sk_buff *skb); 512 int network_offset, transport_offset; 513 struct udp_sock *up; 514 515 network_offset = skb_network_offset(skb); 516 transport_offset = skb_transport_offset(skb); 517 518 /* Network header needs to point to the outer IPv6 header inside ICMP */ 519 skb_reset_network_header(skb); 520 521 /* Transport header needs to point to the UDP header */ 522 skb_set_transport_header(skb, offset); 523 524 if (sk) { 525 up = udp_sk(sk); 526 527 lookup = READ_ONCE(up->encap_err_lookup); 528 if (lookup && lookup(sk, skb)) 529 sk = NULL; 530 531 goto out; 532 } 533 534 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source, 535 &hdr->saddr, uh->dest, 536 inet6_iif(skb), 0, udptable, skb); 537 if (sk) { 538 up = udp_sk(sk); 539 540 lookup = READ_ONCE(up->encap_err_lookup); 541 if (!lookup || lookup(sk, skb)) 542 sk = NULL; 543 } 544 545 out: 546 if (!sk) { 547 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code, 548 offset, info)); 549 } 550 551 skb_set_transport_header(skb, transport_offset); 552 skb_set_network_header(skb, network_offset); 553 554 return sk; 555 } 556 557 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 558 u8 type, u8 code, int offset, __be32 info, 559 struct udp_table *udptable) 560 { 561 struct ipv6_pinfo *np; 562 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; 563 const struct in6_addr *saddr = &hdr->saddr; 564 const struct in6_addr *daddr = &hdr->daddr; 565 struct udphdr *uh = (struct udphdr *)(skb->data+offset); 566 bool tunnel = false; 567 struct sock *sk; 568 int harderr; 569 int err; 570 struct net *net = dev_net(skb->dev); 571 572 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, 573 inet6_iif(skb), inet6_sdif(skb), udptable, NULL); 574 575 if (!sk || udp_sk(sk)->encap_type) { 576 /* No socket for error: try tunnels before discarding */ 577 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 578 sk = __udp6_lib_err_encap(net, hdr, offset, uh, 579 udptable, sk, skb, 580 opt, type, code, info); 581 if (!sk) 582 return 0; 583 } else 584 sk = ERR_PTR(-ENOENT); 585 586 if (IS_ERR(sk)) { 587 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), 588 ICMP6_MIB_INERRORS); 589 return PTR_ERR(sk); 590 } 591 592 tunnel = true; 593 } 594 595 harderr = icmpv6_err_convert(type, code, &err); 596 np = inet6_sk(sk); 597 598 if (type == ICMPV6_PKT_TOOBIG) { 599 if (!ip6_sk_accept_pmtu(sk)) 600 goto out; 601 ip6_sk_update_pmtu(skb, sk, info); 602 if (np->pmtudisc != IPV6_PMTUDISC_DONT) 603 harderr = 1; 604 } 605 if (type == NDISC_REDIRECT) { 606 if (tunnel) { 607 ip6_redirect(skb, sock_net(sk), inet6_iif(skb), 608 sk->sk_mark, sk->sk_uid); 609 } else { 610 ip6_sk_redirect(skb, sk); 611 } 612 goto out; 613 } 614 615 /* Tunnels don't have an application socket: don't pass errors back */ 616 if (tunnel) 617 goto out; 618 619 if (!np->recverr) { 620 if (!harderr || sk->sk_state != TCP_ESTABLISHED) 621 goto out; 622 } else { 623 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); 624 } 625 626 sk->sk_err = err; 627 sk_error_report(sk); 628 out: 629 return 0; 630 } 631 632 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 633 { 634 int rc; 635 636 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { 637 sock_rps_save_rxhash(sk, skb); 638 sk_mark_napi_id(sk, skb); 639 sk_incoming_cpu_update(sk); 640 } else { 641 sk_mark_napi_id_once(sk, skb); 642 } 643 644 rc = __udp_enqueue_schedule_skb(sk, skb); 645 if (rc < 0) { 646 int is_udplite = IS_UDPLITE(sk); 647 648 /* Note that an ENOMEM error is charged twice */ 649 if (rc == -ENOMEM) 650 UDP6_INC_STATS(sock_net(sk), 651 UDP_MIB_RCVBUFERRORS, is_udplite); 652 else 653 UDP6_INC_STATS(sock_net(sk), 654 UDP_MIB_MEMERRORS, is_udplite); 655 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 656 kfree_skb(skb); 657 return -1; 658 } 659 660 return 0; 661 } 662 663 static __inline__ int udpv6_err(struct sk_buff *skb, 664 struct inet6_skb_parm *opt, u8 type, 665 u8 code, int offset, __be32 info) 666 { 667 return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); 668 } 669 670 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) 671 { 672 struct udp_sock *up = udp_sk(sk); 673 int is_udplite = IS_UDPLITE(sk); 674 675 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 676 goto drop; 677 678 if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) { 679 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 680 681 /* 682 * This is an encapsulation socket so pass the skb to 683 * the socket's udp_encap_rcv() hook. Otherwise, just 684 * fall through and pass this up the UDP socket. 685 * up->encap_rcv() returns the following value: 686 * =0 if skb was successfully passed to the encap 687 * handler or was discarded by it. 688 * >0 if skb should be passed on to UDP. 689 * <0 if skb should be resubmitted as proto -N 690 */ 691 692 /* if we're overly short, let UDP handle it */ 693 encap_rcv = READ_ONCE(up->encap_rcv); 694 if (encap_rcv) { 695 int ret; 696 697 /* Verify checksum before giving to encap */ 698 if (udp_lib_checksum_complete(skb)) 699 goto csum_error; 700 701 ret = encap_rcv(sk, skb); 702 if (ret <= 0) { 703 __UDP_INC_STATS(sock_net(sk), 704 UDP_MIB_INDATAGRAMS, 705 is_udplite); 706 return -ret; 707 } 708 } 709 710 /* FALLTHROUGH -- it's a UDP Packet */ 711 } 712 713 /* 714 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 715 */ 716 if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 717 718 if (up->pcrlen == 0) { /* full coverage was set */ 719 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", 720 UDP_SKB_CB(skb)->cscov, skb->len); 721 goto drop; 722 } 723 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { 724 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n", 725 UDP_SKB_CB(skb)->cscov, up->pcrlen); 726 goto drop; 727 } 728 } 729 730 prefetch(&sk->sk_rmem_alloc); 731 if (rcu_access_pointer(sk->sk_filter) && 732 udp_lib_checksum_complete(skb)) 733 goto csum_error; 734 735 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) 736 goto drop; 737 738 udp_csum_pull_header(skb); 739 740 skb_dst_drop(skb); 741 742 return __udpv6_queue_rcv_skb(sk, skb); 743 744 csum_error: 745 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 746 drop: 747 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 748 atomic_inc(&sk->sk_drops); 749 kfree_skb(skb); 750 return -1; 751 } 752 753 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 754 { 755 struct sk_buff *next, *segs; 756 int ret; 757 758 if (likely(!udp_unexpected_gso(sk, skb))) 759 return udpv6_queue_rcv_one_skb(sk, skb); 760 761 __skb_push(skb, -skb_mac_offset(skb)); 762 segs = udp_rcv_segment(sk, skb, false); 763 skb_list_walk_safe(segs, skb, next) { 764 __skb_pull(skb, skb_transport_offset(skb)); 765 766 udp_post_segment_fix_csum(skb); 767 ret = udpv6_queue_rcv_one_skb(sk, skb); 768 if (ret > 0) 769 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret, 770 true); 771 } 772 return 0; 773 } 774 775 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk, 776 __be16 loc_port, const struct in6_addr *loc_addr, 777 __be16 rmt_port, const struct in6_addr *rmt_addr, 778 int dif, int sdif, unsigned short hnum) 779 { 780 struct inet_sock *inet = inet_sk(sk); 781 782 if (!net_eq(sock_net(sk), net)) 783 return false; 784 785 if (udp_sk(sk)->udp_port_hash != hnum || 786 sk->sk_family != PF_INET6 || 787 (inet->inet_dport && inet->inet_dport != rmt_port) || 788 (!ipv6_addr_any(&sk->sk_v6_daddr) && 789 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) || 790 !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) || 791 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && 792 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))) 793 return false; 794 if (!inet6_mc_check(sk, loc_addr, rmt_addr)) 795 return false; 796 return true; 797 } 798 799 static void udp6_csum_zero_error(struct sk_buff *skb) 800 { 801 /* RFC 2460 section 8.1 says that we SHOULD log 802 * this error. Well, it is reasonable. 803 */ 804 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", 805 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), 806 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); 807 } 808 809 /* 810 * Note: called only from the BH handler context, 811 * so we don't need to lock the hashes. 812 */ 813 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 814 const struct in6_addr *saddr, const struct in6_addr *daddr, 815 struct udp_table *udptable, int proto) 816 { 817 struct sock *sk, *first = NULL; 818 const struct udphdr *uh = udp_hdr(skb); 819 unsigned short hnum = ntohs(uh->dest); 820 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); 821 unsigned int offset = offsetof(typeof(*sk), sk_node); 822 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); 823 int dif = inet6_iif(skb); 824 int sdif = inet6_sdif(skb); 825 struct hlist_node *node; 826 struct sk_buff *nskb; 827 828 if (use_hash2) { 829 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) & 830 udptable->mask; 831 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask; 832 start_lookup: 833 hslot = &udptable->hash2[hash2]; 834 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 835 } 836 837 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { 838 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr, 839 uh->source, saddr, dif, sdif, 840 hnum)) 841 continue; 842 /* If zero checksum and no_check is not on for 843 * the socket then skip it. 844 */ 845 if (!uh->check && !udp_sk(sk)->no_check6_rx) 846 continue; 847 if (!first) { 848 first = sk; 849 continue; 850 } 851 nskb = skb_clone(skb, GFP_ATOMIC); 852 if (unlikely(!nskb)) { 853 atomic_inc(&sk->sk_drops); 854 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 855 IS_UDPLITE(sk)); 856 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, 857 IS_UDPLITE(sk)); 858 continue; 859 } 860 861 if (udpv6_queue_rcv_skb(sk, nskb) > 0) 862 consume_skb(nskb); 863 } 864 865 /* Also lookup *:port if we are using hash2 and haven't done so yet. */ 866 if (use_hash2 && hash2 != hash2_any) { 867 hash2 = hash2_any; 868 goto start_lookup; 869 } 870 871 if (first) { 872 if (udpv6_queue_rcv_skb(first, skb) > 0) 873 consume_skb(skb); 874 } else { 875 kfree_skb(skb); 876 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI, 877 proto == IPPROTO_UDPLITE); 878 } 879 return 0; 880 } 881 882 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) 883 { 884 if (udp_sk_rx_dst_set(sk, dst)) { 885 const struct rt6_info *rt = (const struct rt6_info *)dst; 886 887 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); 888 } 889 } 890 891 /* wrapper for udp_queue_rcv_skb tacking care of csum conversion and 892 * return code conversion for ip layer consumption 893 */ 894 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, 895 struct udphdr *uh) 896 { 897 int ret; 898 899 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) 900 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo); 901 902 ret = udpv6_queue_rcv_skb(sk, skb); 903 904 /* a return value > 0 means to resubmit the input */ 905 if (ret > 0) 906 return ret; 907 return 0; 908 } 909 910 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 911 int proto) 912 { 913 const struct in6_addr *saddr, *daddr; 914 struct net *net = dev_net(skb->dev); 915 struct udphdr *uh; 916 struct sock *sk; 917 bool refcounted; 918 u32 ulen = 0; 919 920 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 921 goto discard; 922 923 saddr = &ipv6_hdr(skb)->saddr; 924 daddr = &ipv6_hdr(skb)->daddr; 925 uh = udp_hdr(skb); 926 927 ulen = ntohs(uh->len); 928 if (ulen > skb->len) 929 goto short_packet; 930 931 if (proto == IPPROTO_UDP) { 932 /* UDP validates ulen. */ 933 934 /* Check for jumbo payload */ 935 if (ulen == 0) 936 ulen = skb->len; 937 938 if (ulen < sizeof(*uh)) 939 goto short_packet; 940 941 if (ulen < skb->len) { 942 if (pskb_trim_rcsum(skb, ulen)) 943 goto short_packet; 944 saddr = &ipv6_hdr(skb)->saddr; 945 daddr = &ipv6_hdr(skb)->daddr; 946 uh = udp_hdr(skb); 947 } 948 } 949 950 if (udp6_csum_init(skb, uh, proto)) 951 goto csum_error; 952 953 /* Check if the socket is already available, e.g. due to early demux */ 954 sk = skb_steal_sock(skb, &refcounted); 955 if (sk) { 956 struct dst_entry *dst = skb_dst(skb); 957 int ret; 958 959 if (unlikely(sk->sk_rx_dst != dst)) 960 udp6_sk_rx_dst_set(sk, dst); 961 962 if (!uh->check && !udp_sk(sk)->no_check6_rx) { 963 if (refcounted) 964 sock_put(sk); 965 goto report_csum_error; 966 } 967 968 ret = udp6_unicast_rcv_skb(sk, skb, uh); 969 if (refcounted) 970 sock_put(sk); 971 return ret; 972 } 973 974 /* 975 * Multicast receive code 976 */ 977 if (ipv6_addr_is_multicast(daddr)) 978 return __udp6_lib_mcast_deliver(net, skb, 979 saddr, daddr, udptable, proto); 980 981 /* Unicast */ 982 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 983 if (sk) { 984 if (!uh->check && !udp_sk(sk)->no_check6_rx) 985 goto report_csum_error; 986 return udp6_unicast_rcv_skb(sk, skb, uh); 987 } 988 989 if (!uh->check) 990 goto report_csum_error; 991 992 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 993 goto discard; 994 995 if (udp_lib_checksum_complete(skb)) 996 goto csum_error; 997 998 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 999 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); 1000 1001 kfree_skb(skb); 1002 return 0; 1003 1004 short_packet: 1005 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", 1006 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1007 saddr, ntohs(uh->source), 1008 ulen, skb->len, 1009 daddr, ntohs(uh->dest)); 1010 goto discard; 1011 1012 report_csum_error: 1013 udp6_csum_zero_error(skb); 1014 csum_error: 1015 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 1016 discard: 1017 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1018 kfree_skb(skb); 1019 return 0; 1020 } 1021 1022 1023 static struct sock *__udp6_lib_demux_lookup(struct net *net, 1024 __be16 loc_port, const struct in6_addr *loc_addr, 1025 __be16 rmt_port, const struct in6_addr *rmt_addr, 1026 int dif, int sdif) 1027 { 1028 unsigned short hnum = ntohs(loc_port); 1029 unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum); 1030 unsigned int slot2 = hash2 & udp_table.mask; 1031 struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; 1032 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); 1033 struct sock *sk; 1034 1035 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 1036 if (sk->sk_state == TCP_ESTABLISHED && 1037 INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif)) 1038 return sk; 1039 /* Only check first socket in chain */ 1040 break; 1041 } 1042 return NULL; 1043 } 1044 1045 INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb) 1046 { 1047 struct net *net = dev_net(skb->dev); 1048 const struct udphdr *uh; 1049 struct sock *sk; 1050 struct dst_entry *dst; 1051 int dif = skb->dev->ifindex; 1052 int sdif = inet6_sdif(skb); 1053 1054 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 1055 sizeof(struct udphdr))) 1056 return; 1057 1058 uh = udp_hdr(skb); 1059 1060 if (skb->pkt_type == PACKET_HOST) 1061 sk = __udp6_lib_demux_lookup(net, uh->dest, 1062 &ipv6_hdr(skb)->daddr, 1063 uh->source, &ipv6_hdr(skb)->saddr, 1064 dif, sdif); 1065 else 1066 return; 1067 1068 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 1069 return; 1070 1071 skb->sk = sk; 1072 skb->destructor = sock_efree; 1073 dst = READ_ONCE(sk->sk_rx_dst); 1074 1075 if (dst) 1076 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); 1077 if (dst) { 1078 /* set noref for now. 1079 * any place which wants to hold dst has to call 1080 * dst_hold_safe() 1081 */ 1082 skb_dst_set_noref(skb, dst); 1083 } 1084 } 1085 1086 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb) 1087 { 1088 return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP); 1089 } 1090 1091 /* 1092 * Throw away all pending data and cancel the corking. Socket is locked. 1093 */ 1094 static void udp_v6_flush_pending_frames(struct sock *sk) 1095 { 1096 struct udp_sock *up = udp_sk(sk); 1097 1098 if (up->pending == AF_INET) 1099 udp_flush_pending_frames(sk); 1100 else if (up->pending) { 1101 up->len = 0; 1102 up->pending = 0; 1103 ip6_flush_pending_frames(sk); 1104 } 1105 } 1106 1107 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, 1108 int addr_len) 1109 { 1110 if (addr_len < offsetofend(struct sockaddr, sa_family)) 1111 return -EINVAL; 1112 /* The following checks are replicated from __ip6_datagram_connect() 1113 * and intended to prevent BPF program called below from accessing 1114 * bytes that are out of the bound specified by user in addr_len. 1115 */ 1116 if (uaddr->sa_family == AF_INET) { 1117 if (__ipv6_only_sock(sk)) 1118 return -EAFNOSUPPORT; 1119 return udp_pre_connect(sk, uaddr, addr_len); 1120 } 1121 1122 if (addr_len < SIN6_LEN_RFC2133) 1123 return -EINVAL; 1124 1125 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr); 1126 } 1127 1128 /** 1129 * udp6_hwcsum_outgoing - handle outgoing HW checksumming 1130 * @sk: socket we are sending on 1131 * @skb: sk_buff containing the filled-in UDP header 1132 * (checksum field must be zeroed out) 1133 * @saddr: source address 1134 * @daddr: destination address 1135 * @len: length of packet 1136 */ 1137 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, 1138 const struct in6_addr *saddr, 1139 const struct in6_addr *daddr, int len) 1140 { 1141 unsigned int offset; 1142 struct udphdr *uh = udp_hdr(skb); 1143 struct sk_buff *frags = skb_shinfo(skb)->frag_list; 1144 __wsum csum = 0; 1145 1146 if (!frags) { 1147 /* Only one fragment on the socket. */ 1148 skb->csum_start = skb_transport_header(skb) - skb->head; 1149 skb->csum_offset = offsetof(struct udphdr, check); 1150 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0); 1151 } else { 1152 /* 1153 * HW-checksum won't work as there are two or more 1154 * fragments on the socket so that all csums of sk_buffs 1155 * should be together 1156 */ 1157 offset = skb_transport_offset(skb); 1158 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 1159 csum = skb->csum; 1160 1161 skb->ip_summed = CHECKSUM_NONE; 1162 1163 do { 1164 csum = csum_add(csum, frags->csum); 1165 } while ((frags = frags->next)); 1166 1167 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 1168 csum); 1169 if (uh->check == 0) 1170 uh->check = CSUM_MANGLED_0; 1171 } 1172 } 1173 1174 /* 1175 * Sending 1176 */ 1177 1178 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, 1179 struct inet_cork *cork) 1180 { 1181 struct sock *sk = skb->sk; 1182 struct udphdr *uh; 1183 int err = 0; 1184 int is_udplite = IS_UDPLITE(sk); 1185 __wsum csum = 0; 1186 int offset = skb_transport_offset(skb); 1187 int len = skb->len - offset; 1188 int datalen = len - sizeof(*uh); 1189 1190 /* 1191 * Create a UDP header 1192 */ 1193 uh = udp_hdr(skb); 1194 uh->source = fl6->fl6_sport; 1195 uh->dest = fl6->fl6_dport; 1196 uh->len = htons(len); 1197 uh->check = 0; 1198 1199 if (cork->gso_size) { 1200 const int hlen = skb_network_header_len(skb) + 1201 sizeof(struct udphdr); 1202 1203 if (hlen + cork->gso_size > cork->fragsize) { 1204 kfree_skb(skb); 1205 return -EINVAL; 1206 } 1207 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { 1208 kfree_skb(skb); 1209 return -EINVAL; 1210 } 1211 if (udp_sk(sk)->no_check6_tx) { 1212 kfree_skb(skb); 1213 return -EINVAL; 1214 } 1215 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 1216 dst_xfrm(skb_dst(skb))) { 1217 kfree_skb(skb); 1218 return -EIO; 1219 } 1220 1221 if (datalen > cork->gso_size) { 1222 skb_shinfo(skb)->gso_size = cork->gso_size; 1223 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 1224 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen, 1225 cork->gso_size); 1226 } 1227 goto csum_partial; 1228 } 1229 1230 if (is_udplite) 1231 csum = udplite_csum(skb); 1232 else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */ 1233 skb->ip_summed = CHECKSUM_NONE; 1234 goto send; 1235 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 1236 csum_partial: 1237 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len); 1238 goto send; 1239 } else 1240 csum = udp_csum(skb); 1241 1242 /* add protocol-dependent pseudo-header */ 1243 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, 1244 len, fl6->flowi6_proto, csum); 1245 if (uh->check == 0) 1246 uh->check = CSUM_MANGLED_0; 1247 1248 send: 1249 err = ip6_send_skb(skb); 1250 if (err) { 1251 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { 1252 UDP6_INC_STATS(sock_net(sk), 1253 UDP_MIB_SNDBUFERRORS, is_udplite); 1254 err = 0; 1255 } 1256 } else { 1257 UDP6_INC_STATS(sock_net(sk), 1258 UDP_MIB_OUTDATAGRAMS, is_udplite); 1259 } 1260 return err; 1261 } 1262 1263 static int udp_v6_push_pending_frames(struct sock *sk) 1264 { 1265 struct sk_buff *skb; 1266 struct udp_sock *up = udp_sk(sk); 1267 struct flowi6 fl6; 1268 int err = 0; 1269 1270 if (up->pending == AF_INET) 1271 return udp_push_pending_frames(sk); 1272 1273 /* ip6_finish_skb will release the cork, so make a copy of 1274 * fl6 here. 1275 */ 1276 fl6 = inet_sk(sk)->cork.fl.u.ip6; 1277 1278 skb = ip6_finish_skb(sk); 1279 if (!skb) 1280 goto out; 1281 1282 err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base); 1283 1284 out: 1285 up->len = 0; 1286 up->pending = 0; 1287 return err; 1288 } 1289 1290 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 1291 { 1292 struct ipv6_txoptions opt_space; 1293 struct udp_sock *up = udp_sk(sk); 1294 struct inet_sock *inet = inet_sk(sk); 1295 struct ipv6_pinfo *np = inet6_sk(sk); 1296 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 1297 struct in6_addr *daddr, *final_p, final; 1298 struct ipv6_txoptions *opt = NULL; 1299 struct ipv6_txoptions *opt_to_free = NULL; 1300 struct ip6_flowlabel *flowlabel = NULL; 1301 struct flowi6 fl6; 1302 struct dst_entry *dst; 1303 struct ipcm6_cookie ipc6; 1304 int addr_len = msg->msg_namelen; 1305 bool connected = false; 1306 int ulen = len; 1307 int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE; 1308 int err; 1309 int is_udplite = IS_UDPLITE(sk); 1310 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 1311 1312 ipcm6_init(&ipc6); 1313 ipc6.gso_size = READ_ONCE(up->gso_size); 1314 ipc6.sockc.tsflags = sk->sk_tsflags; 1315 ipc6.sockc.mark = sk->sk_mark; 1316 1317 /* destination address check */ 1318 if (sin6) { 1319 if (addr_len < offsetof(struct sockaddr, sa_data)) 1320 return -EINVAL; 1321 1322 switch (sin6->sin6_family) { 1323 case AF_INET6: 1324 if (addr_len < SIN6_LEN_RFC2133) 1325 return -EINVAL; 1326 daddr = &sin6->sin6_addr; 1327 if (ipv6_addr_any(daddr) && 1328 ipv6_addr_v4mapped(&np->saddr)) 1329 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), 1330 daddr); 1331 break; 1332 case AF_INET: 1333 goto do_udp_sendmsg; 1334 case AF_UNSPEC: 1335 msg->msg_name = sin6 = NULL; 1336 msg->msg_namelen = addr_len = 0; 1337 daddr = NULL; 1338 break; 1339 default: 1340 return -EINVAL; 1341 } 1342 } else if (!up->pending) { 1343 if (sk->sk_state != TCP_ESTABLISHED) 1344 return -EDESTADDRREQ; 1345 daddr = &sk->sk_v6_daddr; 1346 } else 1347 daddr = NULL; 1348 1349 if (daddr) { 1350 if (ipv6_addr_v4mapped(daddr)) { 1351 struct sockaddr_in sin; 1352 sin.sin_family = AF_INET; 1353 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport; 1354 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 1355 msg->msg_name = &sin; 1356 msg->msg_namelen = sizeof(sin); 1357 do_udp_sendmsg: 1358 if (__ipv6_only_sock(sk)) 1359 return -ENETUNREACH; 1360 return udp_sendmsg(sk, msg, len); 1361 } 1362 } 1363 1364 if (up->pending == AF_INET) 1365 return udp_sendmsg(sk, msg, len); 1366 1367 /* Rough check on arithmetic overflow, 1368 better check is made in ip6_append_data(). 1369 */ 1370 if (len > INT_MAX - sizeof(struct udphdr)) 1371 return -EMSGSIZE; 1372 1373 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 1374 if (up->pending) { 1375 /* 1376 * There are pending frames. 1377 * The socket lock must be held while it's corked. 1378 */ 1379 lock_sock(sk); 1380 if (likely(up->pending)) { 1381 if (unlikely(up->pending != AF_INET6)) { 1382 release_sock(sk); 1383 return -EAFNOSUPPORT; 1384 } 1385 dst = NULL; 1386 goto do_append_data; 1387 } 1388 release_sock(sk); 1389 } 1390 ulen += sizeof(struct udphdr); 1391 1392 memset(&fl6, 0, sizeof(fl6)); 1393 1394 if (sin6) { 1395 if (sin6->sin6_port == 0) 1396 return -EINVAL; 1397 1398 fl6.fl6_dport = sin6->sin6_port; 1399 daddr = &sin6->sin6_addr; 1400 1401 if (np->sndflow) { 1402 fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; 1403 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { 1404 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 1405 if (IS_ERR(flowlabel)) 1406 return -EINVAL; 1407 } 1408 } 1409 1410 /* 1411 * Otherwise it will be difficult to maintain 1412 * sk->sk_dst_cache. 1413 */ 1414 if (sk->sk_state == TCP_ESTABLISHED && 1415 ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) 1416 daddr = &sk->sk_v6_daddr; 1417 1418 if (addr_len >= sizeof(struct sockaddr_in6) && 1419 sin6->sin6_scope_id && 1420 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr))) 1421 fl6.flowi6_oif = sin6->sin6_scope_id; 1422 } else { 1423 if (sk->sk_state != TCP_ESTABLISHED) 1424 return -EDESTADDRREQ; 1425 1426 fl6.fl6_dport = inet->inet_dport; 1427 daddr = &sk->sk_v6_daddr; 1428 fl6.flowlabel = np->flow_label; 1429 connected = true; 1430 } 1431 1432 if (!fl6.flowi6_oif) 1433 fl6.flowi6_oif = sk->sk_bound_dev_if; 1434 1435 if (!fl6.flowi6_oif) 1436 fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 1437 1438 fl6.flowi6_mark = ipc6.sockc.mark; 1439 fl6.flowi6_uid = sk->sk_uid; 1440 1441 if (msg->msg_controllen) { 1442 opt = &opt_space; 1443 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1444 opt->tot_len = sizeof(*opt); 1445 ipc6.opt = opt; 1446 1447 err = udp_cmsg_send(sk, msg, &ipc6.gso_size); 1448 if (err > 0) 1449 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, 1450 &ipc6); 1451 if (err < 0) { 1452 fl6_sock_release(flowlabel); 1453 return err; 1454 } 1455 if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { 1456 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 1457 if (IS_ERR(flowlabel)) 1458 return -EINVAL; 1459 } 1460 if (!(opt->opt_nflen|opt->opt_flen)) 1461 opt = NULL; 1462 connected = false; 1463 } 1464 if (!opt) { 1465 opt = txopt_get(np); 1466 opt_to_free = opt; 1467 } 1468 if (flowlabel) 1469 opt = fl6_merge_options(&opt_space, flowlabel, opt); 1470 opt = ipv6_fixup_options(&opt_space, opt); 1471 ipc6.opt = opt; 1472 1473 fl6.flowi6_proto = sk->sk_protocol; 1474 fl6.daddr = *daddr; 1475 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) 1476 fl6.saddr = np->saddr; 1477 fl6.fl6_sport = inet->inet_sport; 1478 1479 if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) { 1480 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, 1481 (struct sockaddr *)sin6, &fl6.saddr); 1482 if (err) 1483 goto out_no_dst; 1484 if (sin6) { 1485 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { 1486 /* BPF program rewrote IPv6-only by IPv4-mapped 1487 * IPv6. It's currently unsupported. 1488 */ 1489 err = -ENOTSUPP; 1490 goto out_no_dst; 1491 } 1492 if (sin6->sin6_port == 0) { 1493 /* BPF program set invalid port. Reject it. */ 1494 err = -EINVAL; 1495 goto out_no_dst; 1496 } 1497 fl6.fl6_dport = sin6->sin6_port; 1498 fl6.daddr = sin6->sin6_addr; 1499 } 1500 } 1501 1502 if (ipv6_addr_any(&fl6.daddr)) 1503 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ 1504 1505 final_p = fl6_update_dst(&fl6, opt, &final); 1506 if (final_p) 1507 connected = false; 1508 1509 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) { 1510 fl6.flowi6_oif = np->mcast_oif; 1511 connected = false; 1512 } else if (!fl6.flowi6_oif) 1513 fl6.flowi6_oif = np->ucast_oif; 1514 1515 security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 1516 1517 if (ipc6.tclass < 0) 1518 ipc6.tclass = np->tclass; 1519 1520 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); 1521 1522 dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected); 1523 if (IS_ERR(dst)) { 1524 err = PTR_ERR(dst); 1525 dst = NULL; 1526 goto out; 1527 } 1528 1529 if (ipc6.hlimit < 0) 1530 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); 1531 1532 if (msg->msg_flags&MSG_CONFIRM) 1533 goto do_confirm; 1534 back_from_confirm: 1535 1536 /* Lockless fast path for the non-corking case */ 1537 if (!corkreq) { 1538 struct inet_cork_full cork; 1539 struct sk_buff *skb; 1540 1541 skb = ip6_make_skb(sk, getfrag, msg, ulen, 1542 sizeof(struct udphdr), &ipc6, 1543 &fl6, (struct rt6_info *)dst, 1544 msg->msg_flags, &cork); 1545 err = PTR_ERR(skb); 1546 if (!IS_ERR_OR_NULL(skb)) 1547 err = udp_v6_send_skb(skb, &fl6, &cork.base); 1548 goto out; 1549 } 1550 1551 lock_sock(sk); 1552 if (unlikely(up->pending)) { 1553 /* The socket is already corked while preparing it. */ 1554 /* ... which is an evident application bug. --ANK */ 1555 release_sock(sk); 1556 1557 net_dbg_ratelimited("udp cork app bug 2\n"); 1558 err = -EINVAL; 1559 goto out; 1560 } 1561 1562 up->pending = AF_INET6; 1563 1564 do_append_data: 1565 if (ipc6.dontfrag < 0) 1566 ipc6.dontfrag = np->dontfrag; 1567 up->len += ulen; 1568 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), 1569 &ipc6, &fl6, (struct rt6_info *)dst, 1570 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 1571 if (err) 1572 udp_v6_flush_pending_frames(sk); 1573 else if (!corkreq) 1574 err = udp_v6_push_pending_frames(sk); 1575 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) 1576 up->pending = 0; 1577 1578 if (err > 0) 1579 err = np->recverr ? net_xmit_errno(err) : 0; 1580 release_sock(sk); 1581 1582 out: 1583 dst_release(dst); 1584 out_no_dst: 1585 fl6_sock_release(flowlabel); 1586 txopt_put(opt_to_free); 1587 if (!err) 1588 return len; 1589 /* 1590 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting 1591 * ENOBUFS might not be good (it's not tunable per se), but otherwise 1592 * we don't have a good statistic (IpOutDiscards but it can be too many 1593 * things). We could add another new stat but at least for now that 1594 * seems like overkill. 1595 */ 1596 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 1597 UDP6_INC_STATS(sock_net(sk), 1598 UDP_MIB_SNDBUFERRORS, is_udplite); 1599 } 1600 return err; 1601 1602 do_confirm: 1603 if (msg->msg_flags & MSG_PROBE) 1604 dst_confirm_neigh(dst, &fl6.daddr); 1605 if (!(msg->msg_flags&MSG_PROBE) || len) 1606 goto back_from_confirm; 1607 err = 0; 1608 goto out; 1609 } 1610 1611 void udpv6_destroy_sock(struct sock *sk) 1612 { 1613 struct udp_sock *up = udp_sk(sk); 1614 lock_sock(sk); 1615 1616 /* protects from races with udp_abort() */ 1617 sock_set_flag(sk, SOCK_DEAD); 1618 udp_v6_flush_pending_frames(sk); 1619 release_sock(sk); 1620 1621 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 1622 if (up->encap_type) { 1623 void (*encap_destroy)(struct sock *sk); 1624 encap_destroy = READ_ONCE(up->encap_destroy); 1625 if (encap_destroy) 1626 encap_destroy(sk); 1627 } 1628 if (up->encap_enabled) { 1629 static_branch_dec(&udpv6_encap_needed_key); 1630 udp_encap_disable(); 1631 } 1632 } 1633 1634 inet6_destroy_sock(sk); 1635 } 1636 1637 /* 1638 * Socket option code for UDP 1639 */ 1640 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 1641 unsigned int optlen) 1642 { 1643 if (level == SOL_UDP || level == SOL_UDPLITE) 1644 return udp_lib_setsockopt(sk, level, optname, 1645 optval, optlen, 1646 udp_v6_push_pending_frames); 1647 return ipv6_setsockopt(sk, level, optname, optval, optlen); 1648 } 1649 1650 int udpv6_getsockopt(struct sock *sk, int level, int optname, 1651 char __user *optval, int __user *optlen) 1652 { 1653 if (level == SOL_UDP || level == SOL_UDPLITE) 1654 return udp_lib_getsockopt(sk, level, optname, optval, optlen); 1655 return ipv6_getsockopt(sk, level, optname, optval, optlen); 1656 } 1657 1658 /* thinking of making this const? Don't. 1659 * early_demux can change based on sysctl. 1660 */ 1661 static struct inet6_protocol udpv6_protocol = { 1662 .early_demux = udp_v6_early_demux, 1663 .early_demux_handler = udp_v6_early_demux, 1664 .handler = udpv6_rcv, 1665 .err_handler = udpv6_err, 1666 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1667 }; 1668 1669 /* ------------------------------------------------------------------------ */ 1670 #ifdef CONFIG_PROC_FS 1671 int udp6_seq_show(struct seq_file *seq, void *v) 1672 { 1673 if (v == SEQ_START_TOKEN) { 1674 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); 1675 } else { 1676 int bucket = ((struct udp_iter_state *)seq->private)->bucket; 1677 struct inet_sock *inet = inet_sk(v); 1678 __u16 srcp = ntohs(inet->inet_sport); 1679 __u16 destp = ntohs(inet->inet_dport); 1680 __ip6_dgram_sock_seq_show(seq, v, srcp, destp, 1681 udp_rqueue_get(v), bucket); 1682 } 1683 return 0; 1684 } 1685 1686 const struct seq_operations udp6_seq_ops = { 1687 .start = udp_seq_start, 1688 .next = udp_seq_next, 1689 .stop = udp_seq_stop, 1690 .show = udp6_seq_show, 1691 }; 1692 EXPORT_SYMBOL(udp6_seq_ops); 1693 1694 static struct udp_seq_afinfo udp6_seq_afinfo = { 1695 .family = AF_INET6, 1696 .udp_table = &udp_table, 1697 }; 1698 1699 int __net_init udp6_proc_init(struct net *net) 1700 { 1701 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops, 1702 sizeof(struct udp_iter_state), &udp6_seq_afinfo)) 1703 return -ENOMEM; 1704 return 0; 1705 } 1706 1707 void udp6_proc_exit(struct net *net) 1708 { 1709 remove_proc_entry("udp6", net->proc_net); 1710 } 1711 #endif /* CONFIG_PROC_FS */ 1712 1713 /* ------------------------------------------------------------------------ */ 1714 1715 struct proto udpv6_prot = { 1716 .name = "UDPv6", 1717 .owner = THIS_MODULE, 1718 .close = udp_lib_close, 1719 .pre_connect = udpv6_pre_connect, 1720 .connect = ip6_datagram_connect, 1721 .disconnect = udp_disconnect, 1722 .ioctl = udp_ioctl, 1723 .init = udp_init_sock, 1724 .destroy = udpv6_destroy_sock, 1725 .setsockopt = udpv6_setsockopt, 1726 .getsockopt = udpv6_getsockopt, 1727 .sendmsg = udpv6_sendmsg, 1728 .recvmsg = udpv6_recvmsg, 1729 .release_cb = ip6_datagram_release_cb, 1730 .hash = udp_lib_hash, 1731 .unhash = udp_lib_unhash, 1732 .rehash = udp_v6_rehash, 1733 .get_port = udp_v6_get_port, 1734 #ifdef CONFIG_BPF_SYSCALL 1735 .psock_update_sk_prot = udp_bpf_update_proto, 1736 #endif 1737 .memory_allocated = &udp_memory_allocated, 1738 .sysctl_mem = sysctl_udp_mem, 1739 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), 1740 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), 1741 .obj_size = sizeof(struct udp6_sock), 1742 .h.udp_table = &udp_table, 1743 .diag_destroy = udp_abort, 1744 }; 1745 1746 static struct inet_protosw udpv6_protosw = { 1747 .type = SOCK_DGRAM, 1748 .protocol = IPPROTO_UDP, 1749 .prot = &udpv6_prot, 1750 .ops = &inet6_dgram_ops, 1751 .flags = INET_PROTOSW_PERMANENT, 1752 }; 1753 1754 int __init udpv6_init(void) 1755 { 1756 int ret; 1757 1758 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP); 1759 if (ret) 1760 goto out; 1761 1762 ret = inet6_register_protosw(&udpv6_protosw); 1763 if (ret) 1764 goto out_udpv6_protocol; 1765 out: 1766 return ret; 1767 1768 out_udpv6_protocol: 1769 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); 1770 goto out; 1771 } 1772 1773 void udpv6_exit(void) 1774 { 1775 inet6_unregister_protosw(&udpv6_protosw); 1776 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); 1777 } 1778