// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <linux/btf_ids.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
#include <net/gro.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);
DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);

#define MAX_UDP_PORTS		65536
#define PORTS_PER_CHAIN		(MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN_PERNET)

static struct udp_table *udp_get_table_prot(struct sock *sk)
{
	return sk->sk_prot->h.udp_table ? : sock_net(sk)->ipv4.udp_table;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
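
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * the conflict rules above are what let two sockets share one port when
 * both set SO_REUSEPORT before bind() and belong to the same UID:
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * A second socket repeating the same sequence for the same address and
 * port joins the group via udp_reuseport_add_sock() above instead of
 * failing with EADDRINUSE.
 */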

/**
 * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
 *
 * @sk:   socket struct in question
 * @snum: port number to look up
 * @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                  with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_table *udptable = udp_get_table_prot(sk);
	struct udp_hslot *hslot, *hslot2;
	struct net *net = sock_net(sk);
	int error = -EADDRINUSE;

	if (!snum) {
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
		unsigned short first, last;
		int low, high, remaining;
		unsigned int rand;

		inet_sk_get_local_port_range(sk, &low, &high);
		remaining = (high - low) + 1;

		rand = get_random_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2 &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
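
/*
 * Worked example for the odd-step scan in udp_lib_get_port() (numbers
 * are illustrative): with a 256-slot table (log = 8), the ports hashing
 * to one chain differ by multiples of 256. A step of, say, 3 * 256 = 768
 * keeps snum in the same chain, and because 3 is odd it is coprime to
 * the 256 candidate ports of that chain, so repeated addition modulo
 * 65536 visits every one of them before snum wraps back to `first`.
 * That is how an odd multiple of the table size yields both
 * randomization and full range coverage.
 */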

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score += 4;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;
	return score;
}

INDIRECT_CALLABLE_SCOPE
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
		const __be32 faddr, const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
						       saddr, sport, daddr, hnum, udp_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			badness = compute_score(result, net, saddr, sport,
						daddr, hnum, dif, sdif);
		}
	}
	return result;
}
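
/*
 * Score arithmetic by example (derived from compute_score() above): an
 * AF_INET socket that is connected (daddr and dport both match), bound
 * to the receiving device and last active on this CPU scores
 * 2 + 4 + 4 + 4 + 1 = 15, while a wildcard-ish listener can reach at
 * most 2 + 1 = 3, so a matching connected socket always wins the lookup.
 */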

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard socket */
	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
					       saddr, sport, daddr, hnum, dif,
					       udp_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp4_lib_lookup2(net, saddr, sport,
				  htonl(INADDR_ANY), hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet_get_iif_sdif(skb, &iif, &sdif);

	return __udp4_lib_lookup(net, iph->saddr, sport,
				 iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}
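
/*
 * Concrete walk-through of __udp4_lib_lookup() above (addresses are
 * illustrative): for a packet to 192.0.2.1:53 it first scans the hash2
 * chain keyed on (192.0.2.1, 53); if no connected match is found and the
 * standard per-netns table is in use, a BPF sk_lookup program gets a
 * chance to redirect; only then does it fall back to the chain keyed on
 * (0.0.0.0, 53) for wildcard-bound listeners.
 */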

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}

DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

void udp_encap_disable(void)
{
	static_branch_dec(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_disable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb, u32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv4 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, iph->ihl << 2);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk)
		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);

	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
						  info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, info,
						  (u8 *)(uh + 1));
		goto out;
	}
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, dev_net(skb->dev)->ipv4.udp_table);
}
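
/*
 * Userspace sketch of the recverr branch above (illustrative, not part
 * of this file): ICMP-derived errors are only queued for sockets that
 * opted in:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	...
 *	n = recvmsg(fd, &msg, MSG_ERRQUEUE);
 *
 * The error then arrives as a SO_EE_ORIGIN_ICMP extended-error cmsg
 * built by ip_icmp_error(); without IP_RECVERR only sk_err is set (and
 * only for connected sockets on hard errors), so the next call on the
 * socket merely fails.
 */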

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * udp4_hwcsum - handle outgoing HW checksumming
 * @skb: sk_buff containing the filled-in UDP header
 *       (checksum field must be zeroed out)
 * @src: source IP address
 * @dst: destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW checksumming cannot be used when there are two or
		 * more fragments on the socket: all the fragments'
		 * checksums have to be folded together first.
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);
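
/*
 * Caller sketch for orientation (see udp_tunnel_xmit_skb() for a real
 * user; the variable names here are illustrative): a tunnel driver
 * fills in the UDP header and lets udp_set_csum() pick the strategy:
 *
 *	uh->source = src_port;
 *	uh->dest = dst_port;
 *	uh->len = htons(skb->len);
 *	uh->check = 0;
 *	udp_set_csum(nocheck, skb, saddr, daddr, skb->len);
 *
 * GSO skbs get only the pseudo-header checksum, CHECKSUM_PARTIAL skbs
 * are finished with local checksum offload, and anything else is set up
 * for the device to complete.
 */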

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)					 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);
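
/*
 * Userspace sketch of the UDP_SEGMENT cmsg parsed above (illustrative;
 * the same value can also be set once per socket with setsockopt()):
 *
 *	char ctl[CMSG_SPACE(sizeof(__u16))] = {};
 *	struct cmsghdr *cm;
 *
 *	msg.msg_control = ctl;
 *	msg.msg_controllen = sizeof(ctl);
 *	cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_UDP;
 *	cm->cmsg_type = UDP_SEGMENT;
 *	cm->cmsg_len = CMSG_LEN(sizeof(__u16));
 *	*(__u16 *)CMSG_DATA(cm) = 1400;
 *
 * One large sendmsg() buffer then reaches udp_sendmsg() with
 * ipc.gso_size = 1400, and udp_send_skb() marks the skb SKB_GSO_UDP_L4
 * so it is segmented into 1400-byte datagrams.
 */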

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	u8 tos, scope;
	__be16 dport;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags & MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = READ_ONCE(up->gso_size);

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	scope = ip_sendmsg_scope(inet, &ipc, msg);
	if (scope == RT_SCOPE_LINK)
		connected = 0;

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = inet->uc_index;
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast and
		 * uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != inet->uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      inet->uc_index)) {
			ipc.oif = inet->uc_index;
		}
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, scope,
				   sk->sk_protocol, flow_flags, faddr, saddr,
				   dport, inet->inet_sport, sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		struct inet_cork cork;

		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  &cork, msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4, &cork);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("socket already corked\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
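
/*
 * Corking by example (an illustrative userspace sequence; the
 * up->pending paths above implement it):
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, part1, len1, 0);
 *	send(fd, part2, len2, 0);
 *	on = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *
 * Each send() appends via ip_append_data(), and the uncork emits a
 * single datagram through udp_push_pending_frames(). Passing MSG_MORE
 * on each call has the same per-datagram effect.
 */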

void udp_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!up->pending || READ_ONCE(up->corkflag))
		return;

	lock_sock(sk);
	if (up->pending && !READ_ONCE(up->corkflag))
		udp_push_pending_frames(sk);
	release_sock(sk);
}
EXPORT_SYMBOL_GPL(udp_splice_eof);

#define UDP_SKB_IS_STATELESS 0x80000000

/* all head states (dst, sk, nf conntrack) except skb extensions are
 * cleared by udp_rcv().
 *
 * We need to preserve secpath, if present, to eventually process
 * IP_CMSG_PASSSEC at recvmsg() time.
 *
 * Other extensions can be cleared.
 */
static bool udp_try_make_stateless(struct sk_buff *skb)
{
	if (!skb_has_extensions(skb))
		return true;

	if (!secpath_exists(skb)) {
		skb_ext_reset(skb);
		return true;
	}

	return false;
}

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	if (udp_try_make_stateless(skb))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}
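
/*
 * Layout note for the scratch word set above: _tsize_state packs the
 * skb truesize (which fits in 31 bits) into the low bits and the
 * UDP_SKB_IS_STATELESS flag into bit 31; e.g. a stateless skb with
 * truesize 2304 stores 0x80000900. udp_skb_truesize() and
 * udp_skb_has_head_state() below undo this packing.
 */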

static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
{
	/* We come here after udp_lib_checksum_complete() returned 0.
	 * This means that __skb_checksum_complete() might have
	 * set skb->csum_valid to 1.
	 * On 64bit platforms, we can set csum_unnecessary
	 * to true, but only if the skb is not shared.
	 */
#if BITS_PER_LONG == 64
	if (!skb_shared(skb))
		udp_skb_scratch(skb)->csum_unnecessary = true;
#endif
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < READ_ONCE(up->forward_threshold) &&
		    !skb_queue_empty(&up->reader_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}

/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in
 * skb->dev_scratch to avoid a cache line miss while the receive_queue
 * lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}
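
/*
 * Worked example for the page-aligned reclaim above (illustrative
 * numbers, PAGE_SIZE = 4096): with sk_forward_alloc at 9000 after the
 * "+= size" step, a partial release computes
 * amt = (9000 - 1) & ~4095 = 8192, returns those two pages to the
 * memory accounting via __sk_mem_reduce_allocated(), and keeps the
 * 808-byte remainder as forward allocation for the next datagram.
 */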

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks are allocated on a per-cpu basis, instead of a
 * per-socket one (which would consume a cache line per socket).
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

static int udp_rmem_schedule(struct sock *sk, int size)
{
	int delta;

	delta = size - sk->sk_forward_alloc;
	if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV))
		return -ENOBUFS;

	return 0;
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it is helpful to give udp_recvmsg()
	 * linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	err = udp_rmem_schedule(sk, size);
	if (err) {
		spin_unlock(&list->lock);
		goto uncharge_drop;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

void udp_destruct_common(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);
}
EXPORT_SYMBOL_GPL(udp_destruct_common);

static void udp_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet_sock_destruct(sk);
}

int udp_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udp_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	if (!skb_unref(skb))
		return;

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	__consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			udp_skb_csum_unnecessary_set(skb);
			break;
		}
	}
	return skb;
}

/**
 * first_packet_length - return length of first packet in receive queue
 * @sk: socket
 *
 * Drops all bad checksum frames, until a valid one is found.
 * Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty_lockless(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, int *karg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		*karg = sk_wmem_alloc_get(sk);
		return 0;
	}

	case SIOCINQ:
	{
		*karg = max_t(int, 0, first_packet_length(sk));
		return 0;
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
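
/*
 * Userspace view of SIOCINQ above (illustrative): for UDP it reports
 * the payload length of the next pending datagram, not the total number
 * of queued bytes:
 *
 *	int n;
 *
 *	ioctl(fd, SIOCINQ, &n);
 *
 * (SIOCINQ is the same value as FIONREAD on Linux.) The max_t() above
 * clamps first_packet_length()'s -1 "empty queue" result to 0 before it
 * is handed back to userspace.
 */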

struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
							err, &last);
			if (skb) {
				if (!(flags & MSG_PEEK))
					udp_skb_destructor(sk, skb);
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty_lockless(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
							err, &last);
			if (skb && !(flags & MSG_PEEK))
				udp_skb_dtor_locked(sk, skb);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty_lockless(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
					      &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_udp);

int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	int err;

try_again:
	skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (udp_lib_checksum_complete(skb)) {
		int is_udplite = IS_UDPLITE(sk);
		struct net *net = sock_net(sk);

		__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
		__UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		goto try_again;
	}

	WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
	return recv_actor(sk, skb);
}
EXPORT_SYMBOL(udp_read_skb);

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				 !__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeking)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);

		BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
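
/*
 * Truncation contract implemented above, by example (illustrative):
 * for a queued 1500-byte datagram,
 *
 *	n = recv(fd, buf, 512, 0);
 *
 * copies 512 bytes, returns 512 and sets MSG_TRUNC in msg_flags (only
 * visible via recvmsg()), while
 *
 *	n = recv(fd, buf, 512, MSG_TRUNC);
 *
 * still copies 512 bytes but returns the full length 1500, letting an
 * application detect oversized datagrams and resize its buffer.
 */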

int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent BPF program called below from accessing bytes
	 * that are out of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
}
EXPORT_SYMBOL(udp_pre_connect);

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
		inet_reset_saddr(sk);
		if (sk->sk_prot->rehash &&
		    (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			sk->sk_prot->rehash(sk);
	}

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = udp_get_table_prot(sk);
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = udp_get_table_prot(sk);
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}
skb); 2030 sk_mark_napi_id(sk, skb); 2031 sk_incoming_cpu_update(sk); 2032 } else { 2033 sk_mark_napi_id_once(sk, skb); 2034 } 2035 2036 rc = __udp_enqueue_schedule_skb(sk, skb); 2037 if (rc < 0) { 2038 int is_udplite = IS_UDPLITE(sk); 2039 int drop_reason; 2040 2041 /* Note that an ENOMEM error is charged twice */ 2042 if (rc == -ENOMEM) { 2043 UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, 2044 is_udplite); 2045 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 2046 } else { 2047 UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS, 2048 is_udplite); 2049 drop_reason = SKB_DROP_REASON_PROTO_MEM; 2050 } 2051 UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 2052 kfree_skb_reason(skb, drop_reason); 2053 trace_udp_fail_queue_rcv_skb(rc, sk); 2054 return -1; 2055 } 2056 2057 return 0; 2058 } 2059 2060 /* returns: 2061 * -1: error 2062 * 0: success 2063 * >0: "udp encap" protocol resubmission 2064 * 2065 * Note that in the success and error cases, the skb is assumed to 2066 * have either been requeued or freed. 2067 */ 2068 static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) 2069 { 2070 int drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 2071 struct udp_sock *up = udp_sk(sk); 2072 int is_udplite = IS_UDPLITE(sk); 2073 2074 /* 2075 * Charge it to the socket, dropping if the queue is full. 2076 */ 2077 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { 2078 drop_reason = SKB_DROP_REASON_XFRM_POLICY; 2079 goto drop; 2080 } 2081 nf_reset_ct(skb); 2082 2083 if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) { 2084 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 2085 2086 /* 2087 * This is an encapsulation socket so pass the skb to 2088 * the socket's udp_encap_rcv() hook. Otherwise, just 2089 * fall through and pass this up the UDP socket. 2090 * up->encap_rcv() returns the following value: 2091 * =0 if skb was successfully passed to the encap 2092 * handler or was discarded by it. 2093 * >0 if skb should be passed on to UDP. 2094 * <0 if skb should be resubmitted as proto -N 2095 */ 2096 2097 /* if we're overly short, let UDP handle it */ 2098 encap_rcv = READ_ONCE(up->encap_rcv); 2099 if (encap_rcv) { 2100 int ret; 2101 2102 /* Verify checksum before giving to encap */ 2103 if (udp_lib_checksum_complete(skb)) 2104 goto csum_error; 2105 2106 ret = encap_rcv(sk, skb); 2107 if (ret <= 0) { 2108 __UDP_INC_STATS(sock_net(sk), 2109 UDP_MIB_INDATAGRAMS, 2110 is_udplite); 2111 return -ret; 2112 } 2113 } 2114 2115 /* FALLTHROUGH -- it's a UDP Packet */ 2116 } 2117 2118 /* 2119 * UDP-Lite specific tests, ignored on UDP sockets 2120 */ 2121 if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 2122 2123 /* 2124 * MIB statistics other than incrementing the error count are 2125 * disabled for the following two types of errors: these depend 2126 * on the application settings, not on the functioning of the 2127 * protocol stack as such. 2128 * 2129 * RFC 3828 here recommends (sec 3.3): "There should also be a 2130 * way ... to ... at least let the receiving application block 2131 * delivery of packets with coverage values less than a value 2132 * provided by the application." 2133 */ 2134 if (up->pcrlen == 0) { /* full coverage was set */ 2135 net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n", 2136 UDP_SKB_CB(skb)->cscov, skb->len); 2137 goto drop; 2138 } 2139 /* The next case involves violating the min. coverage requested 2140 * by the receiver. 
This is subtle: if receiver wants x and x is 2141 * greater than the buffersize/MTU then receiver will complain 2142 * that it wants x while sender emits packets of smaller size y. 2143 * Therefore the above ...()->partial_cov statement is essential. 2144 */ 2145 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { 2146 net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n", 2147 UDP_SKB_CB(skb)->cscov, up->pcrlen); 2148 goto drop; 2149 } 2150 } 2151 2152 prefetch(&sk->sk_rmem_alloc); 2153 if (rcu_access_pointer(sk->sk_filter) && 2154 udp_lib_checksum_complete(skb)) 2155 goto csum_error; 2156 2157 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) { 2158 drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 2159 goto drop; 2160 } 2161 2162 udp_csum_pull_header(skb); 2163 2164 ipv4_pktinfo_prepare(sk, skb); 2165 return __udp_queue_rcv_skb(sk, skb); 2166 2167 csum_error: 2168 drop_reason = SKB_DROP_REASON_UDP_CSUM; 2169 __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 2170 drop: 2171 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 2172 atomic_inc(&sk->sk_drops); 2173 kfree_skb_reason(skb, drop_reason); 2174 return -1; 2175 } 2176 2177 static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 2178 { 2179 struct sk_buff *next, *segs; 2180 int ret; 2181 2182 if (likely(!udp_unexpected_gso(sk, skb))) 2183 return udp_queue_rcv_one_skb(sk, skb); 2184 2185 BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET); 2186 __skb_push(skb, -skb_mac_offset(skb)); 2187 segs = udp_rcv_segment(sk, skb, true); 2188 skb_list_walk_safe(segs, skb, next) { 2189 __skb_pull(skb, skb_transport_offset(skb)); 2190 2191 udp_post_segment_fix_csum(skb); 2192 ret = udp_queue_rcv_one_skb(sk, skb); 2193 if (ret > 0) 2194 ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret); 2195 } 2196 return 0; 2197 } 2198 2199 /* For TCP sockets, sk_rx_dst is protected by socket lock 2200 * For UDP, we use xchg() to guard against concurrent changes. 2201 */ 2202 bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) 2203 { 2204 struct dst_entry *old; 2205 2206 if (dst_hold_safe(dst)) { 2207 old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); 2208 dst_release(old); 2209 return old != dst; 2210 } 2211 return false; 2212 } 2213 EXPORT_SYMBOL(udp_sk_rx_dst_set); 2214 2215 /* 2216 * Multicasts and broadcasts go to each listener. 2217 * 2218 * Note: called only from the BH handler context. 
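 *
 * Delivery works as follows: the first matching socket remembers the
 * original skb, every further match gets its own clone, and if nobody
 * matches the skb is freed and UDP_MIB_IGNOREDMULTI is bumped.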
2219 */
2220 static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
2221 struct udphdr *uh,
2222 __be32 saddr, __be32 daddr,
2223 struct udp_table *udptable,
2224 int proto)
2225 {
2226 struct sock *sk, *first = NULL;
2227 unsigned short hnum = ntohs(uh->dest);
2228 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
2229 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
2230 unsigned int offset = offsetof(typeof(*sk), sk_node);
2231 int dif = skb->dev->ifindex;
2232 int sdif = inet_sdif(skb);
2233 struct hlist_node *node;
2234 struct sk_buff *nskb;
2235
2236 if (use_hash2) {
2237 hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
2238 udptable->mask;
2239 hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
2240 start_lookup:
2241 hslot = &udptable->hash2[hash2];
2242 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
2243 }
2244
2245 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
2246 if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
2247 uh->source, saddr, dif, sdif, hnum))
2248 continue;
2249
2250 if (!first) {
2251 first = sk;
2252 continue;
2253 }
2254 nskb = skb_clone(skb, GFP_ATOMIC);
2255
2256 if (unlikely(!nskb)) {
2257 atomic_inc(&sk->sk_drops);
2258 __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
2259 IS_UDPLITE(sk));
2260 __UDP_INC_STATS(net, UDP_MIB_INERRORS,
2261 IS_UDPLITE(sk));
2262 continue;
2263 }
2264 if (udp_queue_rcv_skb(sk, nskb) > 0)
2265 consume_skb(nskb);
2266 }
2267
2268 /* Also lookup *:port if we are using hash2 and haven't done so yet. */
2269 if (use_hash2 && hash2 != hash2_any) {
2270 hash2 = hash2_any;
2271 goto start_lookup;
2272 }
2273
2274 if (first) {
2275 if (udp_queue_rcv_skb(first, skb) > 0)
2276 consume_skb(skb);
2277 } else {
2278 kfree_skb(skb);
2279 __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
2280 proto == IPPROTO_UDPLITE);
2281 }
2282 return 0;
2283 }
2284
2285 /* Initialize the UDP checksum. If this returns zero (success),
2286 * CHECKSUM_UNNECESSARY means that no more checks are required.
2287 * Otherwise, csum completion requires checksumming the packet body,
2288 * including the udp header, and folding it into skb->csum.
2289 */
2290 static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2291 int proto)
2292 {
2293 int err;
2294
2295 UDP_SKB_CB(skb)->partial_cov = 0;
2296 UDP_SKB_CB(skb)->cscov = skb->len;
2297
2298 if (proto == IPPROTO_UDPLITE) {
2299 err = udplite_checksum_init(skb, uh);
2300 if (err)
2301 return err;
2302
2303 if (UDP_SKB_CB(skb)->partial_cov) {
2304 skb->csum = inet_compute_pseudo(skb, proto);
2305 return 0;
2306 }
2307 }
2308
2309 /* Note: we are only interested in != 0 or == 0, thus the
2310 * force to int.
2311 */
2312 err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
2313 inet_compute_pseudo);
2314 if (err)
2315 return err;
2316
2317 if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
2318 /* If SW calculated the value, we know it's bad */
2319 if (skb->csum_complete_sw)
2320 return 1;
2321
2322 /* HW says the value is bad. Let's validate that.
2323 * skb->csum is no longer the full packet checksum,
2324 * so don't treat it as such.
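 * skb_checksum_complete_unset() below falls back to CHECKSUM_NONE so
 * the checksum is recomputed in software if it is needed again.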
2325 */
2326 skb_checksum_complete_unset(skb);
2327 }
2328
2329 return 0;
2330 }
2331
2332 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
2333 * return code conversion for ip layer consumption
2334 */
2335 static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
2336 struct udphdr *uh)
2337 {
2338 int ret;
2339
2340 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2341 skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
2342
2343 ret = udp_queue_rcv_skb(sk, skb);
2344
2345 /* a return value > 0 means to resubmit the input, but
2346 * it wants the return to be -protocol, or 0
2347 */
2348 if (ret > 0)
2349 return -ret;
2350 return 0;
2351 }
2352
2353 /*
2354 * All we need to do is get the socket, and then do a checksum.
2355 */
2356
2357 int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2358 int proto)
2359 {
2360 struct sock *sk;
2361 struct udphdr *uh;
2362 unsigned short ulen;
2363 struct rtable *rt = skb_rtable(skb);
2364 __be32 saddr, daddr;
2365 struct net *net = dev_net(skb->dev);
2366 bool refcounted;
2367 int drop_reason;
2368
2369 drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
2370
2371 /*
2372 * Validate the packet.
2373 */
2374 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
2375 goto drop; /* No space for header. */
2376
2377 uh = udp_hdr(skb);
2378 ulen = ntohs(uh->len);
2379 saddr = ip_hdr(skb)->saddr;
2380 daddr = ip_hdr(skb)->daddr;
2381
2382 if (ulen > skb->len)
2383 goto short_packet;
2384
2385 if (proto == IPPROTO_UDP) {
2386 /* UDP validates ulen. */
2387 if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
2388 goto short_packet;
2389 uh = udp_hdr(skb);
2390 }
2391
2392 if (udp4_csum_init(skb, uh, proto))
2393 goto csum_error;
2394
2395 sk = inet_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
2396 &refcounted, udp_ehashfn);
2397 if (IS_ERR(sk))
2398 goto no_sk;
2399
2400 if (sk) {
2401 struct dst_entry *dst = skb_dst(skb);
2402 int ret;
2403
2404 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
2405 udp_sk_rx_dst_set(sk, dst);
2406
2407 ret = udp_unicast_rcv_skb(sk, skb, uh);
2408 if (refcounted)
2409 sock_put(sk);
2410 return ret;
2411 }
2412
2413 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
2414 return __udp4_lib_mcast_deliver(net, skb, uh,
2415 saddr, daddr, udptable, proto);
2416
2417 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
2418 if (sk)
2419 return udp_unicast_rcv_skb(sk, skb, uh);
2420 no_sk:
2421 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2422 goto drop;
2423 nf_reset_ct(skb);
2424
2425 /* No socket. Drop the packet silently if the checksum is wrong. */
2426 if (udp_lib_checksum_complete(skb))
2427 goto csum_error;
2428
2429 drop_reason = SKB_DROP_REASON_NO_SOCKET;
2430 __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
2431 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
2432
2433 /*
2434 * Hmm. We got a UDP packet to a port we don't want to
2435 * listen on. Ignore it.
2436 */
2437 kfree_skb_reason(skb, drop_reason);
2438 return 0;
2439
2440 short_packet:
2441 drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
2442 net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
2443 proto == IPPROTO_UDPLITE ? "Lite" : "",
2444 &saddr, ntohs(uh->source),
2445 ulen, skb->len,
2446 &daddr, ntohs(uh->dest));
2447 goto drop;
2448
2449 csum_error:
2450 /*
2451 * RFC1122: OK. Discards the bad packet silently (as far as
2452 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
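 *
 * The CSUMERRORS/INERRORS counters bumped below are visible from user
 * space as the InCsumErrors and InErrors fields of the Udp line in
 * /proc/net/snmp.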
2453 */
2454 drop_reason = SKB_DROP_REASON_UDP_CSUM;
2455 net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
2456 proto == IPPROTO_UDPLITE ? "Lite" : "",
2457 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
2458 ulen);
2459 __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
2460 drop:
2461 __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
2462 kfree_skb_reason(skb, drop_reason);
2463 return 0;
2464 }
2465
2466 /* We can only early-demux multicast if there is a single matching socket.
2467 * If more than one socket is found, return NULL.
2468 */
2469 static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
2470 __be16 loc_port, __be32 loc_addr,
2471 __be16 rmt_port, __be32 rmt_addr,
2472 int dif, int sdif)
2473 {
2474 struct udp_table *udptable = net->ipv4.udp_table;
2475 unsigned short hnum = ntohs(loc_port);
2476 struct sock *sk, *result;
2477 struct udp_hslot *hslot;
2478 unsigned int slot;
2479
2480 slot = udp_hashfn(net, hnum, udptable->mask);
2481 hslot = &udptable->hash[slot];
2482
2483 /* Do not bother scanning an overly long list */
2484 if (hslot->count > 10)
2485 return NULL;
2486
2487 result = NULL;
2488 sk_for_each_rcu(sk, &hslot->head) {
2489 if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
2490 rmt_port, rmt_addr, dif, sdif, hnum)) {
2491 if (result)
2492 return NULL;
2493 result = sk;
2494 }
2495 }
2496
2497 return result;
2498 }
2499
2500 /* For unicast we should only early demux connected sockets or we can
2501 * break forwarding setups. The chains here can be long so only check
2502 * if the first socket is an exact match and if not move on.
2503 */
2504 static struct sock *__udp4_lib_demux_lookup(struct net *net,
2505 __be16 loc_port, __be32 loc_addr,
2506 __be16 rmt_port, __be32 rmt_addr,
2507 int dif, int sdif)
2508 {
2509 struct udp_table *udptable = net->ipv4.udp_table;
2510 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
2511 unsigned short hnum = ntohs(loc_port);
2512 unsigned int hash2, slot2;
2513 struct udp_hslot *hslot2;
2514 __portpair ports;
2515 struct sock *sk;
2516
2517 hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
2518 slot2 = hash2 & udptable->mask;
2519 hslot2 = &udptable->hash2[slot2];
2520 ports = INET_COMBINED_PORTS(rmt_port, hnum);
2521
2522 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
2523 if (inet_match(net, sk, acookie, ports, dif, sdif))
2524 return sk;
2525 /* Only check first socket in chain */
2526 break;
2527 }
2528 return NULL;
2529 }
2530
2531 int udp_v4_early_demux(struct sk_buff *skb)
2532 {
2533 struct net *net = dev_net(skb->dev);
2534 struct in_device *in_dev = NULL;
2535 const struct iphdr *iph;
2536 const struct udphdr *uh;
2537 struct sock *sk = NULL;
2538 struct dst_entry *dst;
2539 int dif = skb->dev->ifindex;
2540 int sdif = inet_sdif(skb);
2541 int ours;
2542
2543 /* validate the packet */
2544 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
2545 return 0;
2546
2547 iph = ip_hdr(skb);
2548 uh = udp_hdr(skb);
2549
2550 if (skb->pkt_type == PACKET_MULTICAST) {
2551 in_dev = __in_dev_get_rcu(skb->dev);
2552
2553 if (!in_dev)
2554 return 0;
2555
2556 ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
2557 iph->protocol);
2558 if (!ours)
2559 return 0;
2560
2561 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
2562 uh->source, iph->saddr,
2563 dif, sdif);
2564 } else if (skb->pkt_type == PACKET_HOST) {
2565 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
2566 uh->source, iph->saddr, dif, sdif);
2567 }
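	/* The lookups above ran under RCU, so the socket may be freed
	 * concurrently; it is only usable if a reference can still be
	 * taken on it.
	 */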
2568 2569 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 2570 return 0; 2571 2572 skb->sk = sk; 2573 skb->destructor = sock_efree; 2574 dst = rcu_dereference(sk->sk_rx_dst); 2575 2576 if (dst) 2577 dst = dst_check(dst, 0); 2578 if (dst) { 2579 u32 itag = 0; 2580 2581 /* set noref for now. 2582 * any place which wants to hold dst has to call 2583 * dst_hold_safe() 2584 */ 2585 skb_dst_set_noref(skb, dst); 2586 2587 /* for unconnected multicast sockets we need to validate 2588 * the source on each packet 2589 */ 2590 if (!inet_sk(sk)->inet_daddr && in_dev) 2591 return ip_mc_validate_source(skb, iph->daddr, 2592 iph->saddr, 2593 iph->tos & IPTOS_RT_MASK, 2594 skb->dev, in_dev, &itag); 2595 } 2596 return 0; 2597 } 2598 2599 int udp_rcv(struct sk_buff *skb) 2600 { 2601 return __udp4_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP); 2602 } 2603 2604 void udp_destroy_sock(struct sock *sk) 2605 { 2606 struct udp_sock *up = udp_sk(sk); 2607 bool slow = lock_sock_fast(sk); 2608 2609 /* protects from races with udp_abort() */ 2610 sock_set_flag(sk, SOCK_DEAD); 2611 udp_flush_pending_frames(sk); 2612 unlock_sock_fast(sk, slow); 2613 if (static_branch_unlikely(&udp_encap_needed_key)) { 2614 if (up->encap_type) { 2615 void (*encap_destroy)(struct sock *sk); 2616 encap_destroy = READ_ONCE(up->encap_destroy); 2617 if (encap_destroy) 2618 encap_destroy(sk); 2619 } 2620 if (up->encap_enabled) 2621 static_branch_dec(&udp_encap_needed_key); 2622 } 2623 } 2624 2625 /* 2626 * Socket option code for UDP 2627 */ 2628 int udp_lib_setsockopt(struct sock *sk, int level, int optname, 2629 sockptr_t optval, unsigned int optlen, 2630 int (*push_pending_frames)(struct sock *)) 2631 { 2632 struct udp_sock *up = udp_sk(sk); 2633 int val, valbool; 2634 int err = 0; 2635 int is_udplite = IS_UDPLITE(sk); 2636 2637 if (level == SOL_SOCKET) { 2638 err = sk_setsockopt(sk, level, optname, optval, optlen); 2639 2640 if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) { 2641 sockopt_lock_sock(sk); 2642 /* paired with READ_ONCE in udp_rmem_release() */ 2643 WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2); 2644 sockopt_release_sock(sk); 2645 } 2646 return err; 2647 } 2648 2649 if (optlen < sizeof(int)) 2650 return -EINVAL; 2651 2652 if (copy_from_sockptr(&val, optval, sizeof(val))) 2653 return -EFAULT; 2654 2655 valbool = val ? 
1 : 0; 2656 2657 switch (optname) { 2658 case UDP_CORK: 2659 if (val != 0) { 2660 WRITE_ONCE(up->corkflag, 1); 2661 } else { 2662 WRITE_ONCE(up->corkflag, 0); 2663 lock_sock(sk); 2664 push_pending_frames(sk); 2665 release_sock(sk); 2666 } 2667 break; 2668 2669 case UDP_ENCAP: 2670 switch (val) { 2671 case 0: 2672 #ifdef CONFIG_XFRM 2673 case UDP_ENCAP_ESPINUDP: 2674 case UDP_ENCAP_ESPINUDP_NON_IKE: 2675 #if IS_ENABLED(CONFIG_IPV6) 2676 if (sk->sk_family == AF_INET6) 2677 up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv; 2678 else 2679 #endif 2680 up->encap_rcv = xfrm4_udp_encap_rcv; 2681 #endif 2682 fallthrough; 2683 case UDP_ENCAP_L2TPINUDP: 2684 up->encap_type = val; 2685 lock_sock(sk); 2686 udp_tunnel_encap_enable(sk->sk_socket); 2687 release_sock(sk); 2688 break; 2689 default: 2690 err = -ENOPROTOOPT; 2691 break; 2692 } 2693 break; 2694 2695 case UDP_NO_CHECK6_TX: 2696 up->no_check6_tx = valbool; 2697 break; 2698 2699 case UDP_NO_CHECK6_RX: 2700 up->no_check6_rx = valbool; 2701 break; 2702 2703 case UDP_SEGMENT: 2704 if (val < 0 || val > USHRT_MAX) 2705 return -EINVAL; 2706 WRITE_ONCE(up->gso_size, val); 2707 break; 2708 2709 case UDP_GRO: 2710 lock_sock(sk); 2711 2712 /* when enabling GRO, accept the related GSO packet type */ 2713 if (valbool) 2714 udp_tunnel_encap_enable(sk->sk_socket); 2715 up->gro_enabled = valbool; 2716 up->accept_udp_l4 = valbool; 2717 release_sock(sk); 2718 break; 2719 2720 /* 2721 * UDP-Lite's partial checksum coverage (RFC 3828). 2722 */ 2723 /* The sender sets actual checksum coverage length via this option. 2724 * The case coverage > packet length is handled by send module. */ 2725 case UDPLITE_SEND_CSCOV: 2726 if (!is_udplite) /* Disable the option on UDP sockets */ 2727 return -ENOPROTOOPT; 2728 if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ 2729 val = 8; 2730 else if (val > USHRT_MAX) 2731 val = USHRT_MAX; 2732 up->pcslen = val; 2733 up->pcflag |= UDPLITE_SEND_CC; 2734 break; 2735 2736 /* The receiver specifies a minimum checksum coverage value. To make 2737 * sense, this should be set to at least 8 (as done below). If zero is 2738 * used, this again means full checksum coverage. */ 2739 case UDPLITE_RECV_CSCOV: 2740 if (!is_udplite) /* Disable the option on UDP sockets */ 2741 return -ENOPROTOOPT; 2742 if (val != 0 && val < 8) /* Avoid silly minimal values. 
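 * Values 1..7 are rounded up to 8, the smallest coverage
 * RFC 3828 permits (it must span the UDP-Lite header).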
*/
2743 val = 8;
2744 else if (val > USHRT_MAX)
2745 val = USHRT_MAX;
2746 up->pcrlen = val;
2747 up->pcflag |= UDPLITE_RECV_CC;
2748 break;
2749
2750 default:
2751 err = -ENOPROTOOPT;
2752 break;
2753 }
2754
2755 return err;
2756 }
2757 EXPORT_SYMBOL(udp_lib_setsockopt);
2758
2759 int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
2760 unsigned int optlen)
2761 {
2762 if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
2763 return udp_lib_setsockopt(sk, level, optname,
2764 optval, optlen,
2765 udp_push_pending_frames);
2766 return ip_setsockopt(sk, level, optname, optval, optlen);
2767 }
2768
2769 int udp_lib_getsockopt(struct sock *sk, int level, int optname,
2770 char __user *optval, int __user *optlen)
2771 {
2772 struct udp_sock *up = udp_sk(sk);
2773 int val, len;
2774
2775 if (get_user(len, optlen))
2776 return -EFAULT;
2777
2778 len = min_t(unsigned int, len, sizeof(int));
2779
2780 if (len < 0)
2781 return -EINVAL;
2782
2783 switch (optname) {
2784 case UDP_CORK:
2785 val = READ_ONCE(up->corkflag);
2786 break;
2787
2788 case UDP_ENCAP:
2789 val = up->encap_type;
2790 break;
2791
2792 case UDP_NO_CHECK6_TX:
2793 val = up->no_check6_tx;
2794 break;
2795
2796 case UDP_NO_CHECK6_RX:
2797 val = up->no_check6_rx;
2798 break;
2799
2800 case UDP_SEGMENT:
2801 val = READ_ONCE(up->gso_size);
2802 break;
2803
2804 case UDP_GRO:
2805 val = up->gro_enabled;
2806 break;
2807
2808 /* The following two cannot be changed on UDP sockets; the return is
2809 * always 0 (which corresponds to the full checksum coverage of UDP). */
2810 case UDPLITE_SEND_CSCOV:
2811 val = up->pcslen;
2812 break;
2813
2814 case UDPLITE_RECV_CSCOV:
2815 val = up->pcrlen;
2816 break;
2817
2818 default:
2819 return -ENOPROTOOPT;
2820 }
2821
2822 if (put_user(len, optlen))
2823 return -EFAULT;
2824 if (copy_to_user(optval, &val, len))
2825 return -EFAULT;
2826 return 0;
2827 }
2828 EXPORT_SYMBOL(udp_lib_getsockopt);
2829
2830 int udp_getsockopt(struct sock *sk, int level, int optname,
2831 char __user *optval, int __user *optlen)
2832 {
2833 if (level == SOL_UDP || level == SOL_UDPLITE)
2834 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
2835 return ip_getsockopt(sk, level, optname, optval, optlen);
2836 }
2837
2838 /**
2839 * udp_poll - wait for a UDP event.
2840 * @file: - file struct
2841 * @sock: - socket
2842 * @wait: - poll table
2843 *
2844 * This is the same as datagram poll, except for the special case of
2845 * blocking sockets. If an application is using a blocking fd
2846 * and a packet with a checksum error is in the queue, it could
2847 * get a return from select() indicating data available, but then
2848 * block when reading it. Add special-case code to work around
2849 * these arguably broken applications.
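 *
 * Concretely: poll() may report EPOLLIN because a datagram is queued,
 * but if that datagram then fails checksum verification a blocking
 * read would sleep; the first_packet_length() re-check below clears
 * the readable bits in exactly that case.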
2850 */ 2851 __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait) 2852 { 2853 __poll_t mask = datagram_poll(file, sock, wait); 2854 struct sock *sk = sock->sk; 2855 2856 if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) 2857 mask |= EPOLLIN | EPOLLRDNORM; 2858 2859 /* Check for false positives due to checksum errors */ 2860 if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) && 2861 !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) 2862 mask &= ~(EPOLLIN | EPOLLRDNORM); 2863 2864 /* psock ingress_msg queue should not contain any bad checksum frames */ 2865 if (sk_is_readable(sk)) 2866 mask |= EPOLLIN | EPOLLRDNORM; 2867 return mask; 2868 2869 } 2870 EXPORT_SYMBOL(udp_poll); 2871 2872 int udp_abort(struct sock *sk, int err) 2873 { 2874 if (!has_current_bpf_ctx()) 2875 lock_sock(sk); 2876 2877 /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing 2878 * with close() 2879 */ 2880 if (sock_flag(sk, SOCK_DEAD)) 2881 goto out; 2882 2883 sk->sk_err = err; 2884 sk_error_report(sk); 2885 __udp_disconnect(sk, 0); 2886 2887 out: 2888 if (!has_current_bpf_ctx()) 2889 release_sock(sk); 2890 2891 return 0; 2892 } 2893 EXPORT_SYMBOL_GPL(udp_abort); 2894 2895 struct proto udp_prot = { 2896 .name = "UDP", 2897 .owner = THIS_MODULE, 2898 .close = udp_lib_close, 2899 .pre_connect = udp_pre_connect, 2900 .connect = ip4_datagram_connect, 2901 .disconnect = udp_disconnect, 2902 .ioctl = udp_ioctl, 2903 .init = udp_init_sock, 2904 .destroy = udp_destroy_sock, 2905 .setsockopt = udp_setsockopt, 2906 .getsockopt = udp_getsockopt, 2907 .sendmsg = udp_sendmsg, 2908 .recvmsg = udp_recvmsg, 2909 .splice_eof = udp_splice_eof, 2910 .release_cb = ip4_datagram_release_cb, 2911 .hash = udp_lib_hash, 2912 .unhash = udp_lib_unhash, 2913 .rehash = udp_v4_rehash, 2914 .get_port = udp_v4_get_port, 2915 .put_port = udp_lib_unhash, 2916 #ifdef CONFIG_BPF_SYSCALL 2917 .psock_update_sk_prot = udp_bpf_update_proto, 2918 #endif 2919 .memory_allocated = &udp_memory_allocated, 2920 .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, 2921 2922 .sysctl_mem = sysctl_udp_mem, 2923 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), 2924 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), 2925 .obj_size = sizeof(struct udp_sock), 2926 .h.udp_table = NULL, 2927 .diag_destroy = udp_abort, 2928 }; 2929 EXPORT_SYMBOL(udp_prot); 2930 2931 /* ------------------------------------------------------------------------ */ 2932 #ifdef CONFIG_PROC_FS 2933 2934 static unsigned short seq_file_family(const struct seq_file *seq); 2935 static bool seq_sk_match(struct seq_file *seq, const struct sock *sk) 2936 { 2937 unsigned short family = seq_file_family(seq); 2938 2939 /* AF_UNSPEC is used as a match all */ 2940 return ((family == AF_UNSPEC || family == sk->sk_family) && 2941 net_eq(sock_net(sk), seq_file_net(seq))); 2942 } 2943 2944 #ifdef CONFIG_BPF_SYSCALL 2945 static const struct seq_operations bpf_iter_udp_seq_ops; 2946 #endif 2947 static struct udp_table *udp_get_table_seq(struct seq_file *seq, 2948 struct net *net) 2949 { 2950 const struct udp_seq_afinfo *afinfo; 2951 2952 #ifdef CONFIG_BPF_SYSCALL 2953 if (seq->op == &bpf_iter_udp_seq_ops) 2954 return net->ipv4.udp_table; 2955 #endif 2956 2957 afinfo = pde_data(file_inode(seq->file)); 2958 return afinfo->udp_table ? 
: net->ipv4.udp_table; 2959 } 2960 2961 static struct sock *udp_get_first(struct seq_file *seq, int start) 2962 { 2963 struct udp_iter_state *state = seq->private; 2964 struct net *net = seq_file_net(seq); 2965 struct udp_table *udptable; 2966 struct sock *sk; 2967 2968 udptable = udp_get_table_seq(seq, net); 2969 2970 for (state->bucket = start; state->bucket <= udptable->mask; 2971 ++state->bucket) { 2972 struct udp_hslot *hslot = &udptable->hash[state->bucket]; 2973 2974 if (hlist_empty(&hslot->head)) 2975 continue; 2976 2977 spin_lock_bh(&hslot->lock); 2978 sk_for_each(sk, &hslot->head) { 2979 if (seq_sk_match(seq, sk)) 2980 goto found; 2981 } 2982 spin_unlock_bh(&hslot->lock); 2983 } 2984 sk = NULL; 2985 found: 2986 return sk; 2987 } 2988 2989 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) 2990 { 2991 struct udp_iter_state *state = seq->private; 2992 struct net *net = seq_file_net(seq); 2993 struct udp_table *udptable; 2994 2995 do { 2996 sk = sk_next(sk); 2997 } while (sk && !seq_sk_match(seq, sk)); 2998 2999 if (!sk) { 3000 udptable = udp_get_table_seq(seq, net); 3001 3002 if (state->bucket <= udptable->mask) 3003 spin_unlock_bh(&udptable->hash[state->bucket].lock); 3004 3005 return udp_get_first(seq, state->bucket + 1); 3006 } 3007 return sk; 3008 } 3009 3010 static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) 3011 { 3012 struct sock *sk = udp_get_first(seq, 0); 3013 3014 if (sk) 3015 while (pos && (sk = udp_get_next(seq, sk)) != NULL) 3016 --pos; 3017 return pos ? NULL : sk; 3018 } 3019 3020 void *udp_seq_start(struct seq_file *seq, loff_t *pos) 3021 { 3022 struct udp_iter_state *state = seq->private; 3023 state->bucket = MAX_UDP_PORTS; 3024 3025 return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; 3026 } 3027 EXPORT_SYMBOL(udp_seq_start); 3028 3029 void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3030 { 3031 struct sock *sk; 3032 3033 if (v == SEQ_START_TOKEN) 3034 sk = udp_get_idx(seq, 0); 3035 else 3036 sk = udp_get_next(seq, v); 3037 3038 ++*pos; 3039 return sk; 3040 } 3041 EXPORT_SYMBOL(udp_seq_next); 3042 3043 void udp_seq_stop(struct seq_file *seq, void *v) 3044 { 3045 struct udp_iter_state *state = seq->private; 3046 struct udp_table *udptable; 3047 3048 udptable = udp_get_table_seq(seq, seq_file_net(seq)); 3049 3050 if (state->bucket <= udptable->mask) 3051 spin_unlock_bh(&udptable->hash[state->bucket].lock); 3052 } 3053 EXPORT_SYMBOL(udp_seq_stop); 3054 3055 /* ------------------------------------------------------------------------ */ 3056 static void udp4_format_sock(struct sock *sp, struct seq_file *f, 3057 int bucket) 3058 { 3059 struct inet_sock *inet = inet_sk(sp); 3060 __be32 dest = inet->inet_daddr; 3061 __be32 src = inet->inet_rcv_saddr; 3062 __u16 destp = ntohs(inet->inet_dport); 3063 __u16 srcp = ntohs(inet->inet_sport); 3064 3065 seq_printf(f, "%5d: %08X:%04X %08X:%04X" 3066 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u", 3067 bucket, src, srcp, dest, destp, sp->sk_state, 3068 sk_wmem_alloc_get(sp), 3069 udp_rqueue_get(sp), 3070 0, 0L, 0, 3071 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 3072 0, sock_i_ino(sp), 3073 refcount_read(&sp->sk_refcnt), sp, 3074 atomic_read(&sp->sk_drops)); 3075 } 3076 3077 int udp4_seq_show(struct seq_file *seq, void *v) 3078 { 3079 seq_setwidth(seq, 127); 3080 if (v == SEQ_START_TOKEN) 3081 seq_puts(seq, " sl local_address rem_address st tx_queue " 3082 "rx_queue tr tm->when retrnsmt uid timeout " 3083 "inode ref pointer drops"); 3084 else { 3085 struct 
udp_iter_state *state = seq->private; 3086 3087 udp4_format_sock(v, seq, state->bucket); 3088 } 3089 seq_pad(seq, '\n'); 3090 return 0; 3091 } 3092 3093 #ifdef CONFIG_BPF_SYSCALL 3094 struct bpf_iter__udp { 3095 __bpf_md_ptr(struct bpf_iter_meta *, meta); 3096 __bpf_md_ptr(struct udp_sock *, udp_sk); 3097 uid_t uid __aligned(8); 3098 int bucket __aligned(8); 3099 }; 3100 3101 struct bpf_udp_iter_state { 3102 struct udp_iter_state state; 3103 unsigned int cur_sk; 3104 unsigned int end_sk; 3105 unsigned int max_sk; 3106 int offset; 3107 struct sock **batch; 3108 bool st_bucket_done; 3109 }; 3110 3111 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter, 3112 unsigned int new_batch_sz); 3113 static struct sock *bpf_iter_udp_batch(struct seq_file *seq) 3114 { 3115 struct bpf_udp_iter_state *iter = seq->private; 3116 struct udp_iter_state *state = &iter->state; 3117 struct net *net = seq_file_net(seq); 3118 struct udp_table *udptable; 3119 unsigned int batch_sks = 0; 3120 bool resized = false; 3121 struct sock *sk; 3122 3123 /* The current batch is done, so advance the bucket. */ 3124 if (iter->st_bucket_done) { 3125 state->bucket++; 3126 iter->offset = 0; 3127 } 3128 3129 udptable = udp_get_table_seq(seq, net); 3130 3131 again: 3132 /* New batch for the next bucket. 3133 * Iterate over the hash table to find a bucket with sockets matching 3134 * the iterator attributes, and return the first matching socket from 3135 * the bucket. The remaining matched sockets from the bucket are batched 3136 * before releasing the bucket lock. This allows BPF programs that are 3137 * called in seq_show to acquire the bucket lock if needed. 3138 */ 3139 iter->cur_sk = 0; 3140 iter->end_sk = 0; 3141 iter->st_bucket_done = false; 3142 batch_sks = 0; 3143 3144 for (; state->bucket <= udptable->mask; state->bucket++) { 3145 struct udp_hslot *hslot2 = &udptable->hash2[state->bucket]; 3146 3147 if (hlist_empty(&hslot2->head)) { 3148 iter->offset = 0; 3149 continue; 3150 } 3151 3152 spin_lock_bh(&hslot2->lock); 3153 udp_portaddr_for_each_entry(sk, &hslot2->head) { 3154 if (seq_sk_match(seq, sk)) { 3155 /* Resume from the last iterated socket at the 3156 * offset in the bucket before iterator was stopped. 3157 */ 3158 if (iter->offset) { 3159 --iter->offset; 3160 continue; 3161 } 3162 if (iter->end_sk < iter->max_sk) { 3163 sock_hold(sk); 3164 iter->batch[iter->end_sk++] = sk; 3165 } 3166 batch_sks++; 3167 } 3168 } 3169 spin_unlock_bh(&hslot2->lock); 3170 3171 if (iter->end_sk) 3172 break; 3173 3174 /* Reset the current bucket's offset before moving to the next bucket. */ 3175 iter->offset = 0; 3176 } 3177 3178 /* All done: no batch made. */ 3179 if (!iter->end_sk) 3180 return NULL; 3181 3182 if (iter->end_sk == batch_sks) { 3183 /* Batching is done for the current bucket; return the first 3184 * socket to be iterated from the batch. 3185 */ 3186 iter->st_bucket_done = true; 3187 goto done; 3188 } 3189 if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2)) { 3190 resized = true; 3191 /* After allocating a larger batch, retry one more time to grab 3192 * the whole bucket. 3193 */ 3194 state->bucket--; 3195 goto again; 3196 } 3197 done: 3198 return iter->batch[0]; 3199 } 3200 3201 static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3202 { 3203 struct bpf_udp_iter_state *iter = seq->private; 3204 struct sock *sk; 3205 3206 /* Whenever seq_next() is called, the iter->cur_sk is 3207 * done with seq_show(), so unref the iter->cur_sk. 
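 * The per-bucket offset is advanced as well, so that a re-batch of
 * the same bucket resumes after the sockets already shown.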
3208 */
3209 if (iter->cur_sk < iter->end_sk) {
3210 sock_put(iter->batch[iter->cur_sk++]);
3211 ++iter->offset;
3212 }
3213
3214 /* After updating iter->cur_sk, check if there are more sockets
3215 * available in the current bucket batch.
3216 */
3217 if (iter->cur_sk < iter->end_sk)
3218 sk = iter->batch[iter->cur_sk];
3219 else
3220 /* Prepare a new batch. */
3221 sk = bpf_iter_udp_batch(seq);
3222
3223 ++*pos;
3224 return sk;
3225 }
3226
3227 static void *bpf_iter_udp_seq_start(struct seq_file *seq, loff_t *pos)
3228 {
3229 /* The bpf iterator does not support lseek, so it always
3230 * continues from where it was stop()-ped.
3231 */
3232 if (*pos)
3233 return bpf_iter_udp_batch(seq);
3234
3235 return SEQ_START_TOKEN;
3236 }
3237
3238 static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3239 struct udp_sock *udp_sk, uid_t uid, int bucket)
3240 {
3241 struct bpf_iter__udp ctx;
3242
3243 meta->seq_num--; /* skip SEQ_START_TOKEN */
3244 ctx.meta = meta;
3245 ctx.udp_sk = udp_sk;
3246 ctx.uid = uid;
3247 ctx.bucket = bucket;
3248 return bpf_iter_run_prog(prog, &ctx);
3249 }
3250
3251 static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
3252 {
3253 struct udp_iter_state *state = seq->private;
3254 struct bpf_iter_meta meta;
3255 struct bpf_prog *prog;
3256 struct sock *sk = v;
3257 uid_t uid;
3258 int ret;
3259
3260 if (v == SEQ_START_TOKEN)
3261 return 0;
3262
3263 lock_sock(sk);
3264
3265 if (unlikely(sk_unhashed(sk))) {
3266 ret = SEQ_SKIP;
3267 goto unlock;
3268 }
3269
3270 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3271 meta.seq = seq;
3272 prog = bpf_iter_get_info(&meta, false);
3273 ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
3274
3275 unlock:
3276 release_sock(sk);
3277 return ret;
3278 }
3279
3280 static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
3281 {
3282 while (iter->cur_sk < iter->end_sk)
3283 sock_put(iter->batch[iter->cur_sk++]);
3284 }
3285
3286 static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
3287 {
3288 struct bpf_udp_iter_state *iter = seq->private;
3289 struct bpf_iter_meta meta;
3290 struct bpf_prog *prog;
3291
3292 if (!v) {
3293 meta.seq = seq;
3294 prog = bpf_iter_get_info(&meta, true);
3295 if (prog)
3296 (void)udp_prog_seq_show(prog, &meta, v, 0, 0);
3297 }
3298
3299 if (iter->cur_sk < iter->end_sk) {
3300 bpf_iter_udp_put_batch(iter);
3301 iter->st_bucket_done = false;
3302 }
3303 }
3304
3305 static const struct seq_operations bpf_iter_udp_seq_ops = {
3306 .start = bpf_iter_udp_seq_start,
3307 .next = bpf_iter_udp_seq_next,
3308 .stop = bpf_iter_udp_seq_stop,
3309 .show = bpf_iter_udp_seq_show,
3310 };
3311 #endif
3312
3313 static unsigned short seq_file_family(const struct seq_file *seq)
3314 {
3315 const struct udp_seq_afinfo *afinfo;
3316
3317 #ifdef CONFIG_BPF_SYSCALL
3318 /* BPF iterator: bpf programs to filter sockets.
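 * Returning AF_UNSPEC here makes seq_sk_match() accept every family
 * and leaves any per-socket filtering to the attached BPF program.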
*/ 3319 if (seq->op == &bpf_iter_udp_seq_ops) 3320 return AF_UNSPEC; 3321 #endif 3322 3323 /* Proc fs iterator */ 3324 afinfo = pde_data(file_inode(seq->file)); 3325 return afinfo->family; 3326 } 3327 3328 const struct seq_operations udp_seq_ops = { 3329 .start = udp_seq_start, 3330 .next = udp_seq_next, 3331 .stop = udp_seq_stop, 3332 .show = udp4_seq_show, 3333 }; 3334 EXPORT_SYMBOL(udp_seq_ops); 3335 3336 static struct udp_seq_afinfo udp4_seq_afinfo = { 3337 .family = AF_INET, 3338 .udp_table = NULL, 3339 }; 3340 3341 static int __net_init udp4_proc_init_net(struct net *net) 3342 { 3343 if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops, 3344 sizeof(struct udp_iter_state), &udp4_seq_afinfo)) 3345 return -ENOMEM; 3346 return 0; 3347 } 3348 3349 static void __net_exit udp4_proc_exit_net(struct net *net) 3350 { 3351 remove_proc_entry("udp", net->proc_net); 3352 } 3353 3354 static struct pernet_operations udp4_net_ops = { 3355 .init = udp4_proc_init_net, 3356 .exit = udp4_proc_exit_net, 3357 }; 3358 3359 int __init udp4_proc_init(void) 3360 { 3361 return register_pernet_subsys(&udp4_net_ops); 3362 } 3363 3364 void udp4_proc_exit(void) 3365 { 3366 unregister_pernet_subsys(&udp4_net_ops); 3367 } 3368 #endif /* CONFIG_PROC_FS */ 3369 3370 static __initdata unsigned long uhash_entries; 3371 static int __init set_uhash_entries(char *str) 3372 { 3373 ssize_t ret; 3374 3375 if (!str) 3376 return 0; 3377 3378 ret = kstrtoul(str, 0, &uhash_entries); 3379 if (ret) 3380 return 0; 3381 3382 if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) 3383 uhash_entries = UDP_HTABLE_SIZE_MIN; 3384 return 1; 3385 } 3386 __setup("uhash_entries=", set_uhash_entries); 3387 3388 void __init udp_table_init(struct udp_table *table, const char *name) 3389 { 3390 unsigned int i; 3391 3392 table->hash = alloc_large_system_hash(name, 3393 2 * sizeof(struct udp_hslot), 3394 uhash_entries, 3395 21, /* one slot per 2 MB */ 3396 0, 3397 &table->log, 3398 &table->mask, 3399 UDP_HTABLE_SIZE_MIN, 3400 UDP_HTABLE_SIZE_MAX); 3401 3402 table->hash2 = table->hash + (table->mask + 1); 3403 for (i = 0; i <= table->mask; i++) { 3404 INIT_HLIST_HEAD(&table->hash[i].head); 3405 table->hash[i].count = 0; 3406 spin_lock_init(&table->hash[i].lock); 3407 } 3408 for (i = 0; i <= table->mask; i++) { 3409 INIT_HLIST_HEAD(&table->hash2[i].head); 3410 table->hash2[i].count = 0; 3411 spin_lock_init(&table->hash2[i].lock); 3412 } 3413 } 3414 3415 u32 udp_flow_hashrnd(void) 3416 { 3417 static u32 hashrnd __read_mostly; 3418 3419 net_get_random_once(&hashrnd, sizeof(hashrnd)); 3420 3421 return hashrnd; 3422 } 3423 EXPORT_SYMBOL(udp_flow_hashrnd); 3424 3425 static void __net_init udp_sysctl_init(struct net *net) 3426 { 3427 net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE; 3428 net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE; 3429 3430 #ifdef CONFIG_NET_L3_MASTER_DEV 3431 net->ipv4.sysctl_udp_l3mdev_accept = 0; 3432 #endif 3433 } 3434 3435 static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries) 3436 { 3437 struct udp_table *udptable; 3438 int i; 3439 3440 udptable = kmalloc(sizeof(*udptable), GFP_KERNEL); 3441 if (!udptable) 3442 goto out; 3443 3444 udptable->hash = vmalloc_huge(hash_entries * 2 * sizeof(struct udp_hslot), 3445 GFP_KERNEL_ACCOUNT); 3446 if (!udptable->hash) 3447 goto free_table; 3448 3449 udptable->hash2 = udptable->hash + hash_entries; 3450 udptable->mask = hash_entries - 1; 3451 udptable->log = ilog2(hash_entries); 3452 3453 for (i = 0; i < hash_entries; i++) { 3454 
INIT_HLIST_HEAD(&udptable->hash[i].head); 3455 udptable->hash[i].count = 0; 3456 spin_lock_init(&udptable->hash[i].lock); 3457 3458 INIT_HLIST_HEAD(&udptable->hash2[i].head); 3459 udptable->hash2[i].count = 0; 3460 spin_lock_init(&udptable->hash2[i].lock); 3461 } 3462 3463 return udptable; 3464 3465 free_table: 3466 kfree(udptable); 3467 out: 3468 return NULL; 3469 } 3470 3471 static void __net_exit udp_pernet_table_free(struct net *net) 3472 { 3473 struct udp_table *udptable = net->ipv4.udp_table; 3474 3475 if (udptable == &udp_table) 3476 return; 3477 3478 kvfree(udptable->hash); 3479 kfree(udptable); 3480 } 3481 3482 static void __net_init udp_set_table(struct net *net) 3483 { 3484 struct udp_table *udptable; 3485 unsigned int hash_entries; 3486 struct net *old_net; 3487 3488 if (net_eq(net, &init_net)) 3489 goto fallback; 3490 3491 old_net = current->nsproxy->net_ns; 3492 hash_entries = READ_ONCE(old_net->ipv4.sysctl_udp_child_hash_entries); 3493 if (!hash_entries) 3494 goto fallback; 3495 3496 /* Set min to keep the bitmap on stack in udp_lib_get_port() */ 3497 if (hash_entries < UDP_HTABLE_SIZE_MIN_PERNET) 3498 hash_entries = UDP_HTABLE_SIZE_MIN_PERNET; 3499 else 3500 hash_entries = roundup_pow_of_two(hash_entries); 3501 3502 udptable = udp_pernet_table_alloc(hash_entries); 3503 if (udptable) { 3504 net->ipv4.udp_table = udptable; 3505 } else { 3506 pr_warn("Failed to allocate UDP hash table (entries: %u) " 3507 "for a netns, fallback to the global one\n", 3508 hash_entries); 3509 fallback: 3510 net->ipv4.udp_table = &udp_table; 3511 } 3512 } 3513 3514 static int __net_init udp_pernet_init(struct net *net) 3515 { 3516 udp_sysctl_init(net); 3517 udp_set_table(net); 3518 3519 return 0; 3520 } 3521 3522 static void __net_exit udp_pernet_exit(struct net *net) 3523 { 3524 udp_pernet_table_free(net); 3525 } 3526 3527 static struct pernet_operations __net_initdata udp_sysctl_ops = { 3528 .init = udp_pernet_init, 3529 .exit = udp_pernet_exit, 3530 }; 3531 3532 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) 3533 DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta, 3534 struct udp_sock *udp_sk, uid_t uid, int bucket) 3535 3536 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter, 3537 unsigned int new_batch_sz) 3538 { 3539 struct sock **new_batch; 3540 3541 new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch), 3542 GFP_USER | __GFP_NOWARN); 3543 if (!new_batch) 3544 return -ENOMEM; 3545 3546 bpf_iter_udp_put_batch(iter); 3547 kvfree(iter->batch); 3548 iter->batch = new_batch; 3549 iter->max_sk = new_batch_sz; 3550 3551 return 0; 3552 } 3553 3554 #define INIT_BATCH_SZ 16 3555 3556 static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux) 3557 { 3558 struct bpf_udp_iter_state *iter = priv_data; 3559 int ret; 3560 3561 ret = bpf_iter_init_seq_net(priv_data, aux); 3562 if (ret) 3563 return ret; 3564 3565 ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ); 3566 if (ret) 3567 bpf_iter_fini_seq_net(priv_data); 3568 3569 return ret; 3570 } 3571 3572 static void bpf_iter_fini_udp(void *priv_data) 3573 { 3574 struct bpf_udp_iter_state *iter = priv_data; 3575 3576 bpf_iter_fini_seq_net(priv_data); 3577 kvfree(iter->batch); 3578 } 3579 3580 static const struct bpf_iter_seq_info udp_seq_info = { 3581 .seq_ops = &bpf_iter_udp_seq_ops, 3582 .init_seq_private = bpf_iter_init_udp, 3583 .fini_seq_private = bpf_iter_fini_udp, 3584 .seq_priv_size = sizeof(struct bpf_udp_iter_state), 3585 }; 3586 3587 static struct bpf_iter_reg udp_reg_info = { 3588 
.target = "udp", 3589 .ctx_arg_info_size = 1, 3590 .ctx_arg_info = { 3591 { offsetof(struct bpf_iter__udp, udp_sk), 3592 PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, 3593 }, 3594 .seq_info = &udp_seq_info, 3595 }; 3596 3597 static void __init bpf_iter_register(void) 3598 { 3599 udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP]; 3600 if (bpf_iter_reg_target(&udp_reg_info)) 3601 pr_warn("Warning: could not register bpf iterator udp\n"); 3602 } 3603 #endif 3604 3605 void __init udp_init(void) 3606 { 3607 unsigned long limit; 3608 unsigned int i; 3609 3610 udp_table_init(&udp_table, "UDP"); 3611 limit = nr_free_buffer_pages() / 8; 3612 limit = max(limit, 128UL); 3613 sysctl_udp_mem[0] = limit / 4 * 3; 3614 sysctl_udp_mem[1] = limit; 3615 sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; 3616 3617 /* 16 spinlocks per cpu */ 3618 udp_busylocks_log = ilog2(nr_cpu_ids) + 4; 3619 udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log, 3620 GFP_KERNEL); 3621 if (!udp_busylocks) 3622 panic("UDP: failed to alloc udp_busylocks\n"); 3623 for (i = 0; i < (1U << udp_busylocks_log); i++) 3624 spin_lock_init(udp_busylocks + i); 3625 3626 if (register_pernet_subsys(&udp_sysctl_ops)) 3627 panic("UDP: failed to init sysctl parameters.\n"); 3628 3629 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) 3630 bpf_iter_register(); 3631 #endif 3632 } 3633
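/*
 * Illustrative user-space sketch (not part of the kernel build): one way
 * to drive the UDP_SEGMENT option handled by udp_lib_setsockopt() above.
 * The helper name and the 1400-byte segment size are arbitrary example
 * choices, and error handling is elided for brevity.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/udp.h>		// UDP_SEGMENT
 *
 *	static ssize_t send_gso_burst(int fd, const struct sockaddr_in *dst,
 *				      const void *buf, size_t len)
 *	{
 *		int gso_size = 1400;	// payload bytes per wire datagram
 *
 *		// Ask the kernel to split each oversized send into
 *		// gso_size-byte UDP datagrams; the value must not
 *		// exceed USHRT_MAX, see udp_lib_setsockopt().
 *		setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
 *			   &gso_size, sizeof(gso_size));
 *
 *		// One syscall, several datagrams on the wire.
 *		return sendto(fd, buf, len, 0,
 *			      (const struct sockaddr *)dst, sizeof(*dst));
 *	}
 */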