/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:             socket struct in question
 *  @snum:           port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
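			 * (The step divided by the table size is odd, hence
			 * coprime to the power-of-two number of ports per
			 * chain, so the walk below visits every port that can
			 * hash to this slot exactly once before wrapping back
			 * to "first".)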
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum, int dif,
			 bool exact_dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if || exact_dif) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum, int dif,
				     bool exact_dif, struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			badness = score;
			result = sk;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	bool exact_dif = udp_lib_exact_dif_match(net, skb);
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  exact_dif, hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif,
						  exact_dif, hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \
    IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       udptable, NULL);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 *	udp4_hwcsum  -  handle outgoing HW checksumming
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
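		 * Hardware offload can finish the job: point csum_start/
		 * csum_offset at the UDP checksum field and seed it with
		 * the pseudo-header sum below.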
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
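 *
 * Userspace-view sketch (illustrative only, not part of this file; fd,
 * hdr and payload are placeholders): several writes are typically
 * batched into one datagram under UDP_CORK, and the final uncork ends
 * up here, e.g.
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, hdr, hdr_len, 0);
 *	send(fd, payload, payload_len, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));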
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
		if (unlikely(err)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

#define UDP_SKB_IS_STATELESS 0x80000000

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	if (likely(!skb->_skb_refdst))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2) &&
		    !skb_queue_empty(&up->reader_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue lock for fwd allocated memory
	 * scheduling, if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);


	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}

/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch.
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated on a per-cpu basis, instead of a
 * per-socket one (which would consume a cache line per socket).
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it can be helpful to hand udp_recvmsg()
	 * linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
	}
	return skb;
}

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);

struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		*peeked = 0;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
							peeked, off, err,
							&last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_dtor_locked,
							peeked, off, err,
							&last);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL_GPL(__skb_recv_udp);

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	peeking = off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
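	 *	(Typically reached via connect() with an AF_UNSPEC address.)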
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	/* At recvmsg() time we may access skb->dst or skb->sp depending on
	 * the IP options and the cmsg flags; otherwise we can clear all
	 * pending head states while they are hot in the cache
	 */
	if (likely(IPCB(skb)->opt.optlen == 0 && !skb_sec_path(skb)))
		skb_release_head_state(skb);

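	/* charge the skb to the socket's receive queue; this may trigger
	 * forward memory scheduling under the queue lock, see
	 * __udp_enqueue_schedule_skb()
	 */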
	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
	if (!static_key_enabled(&udp_encap_needed))
		static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
	}
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
/* Initialize the UDP checksum. If this function exits with zero (success)
 * and CHECKSUM_UNNECESSARY is set, no further checks are required.
 * Otherwise, checksum completion requires checksumming the packet body,
 * including the UDP header, and folding the result into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop the packet silently if the checksum is wrong. */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * We got a UDP packet for a port we are not listening on.
	 * Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC 1122: OK. Discard the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
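/* The ICMP_PORT_UNREACH sent above is what a connected userspace socket
 * eventually observes as ECONNREFUSED (see the "send to an unknown
 * connection" entry in the changelog). A minimal sketch, with addresses
 * and error handling elided:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	connect(fd, ...);		// peer has no listener on that port
 *	send(fd, "x", 1, 0);		// elicits the ICMP error
 *	if (recv(fd, buf, sizeof(buf), 0) < 0 && errno == ECONNREFUSED)
 *		...			// error delivered via the icmp handler
 *
 * Unconnected sockets do not see the error, since there is no single
 * peer to attribute it to.
 */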
/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning an overly long list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
				sizeof(struct udphdr)))
		return;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_BROADCAST ||
	    skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return;

		/* we are supposed to accept bcast packets */
		if (skb->pkt_type == PACKET_MULTICAST) {
			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
					       iph->protocol);
			if (!ours)
				return;
		}

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr, dif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}
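/* Early demux is an optimization: it runs before the routing decision and
 * lets a connected socket's cached sk_rx_dst short-circuit the route
 * lookup. It is controlled by the ip_early_demux sysctl; a typical toggle
 * looks like:
 *
 *	# sysctl -w net.ipv4.ip_early_demux=1
 *
 * Hosts that mostly forward (rather than terminate) UDP traffic may
 * prefer to disable it, since the socket lookup is pure overhead for
 * forwarded packets.
 */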
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);
		encap_destroy = ACCESS_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets the actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by the send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
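/* Userspace view of the options handled above; a hedged sketch (fd, port
 * and coverage values are arbitrary):
 *
 *	int on = 1, off = 0, cov = 20;
 *
 *	// coalesce several send()s into one datagram
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, hdr, hdrlen, 0);
 *	send(fd, payload, paylen, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *
 *	// UDP-Lite only: checksum the first 20 bytes on send, and require
 *	// at least that much coverage on receive (values < 8 are clamped)
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 *
 * Uncorking pushes the pending frames, which is why the !val branch of
 * UDP_CORK takes the socket lock around push_pending_frames().
 */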
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	/* Reject a negative length before the unsigned min_t() clamp,
	 * which would otherwise hide it.
	 */
	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	/* The following two cannot be changed on UDP sockets; the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, it could get a return
 *	from select() indicating data available, but then block when reading
 *	it. Add special-case code to work around these arguably broken
 *	applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= POLLIN | POLLRDNORM;

	sock_rps_record_flow(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(POLLIN | POLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);
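/* The workaround above matters for the classic select()+blocking-read
 * loop; a sketch of the pattern it protects (error handling elided):
 *
 *	fd_set rfds;
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	select(fd + 1, &rfds, NULL, NULL, NULL);
 *	if (FD_ISSET(fd, &rfds))
 *		recv(fd, buf, sizeof(buf), 0);	// must not block here
 *
 * Without the first_packet_length() check, a queued datagram with a bad
 * checksum could make select() report readability and then recv() block
 * once the bad packet is discarded.
 */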
int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);

struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.init		   = udp_init_sock,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.release_cb	   = ip4_datagram_release_cb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.rehash		   = udp_v4_rehash,
	.get_port	   = udp_v4_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp_sock),
	.h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
	.diag_destroy	   = udp_abort,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= state->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) ||
			sk->sk_family != state->family));

	if (!sk) {
		if (state->bucket <= state->udp_table->mask)
			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
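/* seq_file iteration protocol, for orientation: udp_seq_start() positions
 * the cursor (taking a bucket lock via udp_get_first()), udp_seq_next()
 * advances it (dropping and retaking locks at bucket boundaries), and
 * udp_seq_stop() releases whatever bucket lock is still held. The
 * MAX_UDP_PORTS sentinel stored in state->bucket below is always larger
 * than the table mask, so it marks "no lock held" for the
 * SEQ_START_TOKEN case.
 */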
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= state->udp_table->mask)
		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}

int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->udp_table = afinfo->udp_table;
	return err;
}
EXPORT_SYMBOL(udp_seq_open);

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	afinfo->seq_ops.start = udp_seq_start;
	afinfo->seq_ops.next = udp_seq_next;
	afinfo->seq_ops.stop = udp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(udp_proc_register);

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		   " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		   bucket, src, srcp, dest, destp, sp->sk_state,
		   sk_wmem_alloc_get(sp),
		   sk_rmem_alloc_get(sp),
		   0, 0L, 0,
		   from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		   0, sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			      "rx_queue tr tm->when retrnsmt   uid  timeout "
			      "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations udp_afinfo_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = udp_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net
};
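/* The seq_file machinery above is what backs /proc/net/udp (and, via a
 * different afinfo, /proc/net/udplite). Typical inspection from
 * userspace:
 *
 *	$ cat /proc/net/udp
 *	  sl  local_address rem_address   st ... inode ref pointer drops
 *	  ...
 *
 * Addresses and ports are printed in hex (see the %08X:%04X format in
 * udp4_format_sock()), so e.g. port 53 shows up as :0035 in the
 * local_address column.
 */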
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name		= "udp",
	.family		= AF_INET,
	.udp_table	= &udp_table,
	.seq_fops	= &udp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= udp4_seq_show,
	},
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);

void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);
}
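/* Boot-time sizing note: the uhash_entries= parameter parsed above lets an
 * administrator pre-size the UDP hash table from the kernel command line,
 * e.g.:
 *
 *	uhash_entries=8192
 *
 * Nonzero values below UDP_HTABLE_SIZE_MIN are rounded up to the minimum;
 * when the parameter is absent, alloc_large_system_hash() scales the table
 * with available memory (one slot per 2 MB, capped at 64K entries).
 */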