/*
 *	INET		An implementation of the TCP/IP protocol suite for the LINUX
 *			operating system.  INET is implemented using the  BSD Socket
 *			interface as the means of communication with the user level.
 *
 *			The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen :	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *	James Chapman		:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
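
/* Orientation note (summary added for readability; the predicate below
 * is authoritative): a candidate sk conflicts with an existing sk2 only
 * if all of the following hold - same netns, same port, not both
 * SO_REUSEADDR, overlapping bound devices, not paired up by
 * SO_REUSEPORT (same uid, both opted in), and overlapping local
 * addresses per saddr_comp().  When a bitmap is supplied, the scan
 * marks every in-use port mapping to this chain instead of returning
 * early, so the caller can hunt for a free port; PORTS_PER_CHAIN above
 * sizes that bitmap (65536 / UDP_HTABLE_SIZE_MIN bits).
 */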
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2,
						 bool match_wildcard),
			       unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			if (!bitmap)
				return 1;
			__set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
		}
	}
	return 0;
}

/*
 * Note: we still hold the spinlock of the primary hash chain, so no other
 * writer can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk,
				int (*saddr_comp)(const struct sock *sk1,
						  const struct sock *sk2,
						  bool match_wildcard))
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			res = 1;
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}
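
/* Structure note (illustrative): the UDP table keeps two views of every
 * bound socket - the primary chains (hashed on port only, scanned by
 * udp_lib_lport_inuse() above) and the secondary chains (hashed on
 * local address and port, scanned by udp_lib_lport_inuse2()).  When a
 * primary chain grows long, udp_lib_get_port() below switches to the
 * shorter secondary chain, which is why both scans exist.
 */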
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
				  int (*saddr_same)(const struct sock *sk1,
						    const struct sock *sk2,
						    bool match_wildcard))
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    (*saddr_same)(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     int (*saddr_comp)(const struct sock *sk1,
				       const struct sock *sk2,
				       bool match_wildcard),
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    saddr_comp, udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2,
						     sk, saddr_comp);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk, saddr_comp);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
					saddr_comp, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot, saddr_comp)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          0.0.0.0 only equals to 0.0.0.0
 */
int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2,
			 bool match_wildcard)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	if (!ipv6_only_sock(sk2)) {
		if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)
			return 1;
		if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr)
			return match_wildcard;
	}
	return 0;
}

static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum, int dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum, int dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			badness = score;
			result = sk;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif,
						  hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
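
/* Usage sketch for the exported lookup below (illustrative, not taken
 * from this file): a netfilter module would typically do
 *
 *	rcu_read_lock();
 *	sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, dif);
 *	rcu_read_unlock();
 *	if (sk) {
 *		...
 *		sock_put(sk);
 *	}
 *
 * The atomic_inc_not_zero() below takes a reference that keeps sk valid
 * after rcu_read_unlock(); the caller drops it with sock_put().
 */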
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \
    IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, &udp_table, NULL);
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       udptable, NULL);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 *	udp4_hwcsum  -  handle outgoing HW checksumming
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW checksum won't work since there are two or more
		 * fragments on the socket, so the csums of all sk_buffs
		 * must be folded together in software.
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
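
/* Background note (illustrative): in the single-fragment case above the
 * UDP checksum field is seeded with the inverted pseudo-header sum and
 * skb->csum_start/csum_offset tell the NIC where to fold in the payload
 * sum (CHECKSUM_PARTIAL).  In the frag-list case the sum is completed
 * in software and the frame goes out with ip_summed == CHECKSUM_NONE.
 */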
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {		 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
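
/* Corking usage sketch (illustrative, userspace): several writes can be
 * merged into one datagram, pushed out by udp_push_pending_frames()
 * once the cork is removed:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, hdr, hdr_len, 0);
 *	send(fd, payload, payload_len, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *
 * MSG_MORE on sendmsg() has the same effect per call; see the corkreq
 * handling in udp_sendmsg() below.
 */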
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
		if (unlikely(err)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}
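
	/* MSG_CONFIRM (illustrative note): the sender asserts that the
	 * peer is alive, so do_confirm below calls dst_confirm() on the
	 * route's neighbour entry instead of letting ARP re-probe it;
	 * with MSG_PROBE set and no data, nothing is transmitted at all.
	 */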
	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
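
/* Design note (illustrative): the !corkreq fast path above builds the
 * whole datagram with ip_make_skb() and hands it to udp_send_skb()
 * without ever taking the socket lock; only corked sends fall back to
 * the locked ip_append_data() path, which is what keeps uncorked UDP
 * senders cheap on SMP.
 */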
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial)
{
	struct udp_sock *up = udp_sk(sk);
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2) &&
		    !skb_queue_empty(&sk->sk_receive_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);
}

/* Note: called with sk_receive_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	udp_rmem_release(sk, skb->dev_scratch, 1);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated on a per-cpu basis, instead of
 * per socket (which would consume a cache line per socket).
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to give udp_recvmsg()
	 * linear skbs:
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	/* Copy skb->truesize into skb->dev_scratch to avoid a cache line miss
	 * in udp_skb_destructor()
	 */
	skb->dev_scratch = size;

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
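
/* Receive-memory note (illustrative summary of the two helpers above):
 * udp_rmem_release() batches the return of forward-allocated memory in
 * up->forward_deficit and only settles accounts once the deficit grows
 * past a quarter of sk_rcvbuf, so a busy receiver touches the shared
 * counters far less often; __udp_enqueue_schedule_skb() is the producer
 * side that charges that memory under the receive-queue lock.
 */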
void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	unsigned int total = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}
	consume_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all frames with a bad checksum until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	while ((skb = skb_peek(rcvq)) != NULL &&
		udp_lib_checksum_complete(skb)) {
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
				IS_UDPLITE(sk));
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
				IS_UDPLITE(sk));
		atomic_inc(&sk->sk_drops);
		__skb_unlink(skb, rcvq);
		total += skb->truesize;
		kfree_skb(skb);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
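
/* Usage sketch (illustrative, userspace): SIOCINQ reports the payload
 * length of the next pending datagram, SIOCOUTQ the not-yet-sent
 * send-queue bytes:
 *
 *	int n;
 *	if (ioctl(fd, SIOCINQ, &n) == 0)
 *		buf = malloc(n);
 */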
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	peeking = off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = skb->len;
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = !udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || skb_csum_unnecessary(skb))
		err = skb_copy_datagram_msg(skb, off, msg, copied);
	else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
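
/* Receive-path note (illustrative): for a full-length, non-peeked read,
 * udp_recvmsg() above defers checksum verification to
 * skb_copy_and_csum_datagram_msg(), which folds the checksum into the
 * copy to user space so the payload is traversed only once; truncated
 * reads, peeks and UDP-Lite partial coverage must verify up front.
 */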
int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
	if (!static_key_enabled(&udp_encap_needed))
		static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);
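
/* Encapsulation usage sketch (illustrative): a tunnel driver such as
 * L2TP sets encap_type and encap_rcv on its kernel UDP socket, then
 * calls udp_encap_enable() so the static key above patches the extra
 * branch into udp_queue_rcv_skb() below.  Only workloads that actually
 * tunnel pay for the check.
 */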
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	dst_hold(dst);
	old = xchg(&sk->sk_rx_dst, dst);
	dst_release(old);
}

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}
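
/* Delivery note (illustrative): __udp4_lib_mcast_deliver() above hands
 * a clone of the skb to every matching listener except the first one
 * found; the original skb is delivered to that first socket last, so
 * exactly one clone per extra listener is allocated and nothing is
 * copied at all when a group has a single subscriber.
 */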
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}
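
/* Checksum note (illustrative): for plain UDP over IPv4 a zero checksum
 * field means "no checksum" (RFC 768), which is why the init helper
 * above gets uh->check and can mark the skb CHECKSUM_UNNECESSARY right
 * away; UDP-Lite has no such escape hatch and must set up its partial
 * coverage fields instead.
 */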
/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't want to listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning an overly long chain */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_BROADCAST ||
	    skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return;

		/* we are supposed to accept bcast packets */
		if (skb->pkt_type == PACKET_MULTICAST) {
			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
					       iph->protocol);
			if (!ours)
				return;
		}

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr, dif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif);
	}

	if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		/* DST_NOCACHE cannot be used without taking a reference */
		if (dst->flags & DST_NOCACHE) {
			if (likely(atomic_inc_not_zero(&dst->__refcnt)))
				skb_dst_set(skb, dst);
		} else {
			skb_dst_set_noref(skb, dst);
		}
	}
}
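/* Illustrative userspace sketch (not part of this file): the unicast early
 * demux above only matches sockets with a fixed 4-tuple, i.e. ones that
 * have been connect()ed. The peer address and port are made-up example
 * values.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in peer = { .sin_family = AF_INET,
 *				    .sin_port = htons(53) };
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *	send(fd, "x", 1, 0);	(replies can now hit the demux fast path)
 */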
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);

	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);

		encap_destroy = ACCESS_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets the actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by the send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
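/* Illustrative userspace sketch (not part of this file): exercising the
 * options handled above. The option constants come from the uapi socket
 * headers (e.g. <netinet/in.h>, <linux/udp.h>); the coverage value is a
 * made-up example.
 *
 *	int one = 1, zero = 0, cov = 20;
 *
 *	UDP corking: queue several sends into a single datagram.
 *	int u = socket(AF_INET, SOCK_DGRAM, 0);
 *	setsockopt(u, SOL_UDP, UDP_CORK, &one, sizeof(one));
 *	... several send() calls ...
 *	setsockopt(u, SOL_UDP, UDP_CORK, &zero, sizeof(zero));	(flushes)
 *
 *	UDP-Lite partial coverage: checksum only the first 20 bytes sent.
 *	int l = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	setsockopt(l, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 */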
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	/* The following two cannot be changed on UDP sockets; the return
	 * value is always 0 (which corresponds to the full checksum
	 * coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
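/* Illustrative userspace sketch (not part of this file): reading an option
 * back through the getsockopt() path above. Per the comment in
 * udp_lib_getsockopt(), the UDP-Lite coverage options read back as 0 on a
 * plain UDP socket (full coverage).
 *
 *	int val = 0;
 *	socklen_t len = sizeof(val);
 *	getsockopt(fd, SOL_UDP, UDP_CORK, &val, &len);
 *	(val is now 0 or 1, reflecting the corking state)
 */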
/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select()/poll() could
 *	indicate that data is available, but the subsequent read would block.
 *	Add special-case code to work around these arguably broken
 *	applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	sock_rps_record_flow(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(POLLIN | POLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);

int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);

struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.init		   = udp_init_sock,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.release_cb	   = ip4_datagram_release_cb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.rehash		   = udp_v4_rehash,
	.get_port	   = udp_v4_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp_sock),
	.h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
	.diag_destroy	   = udp_abort,
};
EXPORT_SYMBOL(udp_prot);
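/* Note (illustrative, not part of this file): .diag_destroy above is the
 * hook invoked by the sock_diag SOCK_DESTROY operation, so UDP flows can
 * be killed administratively from userspace, e.g. (assuming a kernel with
 * CONFIG_INET_DIAG_DESTROY and an iproute2 ss build with -K support):
 *
 *	ss -K -u dst 192.0.2.1
 *
 * Each matching socket gets udp_abort(): sk_err is set, the error is
 * reported, and the socket is disconnected.
 */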
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= state->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

	if (!sk) {
		if (state->bucket <= state->udp_table->mask)
			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;

	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= state->udp_table->mask)
		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}

int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->udp_table	= afinfo->udp_table;
	return err;
}
EXPORT_SYMBOL(udp_seq_open);

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	afinfo->seq_ops.start	= udp_seq_start;
	afinfo->seq_ops.next	= udp_seq_next;
	afinfo->seq_ops.stop	= udp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(udp_proc_register);

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations udp_afinfo_seq_fops = {
	.owner	  = THIS_MODULE,
	.open	  = udp_seq_open,
	.read	  = seq_read,
	.llseek	  = seq_lseek,
	.release  = seq_release_net
};
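/* Illustrative output (hypothetical values): given the header above and the
 * format string in udp4_format_sock(), one /proc/net/udp row looks roughly
 * like:
 *
 *	  sl  local_address rem_address   st tx_queue rx_queue ...
 *	 437: 00000000:0035 00000000:0000 07 00000000:00000000 ...
 *
 * i.e. bucket 437, a socket bound to 0.0.0.0:53 (0x0035), unconnected;
 * state 07 (TCP_CLOSE) is how idle UDP sockets are reported.
 */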
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name		= "udp",
	.family		= AF_INET,
	.udp_table	= &udp_table,
	.seq_fops	= &udp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= udp4_seq_show,
	},
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);

void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);
}
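/* Note (illustrative): the hash table sizing above can be overridden at
 * boot via the command-line parameter registered with __setup(), e.g.
 *
 *	uhash_entries=4096
 *
 * Values below UDP_HTABLE_SIZE_MIN are rounded up, and 0 keeps the
 * memory-scaled default computed by alloc_large_system_hash().
 */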