// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

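/* Worked example for the randomized scan in udp_lib_get_port() below
 * (illustrative numbers, not taken from this file): with a hash table of
 * 256 slots (udptable->mask == 255), ports congruent modulo 256 all land
 * in the same slot, so each slot "owns" 65536/256 == 256 candidate ports.
 * The scan forces rand to be an odd multiple of 256: stepping snum by it
 * only ever visits ports of the starting slot, and because the multiplier
 * is odd (hence coprime to the power-of-two candidate count) the walk hits
 * each of the 256 candidates exactly once before wrapping back to 'first'.
 * One PORTS_PER_CHAIN-bit bitmap per slot is therefore enough to test all
 * candidate ports with a single pass over the chain.
 */
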
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	score += 4;

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

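/* Score arithmetic above, spelled out (derived from the code, for
 * orientation): an IPv4 socket whose local address matches starts at 2
 * (an IPv6 dual-stack socket at 1), then gains +4 for each of a matching
 * connected peer address, a matching connected peer port and the mandatory
 * device match, plus +1 when the packet arrives on the socket's incoming
 * CPU. A fully connected IPv4 socket can thus score 2 + 4 + 4 + 4 + 1 == 15,
 * while an unconnected, device-wildcard listener scores 2 + 4 == 6;
 * udp4_lib_lookup2() below keeps the highest-scoring socket.
 */
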
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif, bool exact_dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
			}
			badness = score;
			result = sk;
		}
	}
	return result;
}

/* UDP is nearly always wildcards out the wazoo; it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	bool exact_dif = udp_lib_exact_dif_match(net, skb);

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  exact_dif, hslot2, skb);
	if (!result) {
		hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];

		result = udp4_lib_lookup2(net, saddr, sport,
					  htonl(INADDR_ANY), hnum, dif, sdif,
					  exact_dif, hslot2, skb);
	}
	if (unlikely(IS_ERR(result)))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
	     sk->sk_bound_dev_if != sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}

DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}

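/* Sketch of how a tunnel driver typically arms the static key above: the
 * udp_tunnel helpers (<net/udp_tunnel.h>, included here) install encap_rcv
 * and encap_err_lookup on a kernel UDP socket and call udp_encap_enable()
 * internally. Illustrative only; "foo_rcv" and "foo_err_lookup" are made-up
 * names, not functions from this file:
 *
 *	struct udp_tunnel_sock_cfg cfg = {
 *		.encap_type	  = 1,			// an UDP_ENCAP_* value
 *		.encap_rcv	  = foo_rcv,		// see udp_queue_rcv_one_skb()
 *		.encap_err_lookup = foo_err_lookup,	// see __udp4_lib_err_encap()
 *	};
 *
 *	setup_udp_tunnel_sock(net, sock, &cfg);
 */
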
/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb, u32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv4 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, iph->ihl << 2);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk)
		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
						  info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		goto out;
	}
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, &udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 *	udp4_hwcsum  -  handle outgoing HW checksumming
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW checksum won't work when there are two or more
		 * fragments on the socket, as the csums of all the
		 * sk_buffs need to be summed up together.
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

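/* Minimal usage sketch for udp_set_csum() from a hypothetical tunnel
 * transmit path (illustrative only; the surrounding setup is assumed, not
 * shown): the caller has already pushed a struct udphdr and knows the outer
 * addresses, and udp_set_csum() picks between no checksum, GSO deferral,
 * local checksum offload and plain CHECKSUM_PARTIAL:
 *
 *	struct udphdr *uh = udp_hdr(skb);
 *
 *	uh->source = src_port;
 *	uh->dest   = dst_port;
 *	uh->len    = htons(skb->len);
 *	uh->check  = 0;
 *	udp_set_csum(nocheck, skb, saddr, daddr, skb->len);
 */
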
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
							 cork->gso_size);
		goto csum_partial;
	}

	if (is_udplite)					 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

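/* The gso_size path in udp_send_skb() above is what the UDP_SEGMENT API
 * feeds. Hedged userspace sketch (not kernel code; assumes a connected
 * socket): one large send is cut by the kernel into 1200-byte datagrams:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int gso_size = 1200;		// must leave room for the headers
 *
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
 *	// or per call, as a cmsg parsed by udp_cmsg_send() below:
 *	// cmsg_level = SOL_UDP, cmsg_type = UDP_SEGMENT, __u16 payload
 *	send(fd, buf, 1200 * 10, 0);	// one call, ten wire datagrams
 */
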
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8 tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = up->gso_size;

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = inet->uc_index;
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast and
		 * uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != inet->uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      inet->uc_index)) {
			ipc.oif = inet->uc_index;
		}
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		struct inet_cork cork;

		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  &cork, msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4, &cork);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("socket already corked\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("cork failed\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

#define UDP_SKB_IS_STATELESS 0x80000000

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	/* all head states except sp (dst, sk, nf) are always cleared by
	 * udp_rcv() and we need to preserve secpath, if present, to eventually
	 * process IP_CMSG_PASSSEC at recvmsg() time
	 */
	if (likely(!skb_sec_path(skb)))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}

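/* Worked example of the batching above (illustrative numbers only): with
 * the common default sk_rcvbuf of 212992 bytes, partial releases accumulate
 * in up->forward_deficit until they exceed 212992 >> 2 == 53248 bytes; only
 * then is the whole batch returned to the memory accounting in one shot, so
 * a flood of small datagrams pays the atomic operations roughly once per
 * ~52KB of traffic instead of once per packet.
 */
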
/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated on a per-cpu basis, instead of a
 * per socket one (that would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

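/* Forward-alloc arithmetic above, with illustrative numbers (not taken from
 * this file): suppose a datagram with truesize 2304 arrives while
 * sk_forward_alloc is 0. Then sk_mem_pages(2304) == 1, so delta ==
 * 1 << SK_MEM_QUANTUM_SHIFT (4096 on 4K-page systems) is raised from the
 * protocol memory accounting, sk_forward_alloc becomes 4096, and after
 * charging the skb it is left at 4096 - 2304 == 1792, ready to absorb the
 * next small packet without touching the global counters.
 */
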
void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	if (!skb_unref(skb))
		return;

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	__consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
	}
	return skb;
}

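/* first_packet_length() below backs the SIOCINQ ioctl. Hedged userspace
 * sketch (not kernel code): because UDP is record oriented, the reported
 * value is the size of the next datagram, not of everything queued:
 *
 *	int next_len;
 *
 *	if (ioctl(fd, SIOCINQ, &next_len) == 0)	// aka FIONREAD
 *		buf = malloc(next_len ? next_len : 1);
 */
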
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);

struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
							off, err, &last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_dtor_locked,
							off, err, &last);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_udp);

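/* udp_recvmsg() below reports truncation through MSG_TRUNC. Hedged
 * userspace sketch (not kernel code): passing MSG_TRUNC in flags makes
 * recv() return the full datagram length even when the buffer was too
 * small, so short reads can be detected without a second syscall:
 *
 *	char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *
 *	if (n > (ssize_t)sizeof(buf))
 *		fprintf(stderr, "datagram truncated: %zd bytes\n", n);
 */
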
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeking)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent the BPF program called below from accessing
	 * bytes that are out of the bounds specified by the user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
}
EXPORT_SYMBOL(udp_pre_connect);

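/* __udp_disconnect() below is what a connect(AF_UNSPEC) ends up calling.
 * Hedged userspace sketch (not kernel code): dissolving the association
 * returns the socket to a state where sendto() may target any peer again:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// breaks the UDP "connection"
 */
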
int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
				      is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udp_queue_rcv_one_skb(sk, skb);

	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, true);
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		__skb_pull(skb, skb_transport_offset(skb));
		ret = udp_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
	}
	return 0;
}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
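 *
 *	The first matching socket is only remembered; every further match
 *	receives a clone of the skb, and the original skb is delivered to
 *	the remembered socket at the end. This saves one clone in the
 *	common single-listener case.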
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}

/* Initialize the UDP checksum. If the function returns zero (success),
 * CHECKSUM_UNNECESSARY means that no further checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the UDP header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							inet_compute_pseudo);
	if (err)
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
		/* If SW calculated the value, we know it's bad */
		if (skb->csum_complete_sw)
			return 1;

		/* HW says the value is bad. Let's validate that.
		 * skb->csum is no longer the full packet checksum,
		 * so don't treat it as such.
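		 *
		 * Clearing CHECKSUM_COMPLETE below means a later
		 * udp_lib_checksum_complete() falls back to a full
		 * software verification of the packet.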
		 */
		skb_checksum_complete_unset(skb);
	}

	return 0;
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
			       struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					 inet_compute_pseudo);

	ret = udp_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input, but
	 * it wants the return to be -protocol, or 0
	 */
	if (ret > 0)
		return -ret;
	return 0;
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_unicast_rcv_skb(sk, skb, uh);
		sock_put(sk);
		return ret;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk)
		return udp_unicast_rcv_skb(sk, skb, uh);

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop the packet silently if the checksum is wrong. */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't want to listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too-long list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
			   sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return 0;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr, iph->tos,
						     skb->dev, in_dev, &itag);
	}
	return 0;
}

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udp_encap_needed_key);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			lock_sock(sk);
			udp_tunnel_encap_enable(sk->sk_socket);
			release_sock(sk);
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		up->gso_size = val;
		break;

	case UDP_GRO:
		lock_sock(sk);
		if (valbool)
			udp_tunnel_encap_enable(sk->sk_socket);
		up->gro_enabled = valbool;
		release_sock(sk);
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets the actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by the send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.
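	 *
	 * A minimal userspace sketch (illustrative only; error handling
	 * omitted, fd assumed to be an IPPROTO_UDPLITE socket), asking
	 * the kernel to verify at least the first 20 octets:
	 *
	 *	int cov = 20;
	 *
	 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
	 *		   &cov, sizeof(cov));
	 *
	 * Partially covered datagrams advertising a smaller coverage are
	 * then dropped by the pcrlen check in udp_queue_rcv_one_skb().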
	 */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	case UDP_SEGMENT:
		val = up->gso_size;
		break;

	/* The following two cannot be changed on UDP sockets; the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, it could get a return
 *	from select() indicating that data is available, but then block when
 *	reading it. Add special case code to work around these arguably
 *	broken applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);

int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);

struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= ip4_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.sendpage		= udp_sendpage,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udp_setsockopt,
	.compat_getsockopt	= compat_udp_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == afinfo->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) ||
			sk->sk_family != afinfo->family));

	if (!sk) {
		if (state->bucket <= afinfo->udp_table->mask)
			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(udp_seq_start);

void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL(udp_seq_next);

void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= afinfo->udp_table->mask)
		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
}
EXPORT_SYMBOL(udp_seq_stop);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		udp_rqueue_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_SYMBOL(udp_seq_ops);

static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= &udp_table,
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);

void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

static void __udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static int __net_init udp_sysctl_init(struct net *net)
{
	__udp_sysctl_init(net);
	return 0;
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_sysctl_init,
};

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	__udp_sysctl_init(&init_net);

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");
}
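
/*
 * Worked example for the udp_mem sizing above (illustrative figures only):
 * if nr_free_buffer_pages() returns 4M pages (16 GiB with 4 KiB pages),
 * limit = 512K pages, so sysctl_udp_mem becomes { 384K, 512K, 768K } pages,
 * i.e. roughly 1.5 GiB / 2 GiB / 3 GiB for the min/pressure/max thresholds.
 */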