/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Version:	$Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include "udp_impl.h"

/*
 *	Snmp MIB for the UDP layer
 */

DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
EXPORT_SYMBOL(udp_statistics);

DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
EXPORT_SYMBOL(udp_stats_in6);

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);

int sysctl_udp_mem[3] __read_mostly;
int sysctl_udp_rmem_min __read_mostly;
int sysctl_udp_wmem_min __read_mostly;

EXPORT_SYMBOL(sysctl_udp_mem);
EXPORT_SYMBOL(sysctl_udp_rmem_min);
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
					const struct hlist_head udptable[])
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
		if (sk->sk_net == net && sk->sk_hash == num)
			return 1;
	return 0;
}

/**
 *  __udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @udptable:    hash list table, must be of UDP_HTABLE_SIZE
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 */
int __udp_lib_get_port(struct sock *sk, unsigned short snum,
		       struct hlist_head udptable[],
		       int (*saddr_comp)(const struct sock *sk1,
					 const struct sock *sk2))
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct sock *sk2;
	int    error = 1;
	struct net *net = sk->sk_net;

	write_lock_bh(&udp_hash_lock);

	if (!snum) {
		int i, low, high, remaining;
		unsigned rover, best, best_size_so_far;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		best_size_so_far = UINT_MAX;
		best = rover = net_random() % remaining + low;

		/* 1st pass: look for empty (or shortest) hash chain */
		for (i = 0; i < UDP_HTABLE_SIZE; i++) {
			int size = 0;

			head = &udptable[rover & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(head))
				goto gotit;

			sk_for_each(sk2, node, head) {
				if (++size >= best_size_so_far)
					goto next;
			}
			best_size_so_far = size;
			best = rover;
		next:
			/* fold back if end of range */
			if (++rover > high)
				rover = low + ((rover - low)
					       & (UDP_HTABLE_SIZE - 1));
		}

		/* 2nd pass: find hole in shortest hash chain */
		rover = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) {
			if (!__udp_lib_lport_inuse(net, rover, udptable))
				goto gotit;
			rover += UDP_HTABLE_SIZE;
			if (rover > high)
				rover = low + ((rover - low)
					       & (UDP_HTABLE_SIZE - 1));
		}

		/* All ports in use! */
		goto fail;

gotit:
		snum = rover;
	} else {
		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];

		sk_for_each(sk2, node, head)
			if (sk2->sk_hash == snum &&
			    sk2 != sk &&
			    sk2->sk_net == net &&
			    (!sk2->sk_reuse || !sk->sk_reuse) &&
			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
			     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
			    (*saddr_comp)(sk, sk2))
				goto fail;
	}

	inet_sk(sk)->num = snum;
	sk->sk_hash = snum;
	if (sk_unhashed(sk)) {
		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
		sk_add_node(sk, head);
		sock_prot_inuse_add(sk->sk_prot, 1);
	}
	error = 0;
fail:
	write_unlock_bh(&udp_hash_lock);
	return error;
}
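
/*
 * Illustrative userspace sketch (not part of this file's build; all names
 * are the standard socket API): binding with sin_port = 0 exercises the
 * randomized two-pass search above, and getsockname() reports the port the
 * kernel picked.
 *
 *	#include <arpa/inet.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in a;
 *		socklen_t alen = sizeof(a);
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&a, 0, sizeof(a));
 *		a.sin_family = AF_INET;
 *		a.sin_port = 0;
 *		a.sin_addr.s_addr = htonl(INADDR_ANY);
 *		bind(fd, (struct sockaddr *)&a, sizeof(a));
 *		getsockname(fd, (struct sockaddr *)&a, &alen);
 *		printf("bound to port %u\n", ntohs(a.sin_port));
 *		return 0;
 *	}
 */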

int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*scmp)(const struct sock *, const struct sock *))
{
	return __udp_lib_get_port(sk, snum, udp_hash, scmp);
}

int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	return (!ipv6_only_sock(sk2) &&
		(!inet1->rcv_saddr || !inet2->rcv_saddr ||
		 inet1->rcv_saddr == inet2->rcv_saddr));
}

static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
}

/* UDP sockets are nearly always wildcards out the wazoo; it makes no sense
 * to try harder than this. -DaveM
 */
static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct hlist_head udptable[])
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	read_lock(&udp_hash_lock);
	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (sk->sk_net == net && sk->sk_hash == hnum &&
		    !ipv6_only_sock(sk)) {
			int score = (sk->sk_family == PF_INET ? 1 : 0);
			if (inet->rcv_saddr) {
				if (inet->rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (inet->daddr) {
				if (inet->daddr != saddr)
					continue;
				score += 2;
			}
			if (inet->dport) {
				if (inet->dport != sport)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 9) {
				result = sk;
				break;
			} else if (score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	if (result)
		sock_hold(result);
	read_unlock(&udp_hash_lock);
	return result;
}

static inline struct sock *udp_v4_mcast_next(struct sock *sk,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_node *node;
	struct sock *s = sk;
	unsigned short hnum = ntohs(loc_port);

	sk_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (s->sk_hash != hnum ||
		    (inet->daddr && inet->daddr != rmt_addr) ||
		    (inet->dport != rmt_port && inet->dport) ||
		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
		    ipv6_only_sock(s) ||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}
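
/*
 * Worked example of the scoring in __udp4_lib_lookup() above (addresses
 * assumed, for illustration only): for a datagram to 192.0.2.1:53 from
 * 198.51.100.2:1234 arriving on ifindex 3, an AF_INET socket bound to
 * 192.0.2.1:53 on ifindex 3 and connected to 198.51.100.2:1234 scores
 * 1 + 2 + 2 + 2 + 2 = 9, the maximum, and ends the search early. An
 * unbound, unconnected AF_INET listener on port 53 scores only 1 and is
 * kept merely as the best match so far.
 */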

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet.  We move
 * on past this.  Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
{
	struct inet_sock *inet;
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;

	sk = __udp4_lib_lookup(skb->dev->nd_net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       udptable);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, udp_hash);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
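
/*
 * Illustrative userspace sketch (assumed, standard socket API; not this
 * file's code): with IP_RECVERR set, the ICMP errors queued by
 * __udp4_lib_err() above can be read from the error queue rather than only
 * surfacing as a failure of the next send.
 *
 *	#include <netinet/in.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static void drain_errqueue(int fd)
 *	{
 *		int on = 1;
 *		char ctrl[512];
 *		struct msghdr msg;
 *
 *		setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *		memset(&msg, 0, sizeof(msg));
 *		msg.msg_control = ctrl;
 *		msg.msg_controllen = sizeof(ctrl);
 *		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *			perror("recvmsg");
 *	}
 */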

/**
 * udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
 * @sk:	socket we are sending on
 * @skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 __be32 src, __be32 dst, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	struct sk_buff *skb;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (is_udplite)					 /*     UDP-Lite      */
		csum = udplite_csum_outgoing(sk, skb);

	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst,
				     up->len);
		goto send;

	} else						 /*   `normal' UDP    */
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	if (!err)
		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}
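
/*
 * Minimal userspace sketch (an assumed helper, not a kernel API) of what
 * csum_tcpudp_magic() computes above: the 16-bit one's-complement sum over
 * the IPv4 pseudo-header {src, dst, zero, IPPROTO_UDP (17), UDP length}
 * plus the UDP header and payload, folded and inverted. Addresses are
 * passed in host byte order and the buffer as it appears on the wire; the
 * result is in host order, so store it with htons(), and a computed value
 * of zero is transmitted as 0xFFFF (the CSUM_MANGLED_0 case above).
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static uint16_t udp_checksum(uint32_t src, uint32_t dst,
 *				     const uint8_t *udp, size_t len)
 *	{
 *		uint64_t sum = 0;
 *		size_t i;
 *
 *		sum += (src >> 16) + (src & 0xffff);
 *		sum += (dst >> 16) + (dst & 0xffff);
 *		sum += 17 + len;
 *		for (i = 0; i + 1 < len; i += 2)
 *			sum += (udp[i] << 8) | udp[i + 1];
 *		if (len & 1)
 *			sum += udp[len - 1] << 8;
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 */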

int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags & MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->daddr;
		dport = inet->dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt)
		ipc.opt = inet->opt;

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = sk->sk_protocol,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = dport } } };
		security_sk_classify_flow(sk, &fl);
		err = ip_route_output_flow(&init_net, &rt, &fl, sk, 1);
		if (err) {
			if (err == -ENETUNREACH)
				IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
	}

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = rt->rt_src;
	if (!ipc.addr)
		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, rt,
			     corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
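
/*
 * Illustrative userspace sketch (assumed fd is a connected UDP socket;
 * standard socket API): the corking path above lets several sends
 * accumulate into one datagram. The same effect is available per call
 * with MSG_MORE.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>
 *	#include <sys/socket.h>
 *
 *	static void send_in_pieces(int fd)
 *	{
 *		int on = 1, off = 0;
 *
 *		setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *		send(fd, "header,", 7, 0);
 *		send(fd, "payload", 7, 0);
 *		setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *	}
 */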

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags | MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags & MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - sizeof(struct udphdr);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
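
/*
 * Illustrative userspace sketch (standard ioctls, fd assumed to be a UDP
 * socket): SIOCINQ reports the payload length of the next queued datagram,
 * SIOCOUTQ the bytes still held in the send buffer, matching the handlers
 * above.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	static void report_queues(int fd)
 *	{
 *		int inq = 0, outq = 0;
 *
 *		ioctl(fd, SIOCINQ, &inq);
 *		ioctl(fd, SIOCOUTQ, &outq);
 *		printf("next datagram: %d bytes, unsent: %d bytes\n",
 *		       inq, outq);
 *	}
 */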

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked;
	int err;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

try_again:
	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				  &peeked, &err);
	if (!skb)
		goto out;

	ulen = skb->len - sizeof(struct udphdr);
	copied = len;
	if (copied > ulen)
		copied = ulen;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))
			goto csum_copy_err;
	}

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb,
						       sizeof(struct udphdr),
						       msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	if (!peeked)
		UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

out_free:
	lock_sock(sk);
	skb_free_datagram(sk, skb);
	release_sock(sk);
out:
	return err;

csum_copy_err:
	lock_sock(sk);
	if (!skb_kill_datagram(sk, skb, flags))
		UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
	release_sock(sk);

	if (noblock)
		return -EAGAIN;
	goto try_again;
}

int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->daddr = 0;
	inet->dport = 0;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
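
/*
 * Illustrative userspace sketch (standard flags, helper name assumed):
 * recv() with MSG_PEEK | MSG_TRUNC returns the full datagram length
 * without consuming it, matching the MSG_TRUNC handling in udp_recvmsg()
 * above, so a buffer of exactly the right size can be allocated for the
 * real read.
 *
 *	#include <stdlib.h>
 *	#include <sys/socket.h>
 *
 *	static ssize_t recv_whole(int fd, char **out)
 *	{
 *		char probe;
 *		ssize_t n = recv(fd, &probe, 1, MSG_PEEK | MSG_TRUNC);
 *
 *		if (n < 0)
 *			return n;
 *		*out = malloc(n ? n : 1);
 *		return recv(fd, *out, n, 0);
 *	}
 */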

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		if (skb->len > sizeof(struct udphdr) &&
		    up->encap_rcv != NULL) {
			int ret;

			ret = (*up->encap_rcv)(sk, skb);
			if (ret <= 0) {
				UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {		/* full coverage was set */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (sk->sk_filter) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}

	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
		goto drop;
	}

	return 0;

drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
	kfree_skb(skb);
	return -1;
}
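
/*
 * Worked example of the coverage checks above (values assumed for
 * illustration): a receiver that set UDPLITE_RECV_CSCOV to 20 drops a
 * 1000-byte datagram whose sender used UDPLITE_SEND_CSCOV = 12, since
 * cscov (12) < pcrlen (20); a datagram with coverage 20 or more, or with
 * full coverage, is delivered.
 */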

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct hlist_head udptable[])
{
	struct sock *sk;
	int dif;

	read_lock(&udp_hash_lock);
	sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
	if (sk) {
		struct sock *sknext = NULL;

		do {
			struct sk_buff *skb1 = skb;

			sknext = udp_v4_mcast_next(sk_next(sk), uh->dest,
						   daddr, uh->source, saddr,
						   dif);
			if (sknext)
				skb1 = skb_clone(skb, GFP_ATOMIC);

			if (skb1) {
				int ret = 0;

				bh_lock_sock_nested(sk);
				if (!sock_owned_by_user(sk))
					ret = udp_queue_rcv_skb(sk, skb1);
				else
					sk_add_backlog(sk, skb1);
				bh_unlock_sock(sk);

				if (ret > 0)
					/* we should probably re-process
					 * instead of dropping packets here.
					 */
					kfree_skb(skb1);
			}
			sk = sknext;
		} while (sknext);
	} else
		kfree_skb(skb);
	read_unlock(&udp_hash_lock);
	return 0;
}

/* Initialize the UDP checksum. If it returns zero (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding the result into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	const struct iphdr *iph;
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	iph = ip_hdr(skb);
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
				       proto, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					       skb->len, proto, 0);
	/* Probably, we should checksum the udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */

	return 0;
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh = udp_hdr(skb);
	unsigned short ulen;
	struct rtable *rt = (struct rtable *)skb->dst;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr,
						udptable);

	sk = __udp4_lib_lookup(skb->dev->nd_net, saddr, uh->source, daddr,
			       uh->dest, inet_iif(skb), udptable);

	if (sk != NULL) {
		int ret = 0;
		bh_lock_sock_nested(sk);
		if (!sock_owned_by_user(sk))
			ret = udp_queue_rcv_skb(sk, skb);
		else
			sk_add_backlog(sk, skb);
		bh_unlock_sock(sk);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       ulen,
		       skb->len,
		       NIPQUAD(daddr),
		       ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       NIPQUAD(daddr),
		       ntohs(uh->dest),
		       ulen);
drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
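
/*
 * Illustrative userspace sketch (group and port assumed for illustration):
 * joining a multicast group so that datagrams delivered through
 * __udp4_lib_mcast_deliver() above reach this socket.
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int join_group(void)
 *	{
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		struct sockaddr_in a;
 *		struct ip_mreq mreq;
 *
 *		memset(&a, 0, sizeof(a));
 *		a.sin_family = AF_INET;
 *		a.sin_port = htons(5000);
 *		a.sin_addr.s_addr = htonl(INADDR_ANY);
 *		bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *		mreq.imr_multiaddr.s_addr = inet_addr("239.1.2.3");
 *		mreq.imr_interface.s_addr = htonl(INADDR_ANY);
 *		setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
 *			   &mreq, sizeof(mreq));
 *		return fd;
 *	}
 */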

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
}

int udp_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_flush_pending_frames(sk);
	release_sock(sk);
	return 0;
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			(*push_pending_frames)(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)	 /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)	 /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
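
/*
 * Illustrative userspace sketch (assumed helper; on older libcs the
 * UDPLITE_* option constants may need defining by hand, SEND = 10,
 * RECV = 11): a sender covering only its first 12 bytes with the checksum
 * and a receiver refusing anything covered less than 12 bytes, using the
 * options handled above.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>
 *	#include <sys/socket.h>
 *
 *	#ifndef IPPROTO_UDPLITE
 *	#define IPPROTO_UDPLITE 136
 *	#endif
 *
 *	static int udplite_socket(int send_cov, int recv_cov)
 *	{
 *		int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *
 *		setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
 *			   &send_cov, sizeof(send_cov));
 *		setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
 *			   &recv_cov, sizeof(recv_cov));
 *		return fd;
 *	}
 */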

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	/* The following two cannot be changed on UDP sockets; the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select() could
 *	indicate data available, but the subsequent read would block.
 *	Add special-case code to work around these arguably broken
 *	applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	int is_lite = IS_UDPLITE(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) &&
	    !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;

		spin_lock_bh(&rcvq->lock);
		while ((skb = skb_peek(rcvq)) != NULL &&
		       udp_lib_checksum_complete(skb)) {
			UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
			__skb_unlink(skb, rcvq);
			kfree_skb(skb);
		}
		spin_unlock_bh(&rcvq->lock);

		/* nothing to see, move along */
		if (skb == NULL)
			mask &= ~(POLLIN | POLLRDNORM);
	}

	return mask;
}

DEFINE_PROTO_INUSE(udp)

struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.backlog_rcv	   = udp_queue_rcv_skb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.get_port	   = udp_v4_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
	REF_PROTO_INUSE(udp)
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE;
	     ++state->bucket) {
		struct hlist_node *node;
		sk_for_each(sk, node, state->hashtable + state->bucket) {
			if (sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != state->family);

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(state->hashtable + state->bucket);
		goto try_again;
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(udp_hash_lock)
{
	read_lock(&udp_hash_lock);
	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
	__releases(udp_hash_lock)
{
	read_unlock(&udp_hash_lock);
}

static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	s->family		= afinfo->family;
	s->hashtable		= afinfo->hashtable;
	s->seq_ops.start	= udp_seq_start;
	s->seq_ops.next		= udp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= udp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;

	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= udp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(&init_net, afinfo->name, S_IRUGO,
				 afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(&init_net, afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->daddr;
	__be32 src  = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp  = ntohs(inet->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp);
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
	else {
		char tmpbuf[129];
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, tmpbuf, state->bucket);
		seq_printf(seq, "%-127s\n", tmpbuf);
	}
	return 0;
}

/* ------------------------------------------------------------------------ */
static struct file_operations udp4_seq_fops;
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "udp",
	.family		= AF_INET,
	.hashtable	= udp_hash,
	.seq_show	= udp4_seq_show,
	.seq_fops	= &udp4_seq_fops,
};
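
/*
 * Worked example (all values assumed for illustration) of one line
 * produced by udp4_format_sock() above: a socket bound to 0.0.0.0:68 in
 * bucket 4 might appear in /proc/net/udp roughly as
 *
 *    4: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 1234 2 c0ffee00
 *
 * where addresses and ports are hex (0x0044 = 68), "07" is TCP_CLOSE (the
 * idle state UDP sockets stay in), and the trailing fields are the uid,
 * timeout, inode, refcount and socket pointer.
 */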

int __init udp4_proc_init(void)
{
	return udp_proc_register(&udp4_seq_afinfo);
}

void udp4_proc_exit(void)
{
	udp_proc_unregister(&udp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */

void __init udp_init(void)
{
	unsigned long limit;

	/* Set the pressure threshold using the same strategy as TCP. It is
	 * a fraction of global memory that is up to 1/2 at 256 MB,
	 * decreasing toward zero with the amount of memory, with a floor
	 * of 128 pages.
	 */
	limit = min(nr_all_pages, 1UL << (28 - PAGE_SHIFT)) >> (20 - PAGE_SHIFT);
	limit = (limit * (nr_all_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}

EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_get_port);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif
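
/*
 * Worked example of the sizing in udp_init() above (machine assumed for
 * illustration: 1 GB of RAM, 4 KB pages, so nr_all_pages = 262144 and
 * PAGE_SHIFT = 12):
 *
 *	limit = min(262144, 65536) >> 8      =  256
 *	limit = (256 * (262144 >> 8)) >> 1   =  131072 pages
 *	sysctl_udp_mem = { 98304, 131072, 196608 }
 *
 * i.e. the pressure threshold on this machine lands at 131072 pages
 * (512 MB), clamped from below at 128 pages on very small machines.
 */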