/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Version:	$Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include "udp_impl.h"

/*
 *	Snmp MIB for the UDP layer
 */

DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);

static int udp_port_rover;

static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[])
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
		if (sk->sk_hash == num)
			return 1;
	return 0;
}

/**
 *  __udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @udptable:    hash list table, must be of UDP_HTABLE_SIZE
 *  @port_rover:  pointer to record of last unallocated port
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 */
int __udp_lib_get_port(struct sock *sk, unsigned short snum,
		       struct hlist_head udptable[], int *port_rover,
		       int (*saddr_comp)(const struct sock *sk1,
					 const struct sock *sk2))
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct sock *sk2;
	int error = 1;

	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (*port_rover > sysctl_local_port_range[1] ||
		    *port_rover < sysctl_local_port_range[0])
			*port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = *port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			int size;

			head = &udptable[result & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(head)) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			sk_for_each(sk2, node, head) {
				if (++size >= best_size_so_far)
					goto next;
			}
			best_size_so_far = size;
			best = result;
		next:
			;
		}
		result = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			if (!
			    __udp_lib_lport_inuse(result, udptable))
				break;
		}
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
			goto fail;
gotit:
		*port_rover = snum = result;
	} else {
		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];

		sk_for_each(sk2, node, head)
			if (sk2->sk_hash == snum &&
			    sk2 != sk &&
			    (!sk2->sk_reuse || !sk->sk_reuse) &&
			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
			     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
			    (*saddr_comp)(sk, sk2))
				goto fail;
	}
	inet_sk(sk)->num = snum;
	sk->sk_hash = snum;
	if (sk_unhashed(sk)) {
		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
		sk_add_node(sk, head);
		sock_prot_inc_use(sk->sk_prot);
	}
	error = 0;
fail:
	write_unlock_bh(&udp_hash_lock);
	return error;
}

__inline__ int udp_get_port(struct sock *sk, unsigned short snum,
			    int (*scmp)(const struct sock *, const struct sock *))
{
	return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp);
}

inline int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	return (!ipv6_only_sock(sk2) &&
		(!inet1->rcv_saddr || !inet2->rcv_saddr ||
		 inet1->rcv_saddr == inet2->rcv_saddr));
}

static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
}

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
				      __be32 daddr, __be16 dport,
				      int dif, struct hlist_head udptable[])
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	read_lock(&udp_hash_lock);
	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) {
			int score = (sk->sk_family == PF_INET ?
				     1 : 0);
			if (inet->rcv_saddr) {
				if (inet->rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (inet->daddr) {
				if (inet->daddr != saddr)
					continue;
				score += 2;
			}
			if (inet->dport) {
				if (inet->dport != sport)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 9) {
				result = sk;
				break;
			} else if (score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	if (result)
		sock_hold(result);
	read_unlock(&udp_hash_lock);
	return result;
}

static inline struct sock *udp_v4_mcast_next(struct sock *sk,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_node *node;
	struct sock *s = sk;
	unsigned short hnum = ntohs(loc_port);

	sk_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (s->sk_hash != hnum ||
		    (inet->daddr && inet->daddr != rmt_addr) ||
		    (inet->dport != rmt_port && inet->dport) ||
		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
		    ipv6_only_sock(s) ||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet.  We move
 * on past this.  Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
{
	struct inet_sock *inet;
	struct iphdr *iph = (struct iphdr*)skb->data;
	struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	int harderr;
	int err;

	sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source,
			       skb->dev->ifindex, udptable);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

__inline__ void udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, udp_hash);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}

/**
 * 	udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
 * 	@sk: 	socket we are sending on
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 */
static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 __be32 src, __be32 dst, int len)
{
	unsigned int offset;
	struct udphdr *uh = skb->h.uh;
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb->h.raw - skb->data;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	struct sk_buff *skb;
	struct udphdr *uh;
	int err = 0;
	__wsum csum = 0;

	/* Grab the skbuff where UDP header space exists.
	 */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = skb->h.uh;
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (up->pcflag)					 /*     UDP-Lite      */
		csum = udplite_csum_outgoing(sk, skb);

	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
		goto send;

	} else						 /*   `normal' UDP    */
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8 tos;
	int err, is_udplite = up->pcflag;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags&MSG_OOB)	/* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->daddr;
		dport = inet->dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt)
		ipc.opt = inet->opt;

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable*)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = sk->sk_protocol,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = dport } } };
		security_sk_classify_flow(sk, &fl);
		err = ip_route_output_flow(&rt, &fl, sk, 1);
		if (err)
			goto out;

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = rt->rt_src;
	if (!ipc.addr)
		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
			sizeof(struct udphdr), &ipc, rt,
			corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err) {
		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
		return len;
	}
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - sizeof(struct udphdr);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */

int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	int copied, err, copy_only, is_udplite = IS_UDPLITE(sk);

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

try_again:
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len - sizeof(struct udphdr);
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	/*
	 * 	Decide whether to checksum and/or copy data.
	 *
	 * 	UDP:      checksum may have been computed in HW,
	 * 	          (re-)compute it if message is truncated.
	 * 	UDP-Lite: always needs to checksum, no HW support.
	 */
	copy_only = (skb->ip_summed == CHECKSUM_UNNECESSARY);

	if (is_udplite || (!copy_only && msg->msg_flags&MSG_TRUNC)) {
		if (__udp_lib_checksum_complete(skb))
			goto csum_copy_err;
		copy_only = 1;
	}

	if (copy_only)
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = skb->h.uh->source;
		sin->sin_addr.s_addr = skb->nh.iph->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len - sizeof(struct udphdr);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);

	skb_kill_datagram(sk, skb, flags);

	if (noblock)
		return -EAGAIN;
	goto try_again;
}


int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->daddr = 0;
	inet->dport = 0;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}

/* return:
 * 	1  if the UDP system should process it
 *	0  if we should drop this packet
 * 	-1 if it should get processed by xfrm4_rcv_encap
 */
static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
{
#ifndef CONFIG_XFRM
	return 1;
#else
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh;
	struct iphdr *iph;
	int iphlen, len;

	__u8 *udpdata;
	__be32 *udpdata32;
	__u16 encap_type = up->encap_type;

	/* if we're overly short, let UDP handle it */
	len = skb->len - sizeof(struct udphdr);
	if (len <= 0)
		return 1;

	/* if this is not an encapsulation socket, then just return now */
	if (!encap_type)
		return 1;

	/* If this is a paged skb, make sure we pull up
	 * whatever data we need to look at. */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
		return 1;

	/* Now we can get the pointers */
	uh = skb->h.uh;
	udpdata = (__u8 *)uh + sizeof(struct udphdr);
	udpdata32 = (__be32 *)udpdata;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet. If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet. If so, eat it.
		 */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {

			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return 0;

	/* Now we can update and verify the packet length... */
	iph = skb->nh.iph;
	iphlen = iph->ihl << 2;
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	if (skb->len < iphlen + len) {
		/* packet is too small!?! */
		return 0;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	skb->h.raw = skb_pull(skb, len);

	/* modify the protocol (it's ESP!) */
	iph->protocol = IPPROTO_ESP;

	/* and let the caller know to send this into the ESP processor... */
	return -1;
#endif
}

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket, so let's see if this is
		 * an encapsulated packet.
		 * If it's a keepalive packet, then just eat it.
		 * If it's an encapsulated packet, then pass it to the
		 * IPsec xfrm input and return the response
		 * appropriately.  Otherwise, just fall through and
		 * pass this up the UDP socket.
		 */
		int ret;

		ret = udp_encap_rcv(sk, skb);
		if (ret == 0) {
			/* Eat the packet .. */
			kfree_skb(skb);
			return 0;
		}
		if (ret < 0) {
			/* process the ESP packet */
			ret = xfrm4_rcv_encap(skb, up->encap_type);
			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
			return -ret;
		}
		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if (__udp_lib_checksum_complete(skb))
			goto drop;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
		goto drop;
	}

	UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
	return 0;

drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
	kfree_skb(skb);
	return -1;
}

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct hlist_head udptable[])
{
	struct sock *sk;
	int dif;

	read_lock(&udp_hash_lock);
	sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
	if (sk) {
		struct sock *sknext = NULL;

		do {
			struct sk_buff *skb1 = skb;

			sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
						   uh->source, saddr, dif);
			if (sknext)
				skb1 = skb_clone(skb, GFP_ATOMIC);

			if (skb1) {
				int ret = udp_queue_rcv_skb(sk, skb1);
				if (ret > 0)
					/* we should probably re-process instead
					 * of dropping packets here. */
					kfree_skb(skb1);
			}
			sk = sknext;
		} while (sknext);
	} else
		kfree_skb(skb);
	read_unlock(&udp_hash_lock);
	return 0;
}

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh)
{
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
				       skb->len, IPPROTO_UDP, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
		skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr,
					       skb->nh.iph->daddr,
					       skb->len, IPPROTO_UDP, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */

	/* UDP = UDP-Lite with a non-partial checksum coverage */
	UDP_SKB_CB(skb)->partial_cov = 0;
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
		   int is_udplite)
{
	struct sock *sk;
	struct udphdr *uh = skb->h.uh;
	unsigned short ulen;
	struct rtable *rt = (struct rtable*)skb->dst;
	__be32 saddr = skb->nh.iph->saddr;
	__be32 daddr = skb->nh.iph->daddr;

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (!is_udplite) {		/* UDP validates ulen. */

		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = skb->h.uh;

		udp4_csum_init(skb, uh);

	} else {			/* UDP-Lite validates cscov. */
		if (udplite4_csum_init(skb, uh))
			goto csum_error;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);

	sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
			       skb->dev->ifindex, udptable);

	if (sk != NULL) {
		int ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
		       is_udplite? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       ulen,
		       skb->len,
		       NIPQUAD(daddr),
		       ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
		       is_udplite?
"-Lite" : "", 1281 NIPQUAD(saddr), 1282 ntohs(uh->source), 1283 NIPQUAD(daddr), 1284 ntohs(uh->dest), 1285 ulen); 1286 drop: 1287 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); 1288 kfree_skb(skb); 1289 return(0); 1290 } 1291 1292 __inline__ int udp_rcv(struct sk_buff *skb) 1293 { 1294 return __udp4_lib_rcv(skb, udp_hash, 0); 1295 } 1296 1297 int udp_destroy_sock(struct sock *sk) 1298 { 1299 lock_sock(sk); 1300 udp_flush_pending_frames(sk); 1301 release_sock(sk); 1302 return 0; 1303 } 1304 1305 /* 1306 * Socket option code for UDP 1307 */ 1308 int udp_lib_setsockopt(struct sock *sk, int level, int optname, 1309 char __user *optval, int optlen, 1310 int (*push_pending_frames)(struct sock *)) 1311 { 1312 struct udp_sock *up = udp_sk(sk); 1313 int val; 1314 int err = 0; 1315 1316 if(optlen<sizeof(int)) 1317 return -EINVAL; 1318 1319 if (get_user(val, (int __user *)optval)) 1320 return -EFAULT; 1321 1322 switch(optname) { 1323 case UDP_CORK: 1324 if (val != 0) { 1325 up->corkflag = 1; 1326 } else { 1327 up->corkflag = 0; 1328 lock_sock(sk); 1329 (*push_pending_frames)(sk); 1330 release_sock(sk); 1331 } 1332 break; 1333 1334 case UDP_ENCAP: 1335 switch (val) { 1336 case 0: 1337 case UDP_ENCAP_ESPINUDP: 1338 case UDP_ENCAP_ESPINUDP_NON_IKE: 1339 up->encap_type = val; 1340 break; 1341 default: 1342 err = -ENOPROTOOPT; 1343 break; 1344 } 1345 break; 1346 1347 /* 1348 * UDP-Lite's partial checksum coverage (RFC 3828). 1349 */ 1350 /* The sender sets actual checksum coverage length via this option. 1351 * The case coverage > packet length is handled by send module. */ 1352 case UDPLITE_SEND_CSCOV: 1353 if (!up->pcflag) /* Disable the option on UDP sockets */ 1354 return -ENOPROTOOPT; 1355 if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ 1356 val = 8; 1357 up->pcslen = val; 1358 up->pcflag |= UDPLITE_SEND_CC; 1359 break; 1360 1361 /* The receiver specifies a minimum checksum coverage value. To make 1362 * sense, this should be set to at least 8 (as done below). If zero is 1363 * used, this again means full checksum coverage. */ 1364 case UDPLITE_RECV_CSCOV: 1365 if (!up->pcflag) /* Disable the option on UDP sockets */ 1366 return -ENOPROTOOPT; 1367 if (val != 0 && val < 8) /* Avoid silly minimal values. 
					  */
			val = 8;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	};

	return err;
}

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/**
 * 	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	then it could get a return from select indicating data available
 *	but then block when reading it.  Add special case code
 *	to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	int is_lite = IS_UDPLITE(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) &&
	    !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;

		spin_lock_bh(&rcvq->lock);
		while ((skb = skb_peek(rcvq)) != NULL) {
			if (udp_lib_checksum_complete(skb)) {
				UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
				__skb_unlink(skb, rcvq);
				kfree_skb(skb);
			} else {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}
		spin_unlock_bh(&rcvq->lock);

		/* nothing to see, move along */
		if (skb == NULL)
			mask &= ~(POLLIN | POLLRDNORM);
	}

	return mask;

}

struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.backlog_rcv	   = udp_queue_rcv_skb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.get_port	   = udp_v4_get_port,
	.obj_size	   = sizeof(struct udp_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
		struct hlist_node *node;
		sk_for_each(sk, node, state->hashtable + state->bucket) {
			if (sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != state->family);

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(state->hashtable + state->bucket);
		goto try_again;
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&udp_hash_lock);
	return *pos ?
		udp_get_idx(seq, *pos-1) : (void *)1;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == (void *)1)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&udp_hash_lock);
}

static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	s->family		= afinfo->family;
	s->hashtable		= afinfo->hashtable;
	s->seq_ops.start	= udp_seq_start;
	s->seq_ops.next		= udp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= udp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;

	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= udp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->daddr;
	__be32 src  = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp  = ntohs(inet->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp);
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
	else {
		char tmpbuf[129];
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, tmpbuf, state->bucket);
		seq_printf(seq, "%-127s\n", tmpbuf);
	}
	return 0;
}

/* ------------------------------------------------------------------------ */
static struct file_operations udp4_seq_fops;
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "udp",
	.family		= AF_INET,
	.hashtable	= udp_hash,
	.seq_show	= udp4_seq_show,
	.seq_fops	= &udp4_seq_fops,
};

int __init udp4_proc_init(void)
{
	return udp_proc_register(&udp4_seq_afinfo);
}

void udp4_proc_exit(void)
{
	udp_proc_unregister(&udp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */

EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_get_port);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif