/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Version:	$Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen 	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/config.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/snmp.h>
#include <net/tcp.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>

/*
 *	Snmp MIB for the UDP layer
 */

DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);

/* Shared by v4/v6 udp. */
int udp_port_rover;

/*
 * Returns nonzero if local port 'num' is already taken by some socket
 * in its hash chain.  Called with udp_hash_lock held.  (This helper is
 * referenced by udp_v4_get_port() below but was missing from the
 * listing; restored as in the 2.6.12 source.)
 */
static int udp_lport_inuse(u16 num)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &udp_hash[num & (UDP_HTABLE_SIZE - 1)])
		if (inet_sk(sk)->num == num)
			return 1;
	return 0;
}

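/*
 * Bind sk to local port snum, or to an ephemeral port when snum is
 * zero.  The ephemeral search scans UDP_HTABLE_SIZE consecutive rover
 * values: an empty hash chain is taken immediately, otherwise the
 * shortest chain seen is remembered and ports hashing onto it are then
 * probed with udp_lport_inuse() until a free one turns up.  An explicit
 * snum fails if a conflicting socket is already bound to the same port
 * on an overlapping device and address, unless both sockets have
 * SO_REUSEADDR set.  Returns 0 on success and 1 on failure.
 */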
static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	struct hlist_node *node;
	struct sock *sk2;
	struct inet_sock *inet = inet_sk(sk);

	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct hlist_head *list;
			int size;

			list = &udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(list)) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			sk_for_each(sk2, node, list)
				if (++size >= best_size_so_far)
					goto next;
			best_size_so_far = size;
			best = result;
		next:;
		}
		result = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
				break;
		}
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
			goto fail;
gotit:
		udp_port_rover = snum = result;
	} else {
		sk_for_each(sk2, node,
			    &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]) {
			struct inet_sock *inet2 = inet_sk(sk2);

			if (inet2->num == snum &&
			    sk2 != sk &&
			    !ipv6_only_sock(sk2) &&
			    (!sk2->sk_bound_dev_if ||
			     !sk->sk_bound_dev_if ||
			     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
			    (!inet2->rcv_saddr ||
			     !inet->rcv_saddr ||
			     inet2->rcv_saddr == inet->rcv_saddr) &&
			    (!sk2->sk_reuse || !sk->sk_reuse))
				goto fail;
		}
	}
	inet->num = snum;
	if (sk_unhashed(sk)) {
		struct hlist_head *h = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];

		sk_add_node(sk, h);
		sock_prot_inc_use(sk->sk_prot);
	}
	write_unlock_bh(&udp_hash_lock);
	return 0;

fail:
	write_unlock_bh(&udp_hash_lock);
	return 1;
}

203 
204 static void udp_v4_hash(struct sock *sk)
205 {
206 	BUG();
207 }
208 
209 static void udp_v4_unhash(struct sock *sk)
210 {
211 	write_lock_bh(&udp_hash_lock);
212 	if (sk_del_node_init(sk)) {
213 		inet_sk(sk)->num = 0;
214 		sock_prot_dec_use(sk->sk_prot);
215 	}
216 	write_unlock_bh(&udp_hash_lock);
217 }
218 
219 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
220  * harder than this. -DaveM
221  */
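/* Each socket in the destination port's hash chain is scored: an exact
 * address-family match counts 1, and every bound or connected attribute
 * that matches the packet (local address, remote address, remote port,
 * bound device) counts 2.  A fully specified socket scores 9 and ends
 * the search early; otherwise the best-scoring socket wins.
 */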
static struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport,
					  u32 daddr, u16 dport, int dif)
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (inet->num == hnum && !ipv6_only_sock(sk)) {
			int score = (sk->sk_family == PF_INET ? 1 : 0);
			if (inet->rcv_saddr) {
				if (inet->rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (inet->daddr) {
				if (inet->daddr != saddr)
					continue;
				score += 2;
			}
			if (inet->dport) {
				if (inet->dport != sport)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 9) {
				result = sk;
				break;
			} else if (score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	return result;
}

266 
267 static __inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport,
268 					     u32 daddr, u16 dport, int dif)
269 {
270 	struct sock *sk;
271 
272 	read_lock(&udp_hash_lock);
273 	sk = udp_v4_lookup_longway(saddr, sport, daddr, dport, dif);
274 	if (sk)
275 		sock_hold(sk);
276 	read_unlock(&udp_hash_lock);
277 	return sk;
278 }
279 
280 static inline struct sock *udp_v4_mcast_next(struct sock *sk,
281 					     u16 loc_port, u32 loc_addr,
282 					     u16 rmt_port, u32 rmt_addr,
283 					     int dif)
284 {
285 	struct hlist_node *node;
286 	struct sock *s = sk;
287 	unsigned short hnum = ntohs(loc_port);
288 
289 	sk_for_each_from(s, node) {
290 		struct inet_sock *inet = inet_sk(s);
291 
292 		if (inet->num != hnum					||
293 		    (inet->daddr && inet->daddr != rmt_addr)		||
294 		    (inet->dport != rmt_port && inet->dport)		||
295 		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr)	||
296 		    ipv6_only_sock(s)					||
297 		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
298 			continue;
299 		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
300 			continue;
301 		goto found;
302   	}
303 	s = NULL;
304 found:
305   	return s;
306 }
307 
308 /*
309  * This routine is called by the ICMP module when it gets some
310  * sort of error condition.  If err < 0 then the socket should
311  * be closed and the error returned to the user.  If err > 0
312  * it's just the icmp type << 8 | icmp code.
313  * Header points to the ip header of the error packet. We move
314  * on past this. Then (as it used to claim before adjustment)
315  * header points to the first 8 bytes of the udp header.  We need
316  * to find the appropriate port.
317  */

void udp_err(struct sk_buff *skb, u32 info)
{
	struct inet_sock *inet;
	struct iphdr *iph = (struct iphdr*)skb->data;
	struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	int harderr;
	int err;

	sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk, struct udp_sock *up)
{
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	struct sk_buff *skb;
	struct udphdr *uh;
	int err = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = skb->h.uh;
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	}

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		if (skb->ip_summed == CHECKSUM_HW) {
			skb->csum = offsetof(struct udphdr, check);
			uh->check = ~csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
					up->len, IPPROTO_UDP, 0);
		} else {
			skb->csum = csum_partial((char *)uh,
					sizeof(struct udphdr), skb->csum);
			uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
					up->len, IPPROTO_UDP, skb->csum);
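			/*
			 * RFC 768: a transmitted checksum field of zero
			 * means "no checksum", so a computed value of
			 * zero is sent as its one's-complement
			 * equivalent, all ones.
			 */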
			if (uh->check == 0)
				uh->check = -1;
		}
	} else {
		unsigned int csum = 0;
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together.
		 */
		if (skb->ip_summed == CHECKSUM_HW) {
			int offset = (unsigned char *)uh - skb->data;
			skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

			skb->ip_summed = CHECKSUM_NONE;
		} else {
			skb->csum = csum_partial((char *)uh,
					sizeof(struct udphdr), skb->csum);
		}

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}
		uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
				up->len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = -1;
	}
send:
	err = ip_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}


static unsigned short udp_check(struct udphdr *uh, int len, unsigned long saddr, unsigned long daddr, unsigned long base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}

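/*
 * Queue one datagram for transmission.  A connected socket takes the
 * fast path and reuses its cached route; otherwise a route is resolved
 * per call.  The payload is appended to the cork queue with
 * ip_append_data() and, unless the socket is corked via UDP_CORK or
 * MSG_MORE, pushed out immediately as a single datagram.
 */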
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	u32 daddr, faddr, saddr;
	u16 dport;
	u8  tos;
	int err;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags&MSG_OOB)	/* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->daddr;
		dport = inet->dport;
		/* Open fast path for connected socket.
		   The route will not be used if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt)
		ipc.opt = inet->opt;

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable*)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = IPPROTO_UDP,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = dport } } };
		err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
		if (err)
			goto out;

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = rt->rt_src;
	if (!ipc.addr)
		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 2\n"));
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
			sizeof(struct udphdr), &ipc, rt,
			corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk, up);
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err) {
		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
		return len;
	}
	return err;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

static int udp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags)
{
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 3\n"));
		return -EINVAL;
	}

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk, up);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

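/* SIOCOUTQ reports the bytes still sitting in the send buffer, while
 * SIOCINQ reports the payload size of just the next pending datagram,
 * since a single read can never return more than that.
 */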
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		spin_lock_irq(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - sizeof(struct udphdr);
		}
		spin_unlock_irq(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

static __inline__ int __udp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

static __inline__ int udp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__udp_checksum_complete(skb);
}

/*
 * 	This should be easy: if there is something there,
 * 	we return it; otherwise we block.
 */

static int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	int copied, err;

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

try_again:
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len - sizeof(struct udphdr);
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

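	/*
	 * Three copy strategies: if the hardware already validated the
	 * checksum, copy directly; if the datagram is being truncated,
	 * verify the whole checksum first and then copy the part that
	 * fits; otherwise copy and checksum in one pass, taking the
	 * error path if the datagram proves corrupt.
	 */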
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		if (__udp_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = skb->h.uh->source;
		sin->sin_addr.s_addr = skb->nh.iph->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len - sizeof(struct udphdr);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS);

	/* Clear queue. */
	if (flags&MSG_PEEK) {
		int clear = 0;
		spin_lock_irq(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			clear = 1;
		}
		spin_unlock_irq(&sk->sk_receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	skb_free_datagram(sk, skb);

	if (noblock)
		return -EAGAIN;
	goto try_again;
}


int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->daddr = 0;
	inet->dport = 0;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}

static void udp_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

/* return:
 * 	1  if the UDP system should process it
 *	0  if we should drop this packet
 * 	-1 if it should get processed by xfrm4_rcv_encap
 */
static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
{
#ifndef CONFIG_XFRM
	return 1;
#else
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh = skb->h.uh;
	struct iphdr *iph;
	int iphlen, len;

	__u8 *udpdata = (__u8 *)uh + sizeof(struct udphdr);
	__u32 *udpdata32 = (__u32 *)udpdata;
	__u16 encap_type = up->encap_type;

	/* if we're overly short, let UDP handle it */
	if (udpdata > skb->tail)
		return 1;

	/* if this is not an encapsulation socket, then just return now */
	if (!encap_type)
		return 1;

	len = skb->tail - udpdata;

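	/*
	 * Two UDP encapsulations of ESP are handled (RFC 3948 NAT
	 * traversal): with UDP_ENCAP_ESPINUDP the ESP header directly
	 * follows the UDP header, while UDP_ENCAP_ESPINUDP_NON_IKE
	 * (from the early NAT-T drafts) prefixes it with an eight-byte
	 * zero marker.  In either format a one-byte 0xff payload is a
	 * NAT-keepalive, and a packet whose first word is zero is taken
	 * to be IKE and passed up to the user-space daemon.
	 */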
	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {

			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */

	/* Now we can update and verify the packet length... */
	iph = skb->nh.iph;
	iphlen = iph->ihl << 2;
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	if (skb->len < iphlen + len) {
		/* packet is too small!?! */
		return 0;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	skb->h.raw = skb_pull(skb, len);

	/* modify the protocol (it's ESP!) */
	iph->protocol = IPPROTO_ESP;

	/* and let the caller know to send this into the ESP processor... */
	return -1;
#endif
}

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		kfree_skb(skb);
		return -1;
	}

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket, so let's see if this is
		 * an encapsulated packet.
		 * If it's a keepalive packet, then just eat it.
		 * If it's an encapsulated packet, then pass it to the
		 * IPsec xfrm input and return the response
		 * appropriately.  Otherwise, just fall through and
		 * pass this up the UDP socket.
		 */
		int ret;

		ret = udp_encap_rcv(sk, skb);
		if (ret == 0) {
			/* Eat the packet .. */
			kfree_skb(skb);
			return 0;
		}
		if (ret < 0) {
			/* process the ESP packet */
			ret = xfrm4_rcv_encap(skb, up->encap_type);
			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
			return -ret;
		}
		/* FALLTHROUGH -- it's a UDP Packet */
	}

	if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if (__udp_checksum_complete(skb)) {
			UDP_INC_STATS_BH(UDP_MIB_INERRORS);
			kfree_skb(skb);
			return -1;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (sock_queue_rcv_skb(sk, skb) < 0) {
		UDP_INC_STATS_BH(UDP_MIB_INERRORS);
		kfree_skb(skb);
		return -1;
	}
	UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
	return 0;
}

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
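/* The skb is cloned for every matching socket except the last, which
 * consumes the original; if nothing matches, the skb is freed here.
 */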
static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh,
				 u32 saddr, u32 daddr)
{
	struct sock *sk;
	int dif;

	read_lock(&udp_hash_lock);
	sk = sk_head(&udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
	if (sk) {
		struct sock *sknext = NULL;

		do {
			struct sk_buff *skb1 = skb;

			sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
						   uh->source, saddr, dif);
			if (sknext)
				skb1 = skb_clone(skb, GFP_ATOMIC);

			if (skb1) {
				int ret = udp_queue_rcv_skb(sk, skb1);
				if (ret > 0)
					/* we should probably re-process instead
					 * of dropping packets here. */
					kfree_skb(skb1);
			}
			sk = sknext;
		} while (sknext);
	} else
		kfree_skb(skb);
	read_unlock(&udp_hash_lock);
	return 0;
}

/* Initialize the UDP checksum state.  If this returns zero (success)
 * and skb->ip_summed is CHECKSUM_UNNECESSARY, no more checks are
 * required.  Otherwise, completing the check requires checksumming the
 * packet body, including the UDP header, and folding it into skb->csum.
 */
static int udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
			     unsigned short ulen, u32 saddr, u32 daddr)
{
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_HW) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
			return 0;
		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp v4 hw csum failure.\n"));
		skb->ip_summed = CHECKSUM_NONE;
	}
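	/*
	 * Seed skb->csum with the unfolded pseudo-header sum, so that a
	 * later __udp_checksum_complete() over the whole datagram folds
	 * to zero exactly when the checksum is valid.
	 */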
	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
		skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */
	return 0;
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int udp_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = (struct rtable*)skb->dst;
	u32 saddr = skb->nh.iph->saddr;
	u32 daddr = skb->nh.iph->daddr;
	int len = skb->len;

	/*
	 *	Validate the packet and the UDP length.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto no_header;

	uh = skb->h.uh;

	ulen = ntohs(uh->len);

	if (ulen > len || ulen < sizeof(*uh))
		goto short_packet;

	if (pskb_trim(skb, ulen))
		goto short_packet;

	if (udp_checksum_init(skb, uh, ulen, saddr, daddr) < 0)
		goto csum_error;

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return udp_v4_mcast_deliver(skb, uh, saddr, daddr);

	sk = udp_v4_lookup(saddr, uh->source, daddr, uh->dest, skb->dev->ifindex);

	if (sk != NULL) {
		int ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * the caller wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;

	/* No socket. Drop the packet silently if the checksum is wrong. */
	if (udp_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(UDP_MIB_NOPORTS);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port on which we
	 * don't want to listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	NETDEBUG(if (net_ratelimit())
		printk(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
			NIPQUAD(saddr),
			ntohs(uh->source),
			ulen,
			len,
			NIPQUAD(daddr),
			ntohs(uh->dest)));
no_header:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
	kfree_skb(skb);
	return 0;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	NETDEBUG(if (net_ratelimit())
		 printk(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
			NIPQUAD(saddr),
			ntohs(uh->source),
			NIPQUAD(daddr),
			ntohs(uh->dest),
			ulen));
drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
	kfree_skb(skb);
	return 0;
}

static int udp_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_flush_pending_frames(sk);
	release_sock(sk);
	return 0;
}

/*
 *	Socket option code for UDP
 */
static int udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;

	if (level != SOL_UDP)
		return ip_setsockopt(sk, level, optname, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			udp_push_pending_frames(sk, up);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_type = val;
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}

static int udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (level != SOL_UDP)
		return ip_getsockopt(sk, level, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;

	/* Reject a negative length before the unsigned clamp below,
	 * where it would wrap around and be silently accepted.
	 */
	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram_poll(), except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select() could indicate
 *	data available while a subsequent read would block.  Add special-case
 *	code to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) &&
	    !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;

		spin_lock_irq(&rcvq->lock);
		while ((skb = skb_peek(rcvq)) != NULL) {
			if (udp_checksum_complete(skb)) {
				UDP_INC_STATS_BH(UDP_MIB_INERRORS);
				__skb_unlink(skb, rcvq);
				kfree_skb(skb);
			} else {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}
		spin_unlock_irq(&rcvq->lock);

		/* nothing to see, move along */
		if (skb == NULL)
			mask &= ~(POLLIN | POLLRDNORM);
	}

	return mask;
}

struct proto udp_prot = {
	.name =		"UDP",
	.owner =	THIS_MODULE,
	.close =	udp_close,
	.connect =	ip4_datagram_connect,
	.disconnect =	udp_disconnect,
	.ioctl =	udp_ioctl,
	.destroy =	udp_destroy_sock,
	.setsockopt =	udp_setsockopt,
	.getsockopt =	udp_getsockopt,
	.sendmsg =	udp_sendmsg,
	.recvmsg =	udp_recvmsg,
	.sendpage =	udp_sendpage,
	.backlog_rcv =	udp_queue_rcv_skb,
	.hash =		udp_v4_hash,
	.unhash =	udp_v4_unhash,
	.get_port =	udp_v4_get_port,
	.obj_size =	sizeof(struct udp_sock),
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

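/* One seq_file iterator serves both /proc/net/udp and its IPv6
 * counterpart: it walks every hash bucket and filters sockets by
 * address family.  udp_hash_lock is read-held from udp_seq_start()
 * until udp_seq_stop(), covering the whole traversal.
 */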
static struct sock *udp_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
		struct hlist_node *node;
		sk_for_each(sk, node, &udp_hash[state->bucket]) {
			if (sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != state->family);

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(&udp_hash[state->bucket]);
		goto try_again;
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&udp_hash_lock);
	return *pos ? udp_get_idx(seq, *pos - 1) : (void *)1;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == (void *)1)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&udp_hash_lock);
}

static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	memset(s, 0, sizeof(*s));
	s->family		= afinfo->family;
	s->seq_ops.start	= udp_seq_start;
	s->seq_ops.next		= udp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= udp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;

	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= udp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}

/* ------------------------------------------------------------------------ */
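/* Emit one row of /proc/net/udp in the legacy tcp-style format; the
 * column order must match the header line printed by udp4_seq_show().
 * Fields that only make sense for TCP (timers, retransmits) are
 * printed as zeros.
 */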
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src  = inet->rcv_saddr;
	__u16 destp	  = ntohs(inet->dport);
	__u16 srcp	  = ntohs(inet->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp);
}

static int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
	else {
		char tmpbuf[129];
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, tmpbuf, state->bucket);
		seq_printf(seq, "%-127s\n", tmpbuf);
	}
	return 0;
}

/* ------------------------------------------------------------------------ */
static struct file_operations udp4_seq_fops;
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "udp",
	.family		= AF_INET,
	.seq_show	= udp4_seq_show,
	.seq_fops	= &udp4_seq_fops,
};

int __init udp4_proc_init(void)
{
	return udp_proc_register(&udp4_seq_afinfo);
}

void udp4_proc_exit(void)
{
	udp_proc_unregister(&udp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */

EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_port_rover);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_poll);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif