xref: /openbmc/linux/net/core/datagram.c (revision a8da474e)
1 /*
2  *	SUCS NET3:
3  *
4  *	Generic datagram handling routines. These are generic for all
5  *	protocols. Possibly a generic IP version on top of these would
6  *	make sense. Not tonight however 8-).
7  *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and the
8  *	NetROM layer all have identical poll code and mostly
9  *	identical recvmsg() code. So we share it here. The poll was
10  *	shared before but buried in udp.c so I moved it.
11  *
12  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
13  *						     udp.c code)
14  *
15  *	Fixes:
16  *		Alan Cox	:	NULL return from skb_peek_copy()
17  *					understood
18  *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
19  *					skb_peek_copy stuff.
20  *		Alan Cox	:	Added support for SOCK_SEQPACKET.
21  *					IPX can no longer use the SO_TYPE hack
22  *					but AX.25 now works right, and SPX is
23  *					feasible.
24  *		Alan Cox	:	Fixed write poll of non IP protocol
25  *					crash.
26  *		Florian  La Roche:	Changed for my new skbuff handling.
27  *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
28  *		Linus Torvalds	:	BSD semantic fixes.
29  *		Alan Cox	:	Datagram iovec handling
30  *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
31  *		Alan Cox	:	POSIXisms
32  *		Pete Wyckoff    :       Unconnected accept() fix.
33  *
34  */
35 
36 #include <linux/module.h>
37 #include <linux/types.h>
38 #include <linux/kernel.h>
39 #include <asm/uaccess.h>
40 #include <linux/mm.h>
41 #include <linux/interrupt.h>
42 #include <linux/errno.h>
43 #include <linux/sched.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/poll.h>
48 #include <linux/highmem.h>
49 #include <linux/spinlock.h>
50 #include <linux/slab.h>
51 #include <linux/pagemap.h>
52 #include <linux/uio.h>
53 
54 #include <net/protocol.h>
55 #include <linux/skbuff.h>
56 
57 #include <net/checksum.h>
58 #include <net/sock.h>
59 #include <net/tcp_states.h>
60 #include <trace/events/skb.h>
61 #include <net/busy_poll.h>
62 
63 /*
64  *	Is a socket 'connection oriented' ?
65  */
66 static inline int connection_based(struct sock *sk)
67 {
68 	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
69 }
70 
71 static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
72 				  void *key)
73 {
74 	unsigned long bits = (unsigned long)key;
75 
76 	/*
77 	 * Avoid a wakeup if the event is not interesting to us
78 	 */
79 	if (bits && !(bits & (POLLIN | POLLERR)))
80 		return 0;
81 	return autoremove_wake_function(wait, mode, sync, key);
82 }
83 /*
84  * Wait for the last received packet to be different from skb
85  */
86 static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
87 				 const struct sk_buff *skb)
88 {
89 	int error;
90 	DEFINE_WAIT_FUNC(wait, receiver_wake_function);
91 
92 	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
93 
94 	/* Socket errors? */
95 	error = sock_error(sk);
96 	if (error)
97 		goto out_err;
98 
99 	if (sk->sk_receive_queue.prev != skb)
100 		goto out;
101 
102 	/* Socket shut down? */
103 	if (sk->sk_shutdown & RCV_SHUTDOWN)
104 		goto out_noerr;
105 
106 	/* Connection-based sockets may be disconnected.
107 	 * If so, we report the problem (-ENOTCONN).
108 	 */
109 	error = -ENOTCONN;
110 	if (connection_based(sk) &&
111 	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
112 		goto out_err;
113 
114 	/* handle signals */
115 	if (signal_pending(current))
116 		goto interrupted;
117 
118 	error = 0;
119 	*timeo_p = schedule_timeout(*timeo_p);
120 out:
121 	finish_wait(sk_sleep(sk), &wait);
122 	return error;
123 interrupted:
124 	error = sock_intr_errno(*timeo_p);
125 out_err:
126 	*err = error;
127 	goto out;
128 out_noerr:
129 	*err = 0;
130 	error = 1;
131 	goto out;
132 }
133 
134 static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
135 {
136 	struct sk_buff *nskb;
137 
138 	if (skb->peeked)
139 		return skb;
140 
141 	/* We have to unshare an skb before modifying it. */
142 	if (!skb_shared(skb))
143 		goto done;
144 
145 	nskb = skb_clone(skb, GFP_ATOMIC);
146 	if (!nskb)
147 		return ERR_PTR(-ENOMEM);
148 
149 	skb->prev->next = nskb;
150 	skb->next->prev = nskb;
151 	nskb->prev = skb->prev;
152 	nskb->next = skb->next;
153 
154 	consume_skb(skb);
155 	skb = nskb;
156 
157 done:
158 	skb->peeked = 1;
159 
160 	return skb;
161 }
162 
163 /**
164  *	__skb_recv_datagram - Receive a datagram skbuff
165  *	@sk: socket
166  *	@flags: MSG_ flags
167  *	@peeked: returns non-zero if this packet has been seen before
168  *	@off: an offset in bytes to peek skb from. Returns an offset
169  *	      within an skb where data actually starts
170  *	@err: error code returned
171  *
172  *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
173  *	and possible races. This replaces identical code in packet, raw and
174  *	udp, as well as IPX, AX.25 and AppleTalk. It also finally fixes
175  *	the long standing peek and read race for datagram sockets. If you
176  *	alter this routine remember it must be re-entrant.
177  *
178  *	This function does not lock the socket; it only takes the receive
179  *	queue's spinlock internally. The returned skb holds a reference that
180  *	the caller must release when done (usually by calling skb_free_datagram).
181  *
182  *	* It does not lock socket since today. This function is
183  *	* free of race conditions. This measure should/can improve
184  *	* significantly datagram socket latencies at high loads,
185  *	* when data copying to user space takes lots of time.
186  *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
187  *	*  8) Great win.)
188  *	*			                    --ANK (980729)
189  *
190  *	The order of the tests when we find no data waiting is specified
191  *	quite explicitly by POSIX 1003.1g; don't change them without having
192  *	the standard around, please.
193  */
194 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
195 				    int *peeked, int *off, int *err)
196 {
197 	struct sk_buff_head *queue = &sk->sk_receive_queue;
198 	struct sk_buff *skb, *last;
199 	unsigned long cpu_flags;
200 	long timeo;
201 	/*
202 	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
203 	 */
204 	int error = sock_error(sk);
205 
206 	if (error)
207 		goto no_packet;
208 
209 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
210 
211 	do {
212 		/* Again, only user level code calls this function, so nothing
213 		 * at interrupt level will suddenly eat the receive_queue.
214 		 *
215 		 * Look at current nfs client by the way...
216 		 * However, this function was correct in any case. 8)
217 		 */
218 		int _off = *off;
219 
220 		last = (struct sk_buff *)queue;
221 		spin_lock_irqsave(&queue->lock, cpu_flags);
222 		skb_queue_walk(queue, skb) {
223 			last = skb;
224 			*peeked = skb->peeked;
225 			if (flags & MSG_PEEK) {
226 				if (_off >= skb->len && (skb->len || _off ||
227 							 skb->peeked)) {
228 					_off -= skb->len;
229 					continue;
230 				}
231 
232 				skb = skb_set_peeked(skb);
233 				error = PTR_ERR(skb);
234 				if (IS_ERR(skb))
235 					goto unlock_err;
236 
237 				atomic_inc(&skb->users);
238 			} else
239 				__skb_unlink(skb, queue);
240 
241 			spin_unlock_irqrestore(&queue->lock, cpu_flags);
242 			*off = _off;
243 			return skb;
244 		}
245 		spin_unlock_irqrestore(&queue->lock, cpu_flags);
246 
247 		if (sk_can_busy_loop(sk) &&
248 		    sk_busy_loop(sk, flags & MSG_DONTWAIT))
249 			continue;
250 
251 		/* User doesn't want to wait */
252 		error = -EAGAIN;
253 		if (!timeo)
254 			goto no_packet;
255 
256 	} while (!wait_for_more_packets(sk, err, &timeo, last));
257 
258 	return NULL;
259 
260 unlock_err:
261 	spin_unlock_irqrestore(&queue->lock, cpu_flags);
262 no_packet:
263 	*err = error;
264 	return NULL;
265 }
266 EXPORT_SYMBOL(__skb_recv_datagram);
267 
268 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
269 				  int noblock, int *err)
270 {
271 	int peeked, off = 0;
272 
273 	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
274 				   &peeked, &off, err);
275 }
276 EXPORT_SYMBOL(skb_recv_datagram);
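
/*
 * Illustrative sketch only (not part of this file): the receive/copy/free
 * pattern that protocol recvmsg() implementations typically build on top of
 * skb_recv_datagram().  The function name and the bare-bones error handling
 * are hypothetical; real users such as udp_recvmsg() add address filling,
 * checksum handling and statistics.
 */
static int __maybe_unused example_dgram_recvmsg(struct sock *sk,
						struct msghdr *msg, size_t len,
						int noblock, int flags)
{
	struct sk_buff *skb;
	size_t copied;
	int err;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	skb_free_datagram(sk, skb);

	return err ? err : copied;
}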
277 
278 void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
279 {
280 	consume_skb(skb);
281 	sk_mem_reclaim_partial(sk);
282 }
283 EXPORT_SYMBOL(skb_free_datagram);
284 
285 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
286 {
287 	bool slow;
288 
289 	if (likely(atomic_read(&skb->users) == 1))
290 		smp_rmb();
291 	else if (likely(!atomic_dec_and_test(&skb->users)))
292 		return;
293 
294 	slow = lock_sock_fast(sk);
295 	skb_orphan(skb);
296 	sk_mem_reclaim_partial(sk);
297 	unlock_sock_fast(sk, slow);
298 
299 	/* skb is now orphaned, can be freed outside of locked section */
300 	__kfree_skb(skb);
301 }
302 EXPORT_SYMBOL(skb_free_datagram_locked);
303 
304 /**
305  *	skb_kill_datagram - Free a datagram skbuff forcibly
306  *	@sk: socket
307  *	@skb: datagram skbuff
308  *	@flags: MSG_ flags
309  *
310  *	This function frees a datagram skbuff that was received by
311  *	skb_recv_datagram.  The flags argument must match the one
312  *	used for skb_recv_datagram.
313  *
314  *	If the MSG_PEEK flag is set, and the packet is still on the
315  *	receive queue of the socket, it will be taken off the queue
316  *	before it is freed.
317  *
318  *	This function currently only disables BH when acquiring the
319  *	sk_receive_queue lock.  Therefore it must not be used in a
320  *	context where that lock is acquired in an IRQ context.
321  *
322  *	It returns 0 if the packet was removed by us.
323  */
324 
325 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
326 {
327 	int err = 0;
328 
329 	if (flags & MSG_PEEK) {
330 		err = -ENOENT;
331 		spin_lock_bh(&sk->sk_receive_queue.lock);
332 		if (skb == skb_peek(&sk->sk_receive_queue)) {
333 			__skb_unlink(skb, &sk->sk_receive_queue);
334 			atomic_dec(&skb->users);
335 			err = 0;
336 		}
337 		spin_unlock_bh(&sk->sk_receive_queue.lock);
338 	}
339 
340 	kfree_skb(skb);
341 	atomic_inc(&sk->sk_drops);
342 	sk_mem_reclaim_partial(sk);
343 
344 	return err;
345 }
346 EXPORT_SYMBOL(skb_kill_datagram);
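
/*
 * Illustrative sketch only (not part of this file): dropping a datagram that
 * fails validation after it has already been dequeued or peeked, the way
 * udp_recvmsg() reacts to a checksum error.  The helper name and the
 * -EAGAIN "retry" convention are hypothetical.
 */
static int __maybe_unused example_drop_bad_datagram(struct sock *sk,
						    struct sk_buff *skb,
						    unsigned int flags)
{
	/* Pass the same flags that were given to skb_recv_datagram() so a
	 * peeked skb is also unlinked from the receive queue before freeing.
	 * A zero return means we removed the packet ourselves; callers such
	 * as UDP typically loop back and try to receive the next datagram.
	 */
	if (!skb_kill_datagram(sk, skb, flags))
		return -EAGAIN;

	return -EINVAL;
}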
347 
348 /**
349  *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
350  *	@skb: buffer to copy
351  *	@offset: offset in the buffer to start copying from
352  *	@to: iovec iterator to copy to
353  *	@len: amount of data to copy from buffer to iovec
354  */
355 int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
356 			   struct iov_iter *to, int len)
357 {
358 	int start = skb_headlen(skb);
359 	int i, copy = start - offset;
360 	struct sk_buff *frag_iter;
361 
362 	trace_skb_copy_datagram_iovec(skb, len);
363 
364 	/* Copy header. */
365 	if (copy > 0) {
366 		if (copy > len)
367 			copy = len;
368 		if (copy_to_iter(skb->data + offset, copy, to) != copy)
369 			goto short_copy;
370 		if ((len -= copy) == 0)
371 			return 0;
372 		offset += copy;
373 	}
374 
375 	/* Copy paged appendix. Hmm... why does this look so complicated? */
376 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
377 		int end;
378 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
379 
380 		WARN_ON(start > offset + len);
381 
382 		end = start + skb_frag_size(frag);
383 		if ((copy = end - offset) > 0) {
384 			if (copy > len)
385 				copy = len;
386 			if (copy_page_to_iter(skb_frag_page(frag),
387 					      frag->page_offset + offset -
388 					      start, copy, to) != copy)
389 				goto short_copy;
390 			if (!(len -= copy))
391 				return 0;
392 			offset += copy;
393 		}
394 		start = end;
395 	}
396 
397 	skb_walk_frags(skb, frag_iter) {
398 		int end;
399 
400 		WARN_ON(start > offset + len);
401 
402 		end = start + frag_iter->len;
403 		if ((copy = end - offset) > 0) {
404 			if (copy > len)
405 				copy = len;
406 			if (skb_copy_datagram_iter(frag_iter, offset - start,
407 						   to, copy))
408 				goto fault;
409 			if ((len -= copy) == 0)
410 				return 0;
411 			offset += copy;
412 		}
413 		start = end;
414 	}
415 	if (!len)
416 		return 0;
417 
418 	/* This is not really a user copy fault, but rather someone
419 	 * gave us a bogus length on the skb.  We should probably
420 	 * print a warning here as it may indicate a kernel bug.
421 	 */
422 
423 fault:
424 	return -EFAULT;
425 
426 short_copy:
427 	if (iov_iter_count(to))
428 		goto fault;
429 
430 	return 0;
431 }
432 EXPORT_SYMBOL(skb_copy_datagram_iter);
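
/*
 * Illustrative sketch only (not part of this file): most callers reach
 * skb_copy_datagram_iter() through the skb_copy_datagram_msg() wrapper in
 * <linux/skbuff.h>, which simply passes the msghdr's iterator.  A
 * hypothetical direct use copying a received datagram to user buffers:
 */
static int __maybe_unused example_copy_to_user(const struct sk_buff *skb,
					       struct msghdr *msg, int copied)
{
	/* Copy 'copied' bytes, starting at offset 0 of the datagram, into
	 * the buffers described by msg->msg_iter; returns 0 or -EFAULT.
	 */
	return skb_copy_datagram_iter(skb, 0, &msg->msg_iter, copied);
}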
433 
434 /**
435  *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
436  *	@skb: buffer to copy
437  *	@offset: offset in the buffer to start copying to
438  *	@from: the copy source
439  *	@len: amount of data to copy to buffer from iovec
440  *
441  *	Returns 0 or -EFAULT.
442  */
443 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
444 				 struct iov_iter *from,
445 				 int len)
446 {
447 	int start = skb_headlen(skb);
448 	int i, copy = start - offset;
449 	struct sk_buff *frag_iter;
450 
451 	/* Copy header. */
452 	if (copy > 0) {
453 		if (copy > len)
454 			copy = len;
455 		if (copy_from_iter(skb->data + offset, copy, from) != copy)
456 			goto fault;
457 		if ((len -= copy) == 0)
458 			return 0;
459 		offset += copy;
460 	}
461 
462 	/* Copy paged appendix. Hmm... why does this look so complicated? */
463 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
464 		int end;
465 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
466 
467 		WARN_ON(start > offset + len);
468 
469 		end = start + skb_frag_size(frag);
470 		if ((copy = end - offset) > 0) {
471 			size_t copied;
472 
473 			if (copy > len)
474 				copy = len;
475 			copied = copy_page_from_iter(skb_frag_page(frag),
476 					  frag->page_offset + offset - start,
477 					  copy, from);
478 			if (copied != copy)
479 				goto fault;
480 
481 			if (!(len -= copy))
482 				return 0;
483 			offset += copy;
484 		}
485 		start = end;
486 	}
487 
488 	skb_walk_frags(skb, frag_iter) {
489 		int end;
490 
491 		WARN_ON(start > offset + len);
492 
493 		end = start + frag_iter->len;
494 		if ((copy = end - offset) > 0) {
495 			if (copy > len)
496 				copy = len;
497 			if (skb_copy_datagram_from_iter(frag_iter,
498 							offset - start,
499 							from, copy))
500 				goto fault;
501 			if ((len -= copy) == 0)
502 				return 0;
503 			offset += copy;
504 		}
505 		start = end;
506 	}
507 	if (!len)
508 		return 0;
509 
510 fault:
511 	return -EFAULT;
512 }
513 EXPORT_SYMBOL(skb_copy_datagram_from_iter);
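
/*
 * Illustrative sketch only (not part of this file): filling a freshly
 * allocated skb's linear area from user data on a transmit path, the way
 * simple sendmsg() implementations use skb_copy_datagram_from_iter().  The
 * helper name, the sizing and the minimal error handling are hypothetical.
 */
static __maybe_unused struct sk_buff *
example_skb_from_user(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	skb_put(skb, len);
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}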
514 
515 /**
516  *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
517  *	@skb: buffer to copy
518  *	@from: the source to copy from
519  *
520  *	The function will first copy up to the skb's headlen, and then pin the
521  *	userspace pages and build skb frags from them.
522  *
523  *	Returns 0, -EFAULT or -EMSGSIZE.
524  */
525 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
526 {
527 	int len = iov_iter_count(from);
528 	int copy = min_t(int, skb_headlen(skb), len);
529 	int frag = 0;
530 
531 	/* copy up to skb headlen */
532 	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
533 		return -EFAULT;
534 
535 	while (iov_iter_count(from)) {
536 		struct page *pages[MAX_SKB_FRAGS];
537 		size_t start;
538 		ssize_t copied;
539 		unsigned long truesize;
540 		int n = 0;
541 
542 		if (frag == MAX_SKB_FRAGS)
543 			return -EMSGSIZE;
544 
545 		copied = iov_iter_get_pages(from, pages, ~0U,
546 					    MAX_SKB_FRAGS - frag, &start);
547 		if (copied < 0)
548 			return -EFAULT;
549 
550 		iov_iter_advance(from, copied);
551 
552 		truesize = PAGE_ALIGN(copied + start);
553 		skb->data_len += copied;
554 		skb->len += copied;
555 		skb->truesize += truesize;
556 		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
557 		while (copied) {
558 			int size = min_t(int, copied, PAGE_SIZE - start);
559 			skb_fill_page_desc(skb, frag++, pages[n], start, size);
560 			start = 0;
561 			copied -= size;
562 			n++;
563 		}
564 	}
565 	return 0;
566 }
567 EXPORT_SYMBOL(zerocopy_sg_from_iter);
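
/*
 * Illustrative sketch only (not part of this file): building a mostly
 * zerocopy skb from user memory, roughly as the tun/macvtap transmit paths
 * do.  The skb gets a short, copied linear area; the rest of the user
 * buffer is pinned as page fragments.  Names, sizing and error handling
 * are hypothetical.
 */
static __maybe_unused struct sk_buff *
example_zerocopy_skb(struct sock *sk, struct iov_iter *from, size_t linear)
{
	struct sk_buff *skb;
	int err;

	/* sock_alloc_send_skb() sets skb->sk, which zerocopy_sg_from_iter()
	 * relies on when charging the pinned pages to sk_wmem_alloc.
	 */
	skb = sock_alloc_send_skb(sk, linear, 1, &err);
	if (!skb)
		return ERR_PTR(err);
	skb_put(skb, linear);

	/* Copies skb_headlen() bytes, then maps the remaining user pages as
	 * frags; fails with -EMSGSIZE if MAX_SKB_FRAGS would be exceeded.
	 */
	err = zerocopy_sg_from_iter(skb, from);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}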
568 
569 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
570 				      struct iov_iter *to, int len,
571 				      __wsum *csump)
572 {
573 	int start = skb_headlen(skb);
574 	int i, copy = start - offset;
575 	struct sk_buff *frag_iter;
576 	int pos = 0;
577 	int n;
578 
579 	/* Copy header. */
580 	if (copy > 0) {
581 		if (copy > len)
582 			copy = len;
583 		n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
584 		if (n != copy)
585 			goto fault;
586 		if ((len -= copy) == 0)
587 			return 0;
588 		offset += copy;
589 		pos = copy;
590 	}
591 
592 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
593 		int end;
594 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
595 
596 		WARN_ON(start > offset + len);
597 
598 		end = start + skb_frag_size(frag);
599 		if ((copy = end - offset) > 0) {
600 			__wsum csum2 = 0;
601 			struct page *page = skb_frag_page(frag);
602 			u8  *vaddr = kmap(page);
603 
604 			if (copy > len)
605 				copy = len;
606 			n = csum_and_copy_to_iter(vaddr + frag->page_offset +
607 						  offset - start, copy,
608 						  &csum2, to);
609 			kunmap(page);
610 			if (n != copy)
611 				goto fault;
612 			*csump = csum_block_add(*csump, csum2, pos);
613 			if (!(len -= copy))
614 				return 0;
615 			offset += copy;
616 			pos += copy;
617 		}
618 		start = end;
619 	}
620 
621 	skb_walk_frags(skb, frag_iter) {
622 		int end;
623 
624 		WARN_ON(start > offset + len);
625 
626 		end = start + frag_iter->len;
627 		if ((copy = end - offset) > 0) {
628 			__wsum csum2 = 0;
629 			if (copy > len)
630 				copy = len;
631 			if (skb_copy_and_csum_datagram(frag_iter,
632 						       offset - start,
633 						       to, copy,
634 						       &csum2))
635 				goto fault;
636 			*csump = csum_block_add(*csump, csum2, pos);
637 			if ((len -= copy) == 0)
638 				return 0;
639 			offset += copy;
640 			pos += copy;
641 		}
642 		start = end;
643 	}
644 	if (!len)
645 		return 0;
646 
647 fault:
648 	return -EFAULT;
649 }
650 
651 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
652 {
653 	__sum16 sum;
654 
655 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
656 	if (likely(!sum)) {
657 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
658 		    !skb->csum_complete_sw)
659 			netdev_rx_csum_fault(skb->dev);
660 	}
661 	if (!skb_shared(skb))
662 		skb->csum_valid = !sum;
663 	return sum;
664 }
665 EXPORT_SYMBOL(__skb_checksum_complete_head);
666 
667 __sum16 __skb_checksum_complete(struct sk_buff *skb)
668 {
669 	__wsum csum;
670 	__sum16 sum;
671 
672 	csum = skb_checksum(skb, 0, skb->len, 0);
673 
674 	/* skb->csum holds pseudo checksum */
675 	sum = csum_fold(csum_add(skb->csum, csum));
676 	if (likely(!sum)) {
677 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
678 		    !skb->csum_complete_sw)
679 			netdev_rx_csum_fault(skb->dev);
680 	}
681 
682 	if (!skb_shared(skb)) {
683 		/* Save full packet checksum */
684 		skb->csum = csum;
685 		skb->ip_summed = CHECKSUM_COMPLETE;
686 		skb->csum_complete_sw = 1;
687 		skb->csum_valid = !sum;
688 	}
689 
690 	return sum;
691 }
692 EXPORT_SYMBOL(__skb_checksum_complete);
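
/*
 * Illustrative sketch only (not part of this file): __skb_checksum_complete()
 * is normally reached through the skb_checksum_complete() inline from
 * <linux/skbuff.h>, which skips the work when the checksum is already known
 * to be good.  The helper name below is hypothetical.
 */
static bool __maybe_unused example_datagram_csum_ok(struct sk_buff *skb)
{
	/* A zero folded sum means the full-packet checksum verified OK. */
	return !skb_checksum_complete(skb);
}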
693 
694 /**
695  *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
696  *	@skb: skbuff
697  *	@hlen: length of the header at the start of @skb; it is checksummed but not copied
698  *	@msg: destination
699  *
700  *	Caller _must_ check that the skb will fit in this iovec.
701  *
702  *	Returns: 0       - success.
703  *		 -EINVAL - checksum failure.
704  *		 -EFAULT - fault during copy.
705  */
706 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
707 				   int hlen, struct msghdr *msg)
708 {
709 	__wsum csum;
710 	int chunk = skb->len - hlen;
711 
712 	if (!chunk)
713 		return 0;
714 
715 	if (msg_data_left(msg) < chunk) {
716 		if (__skb_checksum_complete(skb))
717 			goto csum_error;
718 		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
719 			goto fault;
720 	} else {
721 		csum = csum_partial(skb->data, hlen, skb->csum);
722 		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
723 					       chunk, &csum))
724 			goto fault;
725 		if (csum_fold(csum))
726 			goto csum_error;
727 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
728 			netdev_rx_csum_fault(skb->dev);
729 	}
730 	return 0;
731 csum_error:
732 	return -EINVAL;
733 fault:
734 	return -EFAULT;
735 }
736 EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
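
/*
 * Illustrative sketch only (not part of this file): the copy-or-verify
 * receive pattern, roughly as udp_recvmsg() uses it.  When the checksum is
 * already known good the datagram is copied with skb_copy_datagram_msg();
 * otherwise it is verified while copying.  The helper name is hypothetical.
 */
static int __maybe_unused example_csum_recv(struct sk_buff *skb, int hlen,
					    struct msghdr *msg)
{
	if (skb_csum_unnecessary(skb))
		return skb_copy_datagram_msg(skb, hlen, msg, skb->len - hlen);

	/* Verifies the checksum while copying; -EINVAL means a bad packet. */
	return skb_copy_and_csum_datagram_msg(skb, hlen, msg);
}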
737 
738 /**
739  * 	datagram_poll - generic datagram poll
740  *	@file: file struct
741  *	@sock: socket
742  *	@wait: poll table
743  *
744  *	Datagram poll: Again totally generic. This also handles
745  *	sequenced packet sockets, provided the socket receive queue
746  *	only ever holds data ready to receive.
747  *
748  *	Note: if you _don't_ use this routine for this protocol,
749  *	and you use a different write policy from sock_writeable(),
750  *	then please supply your own write_space callback.
751  */
752 unsigned int datagram_poll(struct file *file, struct socket *sock,
753 			   poll_table *wait)
754 {
755 	struct sock *sk = sock->sk;
756 	unsigned int mask;
757 
758 	sock_poll_wait(file, sk_sleep(sk), wait);
759 	mask = 0;
760 
761 	/* exceptional events? */
762 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
763 		mask |= POLLERR |
764 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
765 
766 	if (sk->sk_shutdown & RCV_SHUTDOWN)
767 		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
768 	if (sk->sk_shutdown == SHUTDOWN_MASK)
769 		mask |= POLLHUP;
770 
771 	/* readable? */
772 	if (!skb_queue_empty(&sk->sk_receive_queue))
773 		mask |= POLLIN | POLLRDNORM;
774 
775 	/* Connection-based need to check for termination and startup */
776 	/* Connection-based sockets need to check for termination and startup */
777 		if (sk->sk_state == TCP_CLOSE)
778 			mask |= POLLHUP;
779 		/* connection hasn't started yet? */
780 		if (sk->sk_state == TCP_SYN_SENT)
781 			return mask;
782 	}
783 
784 	/* writable? */
785 	if (sock_writeable(sk))
786 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
787 	else
788 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
789 
790 	return mask;
791 }
792 EXPORT_SYMBOL(datagram_poll);
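
/*
 * Illustrative sketch only (not part of this file): datagram_poll() is meant
 * to be plugged directly into a protocol's proto_ops as its ->poll handler.
 * This hypothetical ops table shows the wiring; the remaining handlers are
 * omitted and would be the protocol's own implementations.
 */
static const struct proto_ops example_dgram_ops __maybe_unused = {
	.family	= PF_UNSPEC,
	.owner	= THIS_MODULE,
	.poll	= datagram_poll,
	/* .bind, .sendmsg, .recvmsg, ... supplied by the protocol */
};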
793