xref: /openbmc/linux/net/ipv4/ip_output.c (revision 1da177e4)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		The Internet Protocol (IP) output module.
7  *
8  * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
9  *
10  * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Donald Becker, <becker@super.org>
13  *		Alan Cox, <Alan.Cox@linux.org>
14  *		Richard Underwood
15  *		Stefan Becker, <stefanb@yello.ping.de>
16  *		Jorge Cwik, <jorge@laser.satlink.net>
17  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18  *		Hirokazu Takahashi, <taka@valinux.co.jp>
19  *
20  *	See ip_input.c for original log
21  *
22  *	Fixes:
23  *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
24  *		Mike Kilburn	:	htons() missing in ip_build_xmit.
25  *		Bradford Johnson:	Fix faulty handling of some frames when
26  *					no route is found.
27  *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
28  *					(in case if packet not accepted by
29  *					output firewall rules)
30  *		Mike McLagan	:	Routing by source
31  *		Alexey Kuznetsov:	use new route cache
32  *		Andi Kleen:		Fix broken PMTU recovery and remove
33  *					some redundant tests.
34  *	Vitaly E. Lavrov	:	Transparent proxy revived after a year in a coma.
35  *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
36  *		Andi Kleen	:	Split fast and slow ip_build_xmit path
37  *					for decreased register pressure on x86
38  *					and more readability.
39  *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
40  *					silently drop skb instead of failing with -EPERM.
41  *		Detlev Wengorz	:	Copy protocol for fragments.
42  *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
43  *					datagrams.
44  *		Hirokazu Takahashi:	sendfile() on UDP works now.
45  */
46 
47 #include <asm/uaccess.h>
48 #include <asm/system.h>
49 #include <linux/module.h>
50 #include <linux/types.h>
51 #include <linux/kernel.h>
52 #include <linux/sched.h>
53 #include <linux/mm.h>
54 #include <linux/string.h>
55 #include <linux/errno.h>
56 #include <linux/config.h>
57 
58 #include <linux/socket.h>
59 #include <linux/sockios.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/proc_fs.h>
65 #include <linux/stat.h>
66 #include <linux/init.h>
67 
68 #include <net/snmp.h>
69 #include <net/ip.h>
70 #include <net/protocol.h>
71 #include <net/route.h>
72 #include <net/tcp.h>
73 #include <net/udp.h>
74 #include <linux/skbuff.h>
75 #include <net/sock.h>
76 #include <net/arp.h>
77 #include <net/icmp.h>
78 #include <net/raw.h>
79 #include <net/checksum.h>
80 #include <net/inetpeer.h>
82 #include <linux/igmp.h>
83 #include <linux/netfilter_ipv4.h>
84 #include <linux/netfilter_bridge.h>
85 #include <linux/mroute.h>
86 #include <linux/netlink.h>
87 
88 /*
89  *      Shall we try to rewrite ("damage") output packets if the routing device changes?  (see sysctl_ip_dynaddr below)
90  */
91 
92 int sysctl_ip_dynaddr;
93 int sysctl_ip_default_ttl = IPDEFTTL;
94 
95 /* Generate a checksum for an outgoing IP datagram. */
96 __inline__ void ip_send_check(struct iphdr *iph)
97 {
98 	iph->check = 0;
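	/* ip_fast_csum() takes the header length in 32-bit words (iph->ihl);
	 * the check field must be zeroed first so it does not affect the sum. */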
99 	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
100 }
101 
102 /* dev_loopback_xmit for use with netfilter: loop the packet back via netif_rx() so it is delivered locally. */
103 static int ip_dev_loopback_xmit(struct sk_buff *newskb)
104 {
105 	newskb->mac.raw = newskb->data;
106 	__skb_pull(newskb, newskb->nh.raw - newskb->data);
107 	newskb->pkt_type = PACKET_LOOPBACK;
108 	newskb->ip_summed = CHECKSUM_UNNECESSARY;
109 	BUG_TRAP(newskb->dst);
110 
111 #ifdef CONFIG_NETFILTER_DEBUG
112 	nf_debug_ip_loopback_xmit(newskb);
113 #endif
114 	netif_rx(newskb);
115 	return 0;
116 }
117 
118 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
119 {
120 	int ttl = inet->uc_ttl;
121 
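	/* A negative uc_ttl means the socket did not override the TTL via the
	 * IP_TTL socket option; fall back to the route's RTAX_HOPLIMIT metric
	 * (the per-route default TTL). */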
122 	if (ttl < 0)
123 		ttl = dst_metric(dst, RTAX_HOPLIMIT);
124 	return ttl;
125 }
126 
127 /*
128  *		Add an IP header to an skbuff and send it out.  The caller
129  *		must already have attached a route to skb->dst.
130  */
131 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
132 			  u32 saddr, u32 daddr, struct ip_options *opt)
133 {
134 	struct inet_sock *inet = inet_sk(sk);
135 	struct rtable *rt = (struct rtable *)skb->dst;
136 	struct iphdr *iph;
137 
138 	/* Build the IP header. */
139 	if (opt)
140 		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
141 	else
142 		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));
143 
144 	iph->version  = 4;
145 	iph->ihl      = 5;
146 	iph->tos      = inet->tos;
147 	if (ip_dont_fragment(sk, &rt->u.dst))
148 		iph->frag_off = htons(IP_DF);
149 	else
150 		iph->frag_off = 0;
151 	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
152 	iph->daddr    = rt->rt_dst;
153 	iph->saddr    = rt->rt_src;
154 	iph->protocol = sk->sk_protocol;
155 	iph->tot_len  = htons(skb->len);
156 	ip_select_ident(iph, &rt->u.dst, sk);
157 	skb->nh.iph   = iph;
158 
159 	if (opt && opt->optlen) {
160 		iph->ihl += opt->optlen>>2;
161 		ip_options_build(skb, opt, daddr, rt, 0);
162 	}
163 	ip_send_check(iph);
164 
165 	skb->priority = sk->sk_priority;
166 
167 	/* Send it out. */
168 	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
169 		       dst_output);
170 }
171 
172 static inline int ip_finish_output2(struct sk_buff *skb)
173 {
174 	struct dst_entry *dst = skb->dst;
175 	struct hh_cache *hh = dst->hh;
176 	struct net_device *dev = dst->dev;
177 	int hh_len = LL_RESERVED_SPACE(dev);
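	/* LL_RESERVED_SPACE() is the (aligned) headroom the device needs in
	 * front of the IP header for its link-layer header; if the skb does
	 * not have that much headroom, it is reallocated below. */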
178 
179 	/* Be paranoid, rather than too clever. */
180 	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
181 		struct sk_buff *skb2;
182 
183 		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
184 		if (skb2 == NULL) {
185 			kfree_skb(skb);
186 			return -ENOMEM;
187 		}
188 		if (skb->sk)
189 			skb_set_owner_w(skb2, skb->sk);
190 		kfree_skb(skb);
191 		skb = skb2;
192 	}
193 
194 #ifdef CONFIG_NETFILTER_DEBUG
195 	nf_debug_ip_finish_output2(skb);
196 #endif /*CONFIG_NETFILTER_DEBUG*/
197 
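	/* If the route has a cached, precomputed link-layer header (hh), copy
	 * it in front of the IP header under the cache lock and hand the skb
	 * to hh_output() (typically dev_queue_xmit()); otherwise let the
	 * neighbour's output function resolve the link-layer address first. */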
198 	if (hh) {
199 		int hh_alen;
200 
201 		read_lock_bh(&hh->hh_lock);
202 		hh_alen = HH_DATA_ALIGN(hh->hh_len);
203 		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
204 		read_unlock_bh(&hh->hh_lock);
205 		skb_push(skb, hh->hh_len);
206 		return hh->hh_output(skb);
207 	} else if (dst->neighbour)
208 		return dst->neighbour->output(skb);
209 
210 	if (net_ratelimit())
211 		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
212 	kfree_skb(skb);
213 	return -EINVAL;
214 }
215 
216 int ip_finish_output(struct sk_buff *skb)
217 {
218 	struct net_device *dev = skb->dst->dev;
219 
220 	skb->dev = dev;
221 	skb->protocol = htons(ETH_P_IP);
222 
223 	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
224 		       ip_finish_output2);
225 }
226 
227 int ip_mc_output(struct sk_buff *skb)
228 {
229 	struct sock *sk = skb->sk;
230 	struct rtable *rt = (struct rtable*)skb->dst;
231 	struct net_device *dev = rt->u.dst.dev;
232 
233 	/*
234 	 *	If the indicated interface is up and running, send the packet.
235 	 */
236 	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
237 
238 	skb->dev = dev;
239 	skb->protocol = htons(ETH_P_IP);
240 
241 	/*
242 	 *	Multicasts are looped back for other local users
243 	 */
244 
245 	if (rt->rt_flags&RTCF_MULTICAST) {
246 		if ((!sk || inet_sk(sk)->mc_loop)
247 #ifdef CONFIG_IP_MROUTE
248 		/* Small optimization: do not loop back non-local frames
249 		   that came back to us after forwarding; ip_mr_input will
250 		   drop them in any case.
251 		   Note that local frames are looped back so that they are
252 		   delivered to local recipients.
253 
254 		   This check is duplicated in ip_mr_input at the moment.
255 		 */
256 		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
257 #endif
258 		) {
259 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
260 			if (newskb)
261 				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
262 					newskb->dev,
263 					ip_dev_loopback_xmit);
264 		}
265 
266 		/* Multicasts with ttl 0 must not go beyond the host */
267 
268 		if (skb->nh.iph->ttl == 0) {
269 			kfree_skb(skb);
270 			return 0;
271 		}
272 	}
273 
274 	if (rt->rt_flags&RTCF_BROADCAST) {
275 		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
276 		if (newskb)
277 			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
278 				newskb->dev, ip_dev_loopback_xmit);
279 	}
280 
281 	if (skb->len > dst_mtu(&rt->u.dst))
282 		return ip_fragment(skb, ip_finish_output);
283 	else
284 		return ip_finish_output(skb);
285 }
286 
287 int ip_output(struct sk_buff *skb)
288 {
289 	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
290 
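	/* TSO frames (tso_size != 0) are larger than the MTU by design; the
	 * device will segment them, so only fragment non-TSO packets here. */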
291 	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->tso_size)
292 		return ip_fragment(skb, ip_finish_output);
293 	else
294 		return ip_finish_output(skb);
295 }
296 
297 int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
298 {
299 	struct sock *sk = skb->sk;
300 	struct inet_sock *inet = inet_sk(sk);
301 	struct ip_options *opt = inet->opt;
302 	struct rtable *rt;
303 	struct iphdr *iph;
304 
305 	/* Skip all of this if the packet is already routed,
306 	 * e.g. by something like SCTP.
307 	 */
308 	rt = (struct rtable *) skb->dst;
309 	if (rt != NULL)
310 		goto packet_routed;
311 
312 	/* Make sure we can route this packet. */
313 	rt = (struct rtable *)__sk_dst_check(sk, 0);
314 	if (rt == NULL) {
315 		u32 daddr;
316 
317 		/* Use correct destination address if we have options. */
318 		daddr = inet->daddr;
319 		if(opt && opt->srr)
320 			daddr = opt->faddr;
321 
322 		{
323 			struct flowi fl = { .oif = sk->sk_bound_dev_if,
324 					    .nl_u = { .ip4_u =
325 						      { .daddr = daddr,
326 							.saddr = inet->saddr,
327 							.tos = RT_CONN_FLAGS(sk) } },
328 					    .proto = sk->sk_protocol,
329 					    .uli_u = { .ports =
330 						       { .sport = inet->sport,
331 							 .dport = inet->dport } } };
332 
333 			/* If this fails, the transport layer's retransmit
334 			 * mechanism will keep trying until a route appears
335 			 * or the connection itself times out.
336 			 */
337 			if (ip_route_output_flow(&rt, &fl, sk, 0))
338 				goto no_route;
339 		}
340 		__sk_dst_set(sk, &rt->u.dst);
341 		tcp_v4_setup_caps(sk, &rt->u.dst);
342 	}
343 	skb->dst = dst_clone(&rt->u.dst);
344 
345 packet_routed:
346 	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
347 		goto no_route;
348 
349 	/* OK, we know where to send it, allocate and build IP header. */
350 	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
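	/* Build the first 16 bits of the header with a single store:
	 * (4 << 12) | (5 << 8) puts version = 4 and ihl = 5 into the first
	 * byte (0x45); the low byte is the TOS. */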
351 	*((__u16 *)iph)	= htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
352 	iph->tot_len = htons(skb->len);
353 	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
354 		iph->frag_off = htons(IP_DF);
355 	else
356 		iph->frag_off = 0;
357 	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
358 	iph->protocol = sk->sk_protocol;
359 	iph->saddr    = rt->rt_src;
360 	iph->daddr    = rt->rt_dst;
361 	skb->nh.iph   = iph;
362 	/* The transport layer has already set skb->h.foo itself. */
363 
364 	if (opt && opt->optlen) {
365 		iph->ihl += opt->optlen >> 2;
366 		ip_options_build(skb, opt, inet->daddr, rt, 0);
367 	}
368 
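	/* ip_select_ident_more() is passed the number of TSO segments
	 * (tso_segs) so that enough IP IDs are reserved for the segments the
	 * hardware will generate from this frame. */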
369 	ip_select_ident_more(iph, &rt->u.dst, sk, skb_shinfo(skb)->tso_segs);
370 
371 	/* Add an IP checksum. */
372 	ip_send_check(iph);
373 
374 	skb->priority = sk->sk_priority;
375 
376 	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
377 		       dst_output);
378 
379 no_route:
380 	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
381 	kfree_skb(skb);
382 	return -EHOSTUNREACH;
383 }
384 
385 
386 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
387 {
388 	to->pkt_type = from->pkt_type;
389 	to->priority = from->priority;
390 	to->protocol = from->protocol;
391 	to->security = from->security;
392 	dst_release(to->dst);
393 	to->dst = dst_clone(from->dst);
394 	to->dev = from->dev;
395 
396 	/* Copy the flags to each fragment. */
397 	IPCB(to)->flags = IPCB(from)->flags;
398 
399 #ifdef CONFIG_NET_SCHED
400 	to->tc_index = from->tc_index;
401 #endif
402 #ifdef CONFIG_NETFILTER
403 	to->nfmark = from->nfmark;
404 	to->nfcache = from->nfcache;
405 	/* Connection association is same as pre-frag packet */
406 	nf_conntrack_put(to->nfct);
407 	to->nfct = from->nfct;
408 	nf_conntrack_get(to->nfct);
409 	to->nfctinfo = from->nfctinfo;
410 #ifdef CONFIG_BRIDGE_NETFILTER
411 	nf_bridge_put(to->nf_bridge);
412 	to->nf_bridge = from->nf_bridge;
413 	nf_bridge_get(to->nf_bridge);
414 #endif
415 #ifdef CONFIG_NETFILTER_DEBUG
416 	to->nf_debug = from->nf_debug;
417 #endif
418 #endif
419 }
420 
421 /*
422  *	This IP datagram is too large to be sent in one piece.  Break it up
423  *	into smaller pieces (each carrying a copy of the IP header plus a
424  *	block of the original payload) so that each piece fits into a single
425  *	device frame, and queue each such frame for sending.
426  */
427 
428 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
429 {
430 	struct iphdr *iph;
431 	int raw = 0;
432 	int ptr;
433 	struct net_device *dev;
434 	struct sk_buff *skb2;
435 	unsigned int mtu, hlen, left, len, ll_rs;
436 	int offset;
437 	int not_last_frag;
438 	struct rtable *rt = (struct rtable*)skb->dst;
439 	int err = 0;
440 
441 	dev = rt->u.dst.dev;
442 
443 	/*
444 	 *	Point into the IP datagram header.
445 	 */
446 
447 	iph = skb->nh.iph;
448 
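	/* DF is set and local fragmentation is not allowed: tell the sender
	 * that fragmentation is needed (carrying the path MTU) and drop. */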
449 	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
450 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
451 			  htonl(dst_mtu(&rt->u.dst)));
452 		kfree_skb(skb);
453 		return -EMSGSIZE;
454 	}
455 
456 	/*
457 	 *	Setup starting values.
458 	 */
459 
460 	hlen = iph->ihl * 4;
461 	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
462 
463 	/* When a frag_list is given, use it.  First check its validity:
464 	 * some transformers may build a wrong frag_list or break an existing
465 	 * one; that is not prohibited.  In that case fall back to copying.
466 	 *
467 	 * LATER: this step can be merged into the real generation of fragments;
468 	 * we can switch to copying when we see the first bad fragment.
469 	 */
470 	if (skb_shinfo(skb)->frag_list) {
471 		struct sk_buff *frag;
472 		int first_len = skb_pagelen(skb);
473 
474 		if (first_len - hlen > mtu ||
475 		    ((first_len - hlen) & 7) ||
476 		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
477 		    skb_cloned(skb))
478 			goto slow_path;
479 
480 		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
481 			/* Correct geometry. */
482 			if (frag->len > mtu ||
483 			    ((frag->len & 7) && frag->next) ||
484 			    skb_headroom(frag) < hlen)
485 			    goto slow_path;
486 
487 			/* Partially cloned skb? */
488 			if (skb_shared(frag))
489 				goto slow_path;
490 		}
491 
492 		/* Everything is OK. Generate! */
493 
494 		err = 0;
495 		offset = 0;
496 		frag = skb_shinfo(skb)->frag_list;
497 		skb_shinfo(skb)->frag_list = NULL;
498 		skb->data_len = first_len - skb_headlen(skb);
499 		skb->len = first_len;
500 		iph->tot_len = htons(first_len);
501 		iph->frag_off = htons(IP_MF);
502 		ip_send_check(iph);
503 
504 		for (;;) {
505 			/* Prepare the header of the next fragment
506 			 * before the previous one is sent down. */
507 			if (frag) {
508 				frag->ip_summed = CHECKSUM_NONE;
509 				frag->h.raw = frag->data;
510 				frag->nh.raw = __skb_push(frag, hlen);
511 				memcpy(frag->nh.raw, iph, hlen);
512 				iph = frag->nh.iph;
513 				iph->tot_len = htons(frag->len);
514 				ip_copy_metadata(frag, skb);
515 				if (offset == 0)
516 					ip_options_fragment(frag);
517 				offset += skb->len - hlen;
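				/* The fragment-offset field is expressed in
				 * units of 8 bytes, hence the >> 3; the checks
				 * above guarantee that non-final fragments are
				 * a multiple of 8 bytes long. */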
518 				iph->frag_off = htons(offset>>3);
519 				if (frag->next != NULL)
520 					iph->frag_off |= htons(IP_MF);
521 				/* Ready, complete checksum */
522 				ip_send_check(iph);
523 			}
524 
525 			err = output(skb);
526 
527 			if (err || !frag)
528 				break;
529 
530 			skb = frag;
531 			frag = skb->next;
532 			skb->next = NULL;
533 		}
534 
535 		if (err == 0) {
536 			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
537 			return 0;
538 		}
539 
540 		while (frag) {
541 			skb = frag->next;
542 			kfree_skb(frag);
543 			frag = skb;
544 		}
545 		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
546 		return err;
547 	}
548 
549 slow_path:
550 	left = skb->len - hlen;		/* Space per frame */
551 	ptr = raw + hlen;		/* Where to start from */
552 
553 #ifdef CONFIG_BRIDGE_NETFILTER
554 	/* for bridged IP traffic encapsulated inside e.g. a VLAN header,
555 	 * we need to make room for the encapsulating header */
556 	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
557 	mtu -= nf_bridge_pad(skb);
558 #else
559 	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
560 #endif
561 	/*
562 	 *	Fragment the datagram.
563 	 */
564 
565 	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
566 	not_last_frag = iph->frag_off & htons(IP_MF);
567 
568 	/*
569 	 *	Keep copying data until we run out.
570 	 */
571 
572 	while(left > 0)	{
573 		len = left;
574 		/* IF: it doesn't fit, use 'mtu' - the data space left */
575 		if (len > mtu)
576 			len = mtu;
577 		/* IF: we are not sending up to and including the packet end
578 		   then align the next start on an eight byte boundary */
579 		if (len < left)	{
580 			len &= ~7;
581 		}
582 		/*
583 		 *	Allocate buffer.
584 		 */
585 
586 		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
587 			NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
588 			err = -ENOMEM;
589 			goto fail;
590 		}
591 
592 		/*
593 		 *	Set up data on packet
594 		 */
595 
596 		ip_copy_metadata(skb2, skb);
597 		skb_reserve(skb2, ll_rs);
598 		skb_put(skb2, len + hlen);
599 		skb2->nh.raw = skb2->data;
600 		skb2->h.raw = skb2->data + hlen;
601 
602 		/*
603 		 *	Charge the memory for the fragment to any owner
604 		 *	it might possess
605 		 */
606 
607 		if (skb->sk)
608 			skb_set_owner_w(skb2, skb->sk);
609 
610 		/*
611 		 *	Copy the packet header into the new buffer.
612 		 */
613 
614 		memcpy(skb2->nh.raw, skb->data, hlen);
615 
616 		/*
617 		 *	Copy a block of the IP datagram.
618 		 */
619 		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
620 			BUG();
621 		left -= len;
622 
623 		/*
624 		 *	Fill in the new header fields.
625 		 */
626 		iph = skb2->nh.iph;
627 		iph->frag_off = htons((offset >> 3));
628 
629 		/* ANK: a dirty but effective trick.  Fix up the options only
630 		 * if the segment being fragmented was THE FIRST one (otherwise
631 		 * the options have already been fixed), and do it ONCE on the
632 		 * initial skb, so that all the following fragments inherit the
633 		 * fixed options.
634 		 */
635 		if (offset == 0)
636 			ip_options_fragment(skb);
637 
638 		/*
639 		 *	Added AC : If we are fragmenting a fragment that is not
640 		 *		   the last fragment, then keep MF set on each piece
641 		 */
642 		if (left > 0 || not_last_frag)
643 			iph->frag_off |= htons(IP_MF);
644 		ptr += len;
645 		offset += len;
646 
647 		/*
648 		 *	Put this fragment into the sending queue.
649 		 */
650 
651 		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
652 
653 		iph->tot_len = htons(len + hlen);
654 
655 		ip_send_check(iph);
656 
657 		err = output(skb2);
658 		if (err)
659 			goto fail;
660 	}
661 	kfree_skb(skb);
662 	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
663 	return err;
664 
665 fail:
666 	kfree_skb(skb);
667 	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
668 	return err;
669 }
670 
671 int
672 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
673 {
674 	struct iovec *iov = from;
675 
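	/* Copy len bytes at offset from the user iovec into the skb.  When the
	 * device will checksum the packet (CHECKSUM_HW) a plain copy is
	 * enough; otherwise compute the checksum while copying and fold it
	 * into skb->csum. */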
676 	if (skb->ip_summed == CHECKSUM_HW) {
677 		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
678 			return -EFAULT;
679 	} else {
680 		unsigned int csum = 0;
681 		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
682 			return -EFAULT;
683 		skb->csum = csum_block_add(skb->csum, csum, odd);
684 	}
685 	return 0;
686 }
687 
688 static inline unsigned int
689 csum_page(struct page *page, int offset, int copy)
690 {
691 	char *kaddr;
692 	unsigned int csum;
693 	kaddr = kmap(page);
694 	csum = csum_partial(kaddr + offset, copy, 0);
695 	kunmap(page);
696 	return csum;
697 }
698 
699 /*
700  *	ip_append_data() and ip_append_page() can make one large IP datagram
701  *	from many pieces of data.  Each piece is held on the socket
702  *	until ip_push_pending_frames() is called.  Each piece can be a page
703  *	or non-page data.
704  *
705  *	Besides UDP, other transport protocols - e.g. raw sockets - can
706  *	potentially use this interface.
707  *
708  *	LATER: the length must be adjusted for tail padding when required.
709  */
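/*
 * Typical usage (sketch): a datagram transport such as UDP calls
 * ip_append_data() one or more times to queue payload on the socket's write
 * queue, then ip_push_pending_frames() to build the IP header(s) and transmit,
 * or ip_flush_pending_frames() to abort.
 */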
710 int ip_append_data(struct sock *sk,
711 		   int getfrag(void *from, char *to, int offset, int len,
712 			       int odd, struct sk_buff *skb),
713 		   void *from, int length, int transhdrlen,
714 		   struct ipcm_cookie *ipc, struct rtable *rt,
715 		   unsigned int flags)
716 {
717 	struct inet_sock *inet = inet_sk(sk);
718 	struct sk_buff *skb;
719 
720 	struct ip_options *opt = NULL;
721 	int hh_len;
722 	int exthdrlen;
723 	int mtu;
724 	int copy;
725 	int err;
726 	int offset = 0;
727 	unsigned int maxfraglen, fragheaderlen;
728 	int csummode = CHECKSUM_NONE;
729 
730 	if (flags&MSG_PROBE)
731 		return 0;
732 
733 	if (skb_queue_empty(&sk->sk_write_queue)) {
734 		/*
735 		 * setup for corking.
736 		 */
737 		opt = ipc->opt;
738 		if (opt) {
739 			if (inet->cork.opt == NULL) {
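				/* 40 bytes is the maximum possible size of
				 * IPv4 options. */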
740 				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
741 				if (unlikely(inet->cork.opt == NULL))
742 					return -ENOBUFS;
743 			}
744 			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
745 			inet->cork.flags |= IPCORK_OPT;
746 			inet->cork.addr = ipc->addr;
747 		}
748 		dst_hold(&rt->u.dst);
749 		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
750 		inet->cork.rt = rt;
751 		inet->cork.length = 0;
752 		sk->sk_sndmsg_page = NULL;
753 		sk->sk_sndmsg_off = 0;
754 		if ((exthdrlen = rt->u.dst.header_len) != 0) {
755 			length += exthdrlen;
756 			transhdrlen += exthdrlen;
757 		}
758 	} else {
759 		rt = inet->cork.rt;
760 		if (inet->cork.flags & IPCORK_OPT)
761 			opt = inet->cork.opt;
762 
763 		transhdrlen = 0;
764 		exthdrlen = 0;
765 		mtu = inet->cork.fragsize;
766 	}
767 	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
768 
769 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
770 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
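	/* maxfraglen is the largest fragment size (IP header + payload) whose
	 * payload length is a multiple of 8, as required for every fragment
	 * except the last.  For example, with mtu = 1500 and no options
	 * (fragheaderlen = 20): ((1500 - 20) & ~7) + 20 = 1480 + 20 = 1500.
	 * With 4 bytes of options (fragheaderlen = 24):
	 * ((1500 - 24) & ~7) + 24 = 1472 + 24 = 1496. */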
771 
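	/* The IP total-length field is 16 bits, so the complete datagram
	 * (header plus payload) can never exceed 64 KB. */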
772 	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
773 		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
774 		return -EMSGSIZE;
775 	}
776 
777 	/*
778 	 * transhdrlen > 0 means that this is the first fragment and we hope
779 	 * it will not need to be fragmented later.
780 	 */
781 	if (transhdrlen &&
782 	    length + fragheaderlen <= mtu &&
783 	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
784 	    !exthdrlen)
785 		csummode = CHECKSUM_HW;
786 
787 	inet->cork.length += length;
788 
789 	/* So, what's going on in the loop below?
790 	 *
791 	 * We use the calculated fragment length to build a chain of skbs;
792 	 * each of them is an IP fragment ready to be sent to the network
793 	 * once the appropriate IP header has been added.
794 	 */
795 
796 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
797 		goto alloc_new_skb;
798 
799 	while (length > 0) {
800 		/* Check if the remaining data fits into current packet. */
801 		copy = mtu - skb->len;
802 		if (copy < length)
803 			copy = maxfraglen - skb->len;
804 		if (copy <= 0) {
805 			char *data;
806 			unsigned int datalen;
807 			unsigned int fraglen;
808 			unsigned int fraggap;
809 			unsigned int alloclen;
810 			struct sk_buff *skb_prev;
811 alloc_new_skb:
812 			skb_prev = skb;
813 			if (skb_prev)
814 				fraggap = skb_prev->len - maxfraglen;
815 			else
816 				fraggap = 0;
817 
818 			/*
819 			 * If remaining data exceeds the mtu,
820 			 * we know we need more fragment(s).
821 			 */
822 			datalen = length + fraggap;
823 			if (datalen > mtu - fragheaderlen)
824 				datalen = maxfraglen - fragheaderlen;
825 			fraglen = datalen + fragheaderlen;
826 
827 			if ((flags & MSG_MORE) &&
828 			    !(rt->u.dst.dev->features&NETIF_F_SG))
829 				alloclen = mtu;
830 			else
831 				alloclen = datalen + fragheaderlen;
832 
833 			/* The last fragment gets additional space at tail.
834 			 * Note that with MSG_MORE we overallocate on fragments,
835 			 * because we have no idea which fragment will be
836 			 * the last.
837 			 */
838 			if (datalen == length)
839 				alloclen += rt->u.dst.trailer_len;
840 
841 			if (transhdrlen) {
842 				skb = sock_alloc_send_skb(sk,
843 						alloclen + hh_len + 15,
844 						(flags & MSG_DONTWAIT), &err);
845 			} else {
846 				skb = NULL;
847 				if (atomic_read(&sk->sk_wmem_alloc) <=
848 				    2 * sk->sk_sndbuf)
849 					skb = sock_wmalloc(sk,
850 							   alloclen + hh_len + 15, 1,
851 							   sk->sk_allocation);
852 				if (unlikely(skb == NULL))
853 					err = -ENOBUFS;
854 			}
855 			if (skb == NULL)
856 				goto error;
857 
858 			/*
859 			 *	Fill in the control structures
860 			 */
861 			skb->ip_summed = csummode;
862 			skb->csum = 0;
863 			skb_reserve(skb, hh_len);
864 
865 			/*
866 			 *	Find where to start putting bytes.
867 			 */
868 			data = skb_put(skb, fraglen);
869 			skb->nh.raw = data + exthdrlen;
870 			data += fragheaderlen;
871 			skb->h.raw = data + exthdrlen;
872 
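			/* fraggap is the part of the previous skb that sticks
			 * out beyond maxfraglen.  Move it (and its share of
			 * the checksum) into the new skb, so that every
			 * non-final fragment payload stays a multiple of
			 * 8 bytes. */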
873 			if (fraggap) {
874 				skb->csum = skb_copy_and_csum_bits(
875 					skb_prev, maxfraglen,
876 					data + transhdrlen, fraggap, 0);
877 				skb_prev->csum = csum_sub(skb_prev->csum,
878 							  skb->csum);
879 				data += fraggap;
880 				skb_trim(skb_prev, maxfraglen);
881 			}
882 
883 			copy = datalen - transhdrlen - fraggap;
884 			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
885 				err = -EFAULT;
886 				kfree_skb(skb);
887 				goto error;
888 			}
889 
890 			offset += copy;
891 			length -= datalen - fraggap;
892 			transhdrlen = 0;
893 			exthdrlen = 0;
894 			csummode = CHECKSUM_NONE;
895 
896 			/*
897 			 * Put the packet on the pending queue.
898 			 */
899 			__skb_queue_tail(&sk->sk_write_queue, skb);
900 			continue;
901 		}
902 
903 		if (copy > length)
904 			copy = length;
905 
906 		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
907 			unsigned int off;
908 
909 			off = skb->len;
910 			if (getfrag(from, skb_put(skb, copy),
911 					offset, copy, off, skb) < 0) {
912 				__skb_trim(skb, off);
913 				err = -EFAULT;
914 				goto error;
915 			}
916 		} else {
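			/* Scatter/gather capable device: append the data to
			 * per-socket pages attached as skb page fragments
			 * instead of copying it into the linear area. */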
917 			int i = skb_shinfo(skb)->nr_frags;
918 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
919 			struct page *page = sk->sk_sndmsg_page;
920 			int off = sk->sk_sndmsg_off;
921 			unsigned int left;
922 
923 			if (page && (left = PAGE_SIZE - off) > 0) {
924 				if (copy >= left)
925 					copy = left;
926 				if (page != frag->page) {
927 					if (i == MAX_SKB_FRAGS) {
928 						err = -EMSGSIZE;
929 						goto error;
930 					}
931 					get_page(page);
932 					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
933 					frag = &skb_shinfo(skb)->frags[i];
934 				}
935 			} else if (i < MAX_SKB_FRAGS) {
936 				if (copy > PAGE_SIZE)
937 					copy = PAGE_SIZE;
938 				page = alloc_pages(sk->sk_allocation, 0);
939 				if (page == NULL)  {
940 					err = -ENOMEM;
941 					goto error;
942 				}
943 				sk->sk_sndmsg_page = page;
944 				sk->sk_sndmsg_off = 0;
945 
946 				skb_fill_page_desc(skb, i, page, 0, 0);
947 				frag = &skb_shinfo(skb)->frags[i];
948 				skb->truesize += PAGE_SIZE;
949 				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
950 			} else {
951 				err = -EMSGSIZE;
952 				goto error;
953 			}
954 			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
955 				err = -EFAULT;
956 				goto error;
957 			}
958 			sk->sk_sndmsg_off += copy;
959 			frag->size += copy;
960 			skb->len += copy;
961 			skb->data_len += copy;
962 		}
963 		offset += copy;
964 		length -= copy;
965 	}
966 
967 	return 0;
968 
969 error:
970 	inet->cork.length -= length;
971 	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
972 	return err;
973 }
974 
975 ssize_t	ip_append_page(struct sock *sk, struct page *page,
976 		       int offset, size_t size, int flags)
977 {
978 	struct inet_sock *inet = inet_sk(sk);
979 	struct sk_buff *skb;
980 	struct rtable *rt;
981 	struct ip_options *opt = NULL;
982 	int hh_len;
983 	int mtu;
984 	int len;
985 	int err;
986 	unsigned int maxfraglen, fragheaderlen, fraggap;
987 
988 	if (inet->hdrincl)
989 		return -EPERM;
990 
991 	if (flags&MSG_PROBE)
992 		return 0;
993 
994 	if (skb_queue_empty(&sk->sk_write_queue))
995 		return -EINVAL;
996 
997 	rt = inet->cork.rt;
998 	if (inet->cork.flags & IPCORK_OPT)
999 		opt = inet->cork.opt;
1000 
1001 	if (!(rt->u.dst.dev->features&NETIF_F_SG))
1002 		return -EOPNOTSUPP;
1003 
1004 	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
1005 	mtu = inet->cork.fragsize;
1006 
1007 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1008 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1009 
1010 	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
1011 		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
1012 		return -EMSGSIZE;
1013 	}
1014 
1015 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1016 		return -EINVAL;
1017 
1018 	inet->cork.length += size;
1019 
1020 	while (size > 0) {
1021 		int i;
1022 
1023 		/* Check if the remaining data fits into current packet. */
1024 		len = mtu - skb->len;
1025 		if (len < size)
1026 			len = maxfraglen - skb->len;
1027 		if (len <= 0) {
1028 			struct sk_buff *skb_prev;
1029 			char *data;
1030 			struct iphdr *iph;
1031 			int alloclen;
1032 
1033 			skb_prev = skb;
1034 			if (skb_prev)
1035 				fraggap = skb_prev->len - maxfraglen;
1036 			else
1037 				fraggap = 0;
1038 
1039 			alloclen = fragheaderlen + hh_len + fraggap + 15;
1040 			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1041 			if (unlikely(!skb)) {
1042 				err = -ENOBUFS;
1043 				goto error;
1044 			}
1045 
1046 			/*
1047 			 *	Fill in the control structures
1048 			 */
1049 			skb->ip_summed = CHECKSUM_NONE;
1050 			skb->csum = 0;
1051 			skb_reserve(skb, hh_len);
1052 
1053 			/*
1054 			 *	Find where to start putting bytes.
1055 			 */
1056 			data = skb_put(skb, fragheaderlen + fraggap);
1057 			skb->nh.iph = iph = (struct iphdr *)data;
1058 			data += fragheaderlen;
1059 			skb->h.raw = data;
1060 
1061 			if (fraggap) {
1062 				skb->csum = skb_copy_and_csum_bits(
1063 					skb_prev, maxfraglen,
1064 					data, fraggap, 0);
1065 				skb_prev->csum = csum_sub(skb_prev->csum,
1066 							  skb->csum);
1067 				skb_trim(skb_prev, maxfraglen);
1068 			}
1069 
1070 			/*
1071 			 * Put the packet on the pending queue.
1072 			 */
1073 			__skb_queue_tail(&sk->sk_write_queue, skb);
1074 			continue;
1075 		}
1076 
1077 		i = skb_shinfo(skb)->nr_frags;
1078 		if (len > size)
1079 			len = size;
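		/* If this chunk is contiguous with the last page fragment of
		 * the skb, just extend that fragment rather than adding a new
		 * one. */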
1080 		if (skb_can_coalesce(skb, i, page, offset)) {
1081 			skb_shinfo(skb)->frags[i-1].size += len;
1082 		} else if (i < MAX_SKB_FRAGS) {
1083 			get_page(page);
1084 			skb_fill_page_desc(skb, i, page, offset, len);
1085 		} else {
1086 			err = -EMSGSIZE;
1087 			goto error;
1088 		}
1089 
1090 		if (skb->ip_summed == CHECKSUM_NONE) {
1091 			unsigned int csum;
1092 			csum = csum_page(page, offset, len);
1093 			skb->csum = csum_block_add(skb->csum, csum, skb->len);
1094 		}
1095 
1096 		skb->len += len;
1097 		skb->data_len += len;
1098 		offset += len;
1099 		size -= len;
1100 	}
1101 	return 0;
1102 
1103 error:
1104 	inet->cork.length -= size;
1105 	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1106 	return err;
1107 }
1108 
1109 /*
1110  *	Combine all pending IP fragments on the socket into one IP datagram
1111  *	and push it out.
1112  */
1113 int ip_push_pending_frames(struct sock *sk)
1114 {
1115 	struct sk_buff *skb, *tmp_skb;
1116 	struct sk_buff **tail_skb;
1117 	struct inet_sock *inet = inet_sk(sk);
1118 	struct ip_options *opt = NULL;
1119 	struct rtable *rt = inet->cork.rt;
1120 	struct iphdr *iph;
1121 	int df = 0;
1122 	__u8 ttl;
1123 	int err = 0;
1124 
1125 	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1126 		goto out;
1127 	tail_skb = &(skb_shinfo(skb)->frag_list);
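	/* Chain all remaining queued skbs onto the first skb's frag_list,
	 * producing one large skb whose pre-built pieces ip_fragment() can
	 * later send out via its fast path. */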
1128 
1129 	/* move skb->data to ip header from ext header */
1130 	if (skb->data < skb->nh.raw)
1131 		__skb_pull(skb, skb->nh.raw - skb->data);
1132 	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1133 		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
1134 		*tail_skb = tmp_skb;
1135 		tail_skb = &(tmp_skb->next);
1136 		skb->len += tmp_skb->len;
1137 		skb->data_len += tmp_skb->len;
1138 		skb->truesize += tmp_skb->truesize;
1139 		__sock_put(tmp_skb->sk);
1140 		tmp_skb->destructor = NULL;
1141 		tmp_skb->sk = NULL;
1142 	}
1143 
1144 	/* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO), we
1145 	 * allow the frame generated here to be fragmented.  No matter how
1146 	 * transforms change the size of the packet, it will go out.
1147 	 */
1148 	if (inet->pmtudisc != IP_PMTUDISC_DO)
1149 		skb->local_df = 1;
1150 
1151 	/* The DF bit is set when we want to see DF on outgoing frames.
1152 	 * If local_df is also set, we still allow this frame to be
1153 	 * fragmented locally. */
1154 	if (inet->pmtudisc == IP_PMTUDISC_DO ||
1155 	    (skb->len <= dst_mtu(&rt->u.dst) &&
1156 	     ip_dont_fragment(sk, &rt->u.dst)))
1157 		df = htons(IP_DF);
1158 
1159 	if (inet->cork.flags & IPCORK_OPT)
1160 		opt = inet->cork.opt;
1161 
1162 	if (rt->rt_type == RTN_MULTICAST)
1163 		ttl = inet->mc_ttl;
1164 	else
1165 		ttl = ip_select_ttl(inet, &rt->u.dst);
1166 
1167 	iph = (struct iphdr *)skb->data;
1168 	iph->version = 4;
1169 	iph->ihl = 5;
1170 	if (opt) {
1171 		iph->ihl += opt->optlen>>2;
1172 		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
1173 	}
1174 	iph->tos = inet->tos;
1175 	iph->tot_len = htons(skb->len);
1176 	iph->frag_off = df;
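	/* With DF set the datagram will never be fragmented, so the IP ID is
	 * not needed for reassembly and a cheap per-socket counter suffices;
	 * otherwise pick an ID from the shared generator. */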
1177 	if (!df) {
1178 		__ip_select_ident(iph, &rt->u.dst, 0);
1179 	} else {
1180 		iph->id = htons(inet->id++);
1181 	}
1182 	iph->ttl = ttl;
1183 	iph->protocol = sk->sk_protocol;
1184 	iph->saddr = rt->rt_src;
1185 	iph->daddr = rt->rt_dst;
1186 	ip_send_check(iph);
1187 
1188 	skb->priority = sk->sk_priority;
1189 	skb->dst = dst_clone(&rt->u.dst);
1190 
1191 	/* Netfilter gets the whole, not yet fragmented, skb. */
1192 	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
1193 		      skb->dst->dev, dst_output);
1194 	if (err) {
1195 		if (err > 0)
1196 			err = inet->recverr ? net_xmit_errno(err) : 0;
1197 		if (err)
1198 			goto error;
1199 	}
1200 
1201 out:
1202 	inet->cork.flags &= ~IPCORK_OPT;
1203 	if (inet->cork.opt) {
1204 		kfree(inet->cork.opt);
1205 		inet->cork.opt = NULL;
1206 	}
1207 	if (inet->cork.rt) {
1208 		ip_rt_put(inet->cork.rt);
1209 		inet->cork.rt = NULL;
1210 	}
1211 	return err;
1212 
1213 error:
1214 	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1215 	goto out;
1216 }
1217 
1218 /*
1219  *	Throw away all pending data on the socket.
1220  */
1221 void ip_flush_pending_frames(struct sock *sk)
1222 {
1223 	struct inet_sock *inet = inet_sk(sk);
1224 	struct sk_buff *skb;
1225 
1226 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
1227 		kfree_skb(skb);
1228 
1229 	inet->cork.flags &= ~IPCORK_OPT;
1230 	if (inet->cork.opt) {
1231 		kfree(inet->cork.opt);
1232 		inet->cork.opt = NULL;
1233 	}
1234 	if (inet->cork.rt) {
1235 		ip_rt_put(inet->cork.rt);
1236 		inet->cork.rt = NULL;
1237 	}
1238 }
1239 
1240 
1241 /*
1242  *	Fetch data from kernel space and fill in checksum if needed.
1243  */
1244 static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1245 			      int len, int odd, struct sk_buff *skb)
1246 {
1247 	unsigned int csum;
1248 
1249 	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1250 	skb->csum = csum_block_add(skb->csum, csum, odd);
1251 	return 0;
1252 }
1253 
1254 /*
1255  *	Generic function to send a packet as reply to another packet.
1256  *	Used to send TCP resets so far. ICMP should use this function too.
1257  *
1258  *	Should run single threaded per socket because it uses the sock
1259  *     	structure to pass arguments.
1260  *
1261  *	LATER: switch from ip_build_xmit to ip_append_*
1262  */
1263 void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
1264 		   unsigned int len)
1265 {
1266 	struct inet_sock *inet = inet_sk(sk);
1267 	struct {
1268 		struct ip_options	opt;
1269 		char			data[40];
1270 	} replyopts;
1271 	struct ipcm_cookie ipc;
1272 	u32 daddr;
1273 	struct rtable *rt = (struct rtable*)skb->dst;
1274 
1275 	if (ip_options_echo(&replyopts.opt, skb))
1276 		return;
1277 
1278 	daddr = ipc.addr = rt->rt_src;
1279 	ipc.opt = NULL;
1280 
1281 	if (replyopts.opt.optlen) {
1282 		ipc.opt = &replyopts.opt;
1283 
1284 		if (ipc.opt->srr)
1285 			daddr = replyopts.opt.faddr;
1286 	}
1287 
1288 	{
1289 		struct flowi fl = { .nl_u = { .ip4_u =
1290 					      { .daddr = daddr,
1291 						.saddr = rt->rt_spec_dst,
1292 						.tos = RT_TOS(skb->nh.iph->tos) } },
1293 				    /* Not quite clean, but right. */
1294 				    .uli_u = { .ports =
1295 					       { .sport = skb->h.th->dest,
1296 					         .dport = skb->h.th->source } },
1297 				    .proto = sk->sk_protocol };
1298 		if (ip_route_output_key(&rt, &fl))
1299 			return;
1300 	}
1301 
1302 	/* And let IP do all the hard work.
1303 
1304 	   This chunk is not reentrant, hence the spinlock.
1305 	   Note that it relies on the fact that this function is called
1306 	   with BHs locally disabled and that sk cannot already be locked.
1307 	 */
1308 	bh_lock_sock(sk);
1309 	inet->tos = skb->nh.iph->tos;
1310 	sk->sk_priority = skb->priority;
1311 	sk->sk_protocol = skb->nh.iph->protocol;
1312 	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1313 		       &ipc, rt, MSG_DONTWAIT);
1314 	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1315 		if (arg->csumoffset >= 0)
1316 			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
1317 		skb->ip_summed = CHECKSUM_NONE;
1318 		ip_push_pending_frames(sk);
1319 	}
1320 
1321 	bh_unlock_sock(sk);
1322 
1323 	ip_rt_put(rt);
1324 }
1325 
1326 /*
1327  *	IP protocol layer initialiser
1328  */
1329 
1330 static struct packet_type ip_packet_type = {
1331 	.type = __constant_htons(ETH_P_IP),
1332 	.func = ip_rcv,
1333 };
1334 
1335 /*
1336  *	IP registers the packet type and then calls the subprotocol initialisers
1337  */
1338 
1339 void __init ip_init(void)
1340 {
1341 	dev_add_pack(&ip_packet_type);
1342 
1343 	ip_rt_init();
1344 	inet_initpeers();
1345 
1346 #if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1347 	igmp_mc_proc_init();
1348 #endif
1349 }
1350 
1351 EXPORT_SYMBOL(ip_finish_output);
1352 EXPORT_SYMBOL(ip_fragment);
1353 EXPORT_SYMBOL(ip_generic_getfrag);
1354 EXPORT_SYMBOL(ip_queue_xmit);
1355 EXPORT_SYMBOL(ip_send_check);
1356 
1357 #ifdef CONFIG_SYSCTL
1358 EXPORT_SYMBOL(sysctl_ip_default_ttl);
1359 #endif
1360