/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

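/*
 * Pick the 32-bit Fragment header Identification for an outgoing packet.
 * A single global counter is shared by every flow; the spinlock
 * serialises updates, and the value zero is never handed out because
 * the counter is reset to 1 on wrap-around.
 */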
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}


static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
				&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: extension headers may take lots of space
		   (~8K for now); MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
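
/*
 * A minimal, hypothetical sketch of how a transport protocol drives
 * ip6_xmit(): the caller owns a routed skb (skb->dst already set) and
 * describes the flow in a struct flowi.  The function name and the use
 * of the socket's ipv6_pinfo fields are illustrative assumptions, not
 * part of this file; TCPv6 follows essentially this pattern.
 */
static int __attribute__((unused))
example_transport_xmit(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	fl.oif = sk->sk_bound_dev_if;
	fl.fl6_flowlabel = np->flow_label;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, &np->saddr);

	/* ipfragok == 0: a too-big packet is bounced back with
	 * ICMPV6_PKT_TOOBIG instead of being fragmented locally.
	 */
	return ip6_xmit(sk, skb, &fl, np->opt, 0);
}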

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication, but I really want to avoid
 *	extra checks, since ipv6_build_header is used by TCP (which
 *	is performance critical for us).
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
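
/*
 * A hypothetical sketch of the calling pattern for ip6_nd_hdr(), in the
 * style of the neighbour discovery code: reserve link-layer headroom,
 * stamp the IPv6 header, then append the ICMPv6 payload.  The function
 * name and the payload handling are illustrative assumptions.
 */
static int __attribute__((unused))
example_nd_send(struct sock *sk, struct net_device *dev,
		struct in6_addr *saddr, struct in6_addr *daddr,
		int payload_len)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk,
				  LL_RESERVED_SPACE(dev) +
				  sizeof(struct ipv6hdr) + payload_len,
				  1, &err);
	if (skb == NULL)
		return err;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, payload_len);

	/* ... skb_put() the ICMPv6 message here, attach a route to
	 * skb->dst, then hand the skb to dst_output() ...
	 */
	kfree_skb(skb);	/* sketch only: do not actually transmit */
	return 0;
}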

static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* Unicast neighbour discovery messages destined
			 * to the proxied address must be passed to the
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We do NOT do any processing on RA packets; we push them
	 *	to user level AS IS, without any warranty that the
	 *	application will be able to interpret them. The reason
	 *	is that we cannot do anything clever here.
	 *
	 *	We are not the end node, so if the packet contains
	 *	AH/ESP we cannot do anything. Defragmentation would
	 *	also be a mistake: RA packets must not be fragmented,
	 *	because there is no guarantee that different fragments
	 *	will follow the same path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement hop limit
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same;
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr) & (IPV6_ADDR_MULTICAST |
						  IPV6_ADDR_LOOPBACK |
						  IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling the hop limit is delayed to this point, after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is the same as for the
	 * pre-fragmentation packet. */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}

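/*
 * Walk the extension header chain to find the offset at which a
 * Fragment header must be inserted: past any Hop-by-Hop, Routing and
 * Destination Options headers that belong to the unfragmentable part.
 * Sets *nexthdr to the "next header" field that has to be rewritten
 * to NEXTHDR_FRAGMENT and returns the insertion offset.
 */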
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {
		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb->nh.raw + offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare the header of the next frame,
			 * before the previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev),
				      GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr *)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the unconnected case is not
	 * very simple. Take into account that we do not support routing
	 * by source, TOS, or MSG_DONTROUTE	--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If the route was a host route, check that
	 *    the cached destination is current. If it is a network
	 *    route, we still may check its validity using the saved
	 *    pointer to the last used address: daddr_cache.
	 *    We do not want to save the whole address now (because the
	 *    main consumer of this service is TCP, which does not have
	 *    this problem), so this last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * If the dst entry we've looked up has a neighbour entry that
	 * is in the INCOMPLETE state and the source address from the
	 * flow is marked OPTIMISTIC, we release the found dst entry
	 * and replace it with the dst entry of the nexthop router
	 * instead.
	 */
	if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi fl_gw;
		int redirect;

		ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw, fl, sizeof(struct flowi));
			memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(sk, &fl_gw);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
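
/*
 * A minimal, hypothetical example of driving ip6_dst_lookup().  Note
 * that on failure the route has already been released and *dst reset
 * to NULL, so the caller must not release it again.  The function name
 * is illustrative and not part of this file.
 */
static int __attribute__((unused))
example_route_lookup(struct sock *sk, struct flowi *fl)
{
	struct dst_entry *dst;
	int err;

	err = ip6_dst_lookup(sk, &dst, fl);
	if (err)
		return err;	/* dst already released by the lookup */

	/* ... attach dst to an skb and transmit ... */
	dst_release(dst);
	return 0;
}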

/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP large send offload, so create
	 * one single skb packet containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb->nh.raw = skb->data;

		/* initialize the protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path.
	 */
	kfree_skb(skb);

	return err;
}

int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags & MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa */
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);
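
	/*
	 * Worked example with illustrative numbers: for mtu = 1500 and a
	 * bare IPv6 header (fragheaderlen = 40), maxfraglen =
	 * ((1500 - 40) & ~7) + 40 - 8 = 1456 + 40 - 8 = 1488, so every
	 * non-final fragment carries a payload that is a multiple of
	 * eight octets, as the Fragment header offset field requires.
	 */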

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu - exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use the MTU if the total length of the message fits into it.
	 * Otherwise, we need to reserve the fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into the current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If the remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at the tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea whether we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for the fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features & NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from,
				    page_address(frag->page) +
				    frag->page_offset + frag->size,
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to the ip header from the ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));

	*(__be32 *)hdr = fl->fl6_flowlabel |
			 htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev,
		      dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}

void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}
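
/*
 * A minimal, hypothetical sketch of the corked datagram pattern built on
 * ip6_append_data()/ip6_push_pending_frames()/ip6_flush_pending_frames(),
 * in the style of the UDPv6 sendmsg path.  The function name, the use of
 * ip_generic_getfrag() and the udphdr-sized transport header are
 * illustrative assumptions; error handling is reduced to the essentials.
 */
static int __attribute__((unused))
example_datagram_send(struct sock *sk, struct msghdr *msg, int len,
		      int hlimit, int tclass, struct ipv6_txoptions *opt,
		      struct flowi *fl, struct rt6_info *rt)
{
	int err;

	lock_sock(sk);
	err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
			      sizeof(struct udphdr), hlimit, tclass, opt,
			      fl, rt, msg->msg_flags);
	if (err)
		/* drop everything queued so far for this cork */
		ip6_flush_pending_frames(sk);
	else if (!(msg->msg_flags & MSG_MORE))
		/* not corked any further: build the header and transmit */
		err = ip6_push_pending_frames(sk);
	release_sock(sk);

	return err;
}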
1413