/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *	Imran Patel	: 	frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *			:       add ip6_append_data and related functions
 *				for datagram xmit
 */
28 
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
41 
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
44 
45 #include <net/sock.h>
46 #include <net/snmp.h>
47 
48 #include <net/ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
54 #include <net/icmp.h>
55 #include <net/xfrm.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
58 
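/* Final transmit step on the output path: resolve (or create) the
 * neighbour entry for the route's nexthop and hand the packet to the
 * device.  Multicast packets are additionally looped back to local
 * listeners when required, and node-local-scope packets are dropped
 * on non-loopback devices.
 */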
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			 * is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

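/* Fragment on the way out if the packet exceeds the path MTU and is not
 * GSO, if the route requires fragmentation of every packet (allfrag), or
 * if conntrack defragmentation recorded a smaller original fragment size.
 */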
static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}

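/* Output entry point invoked via dst_output().  Discards the packet when
 * IPv6 is administratively disabled on the egress device, otherwise runs
 * the NF_INET_POST_ROUTING hook (skipped for packets already rerouted
 * through it) before ip6_finish_output().
 */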
int ip6_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/*
 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now).
		 * MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, fl6->flowlabel);

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);

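/* Deliver clones of a Router Alert packet to every raw socket registered
 * for this alert value (and, if bound, to the matching device).  Returns
 * 1 when the packet was consumed by at least one socket.
 */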
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

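/* Classify a packet destined for a proxied address: returns 1 when a
 * unicast neighbour discovery message should be passed to local input,
 * 0 when the packet may be forwarded normally, and -1 when it must be
 * dropped (link-local destination; the sender is notified).
 */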
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

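/* MTU to enforce on the forwarding path: a locked RTAX_MTU route metric
 * wins; otherwise use the egress device's IPv6 MTU, falling back to
 * IPV6_MIN_MTU when no inet6 device is attached.
 */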
static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}

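/* Decide whether a forwarded packet must trigger ICMPV6_PKT_TOOBIG: an
 * oversized packet passes only when ignore_df is set (and conntrack did
 * not record a larger original fragment size) or when GSO will segment
 * it below the MTU.
 */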
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}

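/* Forwarding path: validate the packet (forwarding enabled, hop limit,
 * xfrm policy), hand Router Alert packets to interested sockets, honour
 * proxy NDP, send a redirect when the packet leaves on the interface it
 * arrived on, enforce the path MTU, then decrement the hop limit and run
 * the NF_INET_FORWARD hook.
 */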
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We do no processing on RA packets, pushing them to
	 *	user level AS IS without any warranty that the
	 *	application will be able to interpret them. The reason
	 *	is that we cannot do anything clever here.
	 *
	 *	We are not the end node, so if the packet contains
	 *	AH/ESP we cannot do anything.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will follow the same path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement hop limit
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
					 IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* The IPv6 specs say nothing about it, but it is clear that we
	 * cannot send redirects for source-routed frames.
	 * We also don't send redirects for frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

		/* Limit redirects both by destination (here)
		 * and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

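/* Propagate per-packet metadata (packet type, priority, protocol, dst,
 * device, marks, traffic-control index, netfilter and security state)
 * from the original skb to a freshly built fragment.
 */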
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}

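/* Pick a fragment identification value.  The identification space is
 * partitioned by a hash of the destination address so that concurrent
 * flows to different destinations do not share a counter.
 */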
static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
	static u32 ip6_idents_hashrnd __read_mostly;
	u32 hash, id;

	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));

	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
	id = ip_idents_reserve(hash, 1);
	fhdr->identification = htonl(id);
}

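/* Split an oversized packet into fragments and feed each one to @output.
 * Fast path: when the packet already carries a suitably shaped frag_list
 * (every fragment 8-byte aligned, enough headroom, nothing shared), the
 * list members are sent in place with only headers prepended.
 * Slow path: allocate a fresh skb per fragment and copy the payload.
 */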
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu) ||
		     (IP6CB(skb)->frag_max_size &&
		      IP6CB(skb)->frag_max_size > mtu)) {
		if (skb->sk && dst_allfrag(skb_dst(skb)))
			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      hroom + troom, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the not connected case is not
	 * very simple. Take into account that we do not support routing
	 * by source, TOS, and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

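/* Common tail of the dst lookup helpers: route @fl6 (unless the caller
 * already holds a dst), select a source address when none was given and,
 * with CONFIG_IPV6_OPTIMISTIC_DAD, fall back to the default router's dst
 * while the chosen source address is still optimistic and the nexthop
 * neighbour is unresolved.
 */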
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or an ERR_PTR()-encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or an ERR_PTR()-encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

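/* UDP fragmentation offload: build (or extend) one large skb covering the
 * whole datagram and let the device segment it; gso_size is the largest
 * 8-byte-aligned payload that fits each fragment.
 */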
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			struct rt6_info *rt)
{
	struct sk_buff *skb;
	struct frag_hdr fhdr;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		__skb_queue_tail(&sk->sk_write_queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	ipv6_select_ident(&fhdr, rt);
	skb_shinfo(skb)->ip6_frag_id = fhdr.identification;

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

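/* Recompute *mtu and *maxfraglen for the next fragment.  The first
 * fragment must reserve dst.header_len for tunnel headers; later
 * fragments may use that room for payload.  No-op for DST_XFRM_TUNNEL
 * routes, whose MTU already accounts for the extra headers.
 */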
static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (skb == NULL) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}

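/* Queue data on the socket's write queue for later transmission as a
 * single IPv6 datagram.  The first call sets up the cork state
 * (duplicated extension headers, held route, chosen MTU); large UDP
 * sends may be diverted to ip6_ufo_append_data().  A typical caller
 * looks like this (sketch only):
 *
 *	err = ip6_append_data(sk, getfrag, msg, len, transhdrlen,
 *			      hlimit, tclass, opt, &fl6, rt,
 *			      msg->msg_flags, dontfrag);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!corked)
 *		err = ip6_push_pending_frames(sk);
 */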
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;

	if (flags & MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above. --miyazawa */
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		unsigned int maxnonfragsize, headersize;

		headersize = sizeof(struct ipv6hdr) +
			     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
			     (dst_allfrag(&rt->dst) ?
			      sizeof(struct frag_hdr) : 0) +
			     rt->rt6i_nfheader_len;

		if (ip6_sk_ignore_df(sk))
			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
		else
			maxnonfragsize = mtu;

		/* dontfrag active */
		if ((cork->length + length > mtu - headersize) && dontfrag &&
		    (sk->sk_protocol == IPPROTO_UDP ||
		     sk->sk_protocol == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
						   sizeof(struct ipv6hdr));
			goto emsgsize;
		}

		if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
			ipv6_local_error(sk, EMSGSIZE, fl6,
					 mtu - headersize +
					 sizeof(struct ipv6hdr));
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM)
		sock_tx_timestamp(sk, &tx_flags);

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);

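/* Free the per-socket cork state: the duplicated extension headers, the
 * held route and the saved flow.
 */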
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.base.dst) {
		dst_release(inet->cork.base.dst);
		inet->cork.base.dst = NULL;
		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}

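/* Collapse everything queued by ip6_append_data() into a single packet
 * (trailing skbs become the frag_list), prepend extension headers and the
 * IPv6 header, then transmit via ip6_local_out() and release the cork.
 */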
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

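/* Discard everything queued by ip6_append_data() and release the cork
 * state; the error-path counterpart of ip6_push_pending_frames().
 */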
void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1621