/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *	Imran Patel	: 	frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *			:       add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>

static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}

int ip6_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/*
 *	Transmit an sk_buff (used by TCP, SCTP and DCCP).
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now);
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, fl6->flowlabel);

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
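
/*
 * Illustrative sketch (not part of the original file): how a transport
 * protocol hands a fully built segment to ip6_xmit().  The socket, skb
 * and flow are assumed to have been prepared by the caller; a NULL
 * ipv6_txoptions and tclass 0 request no extension headers and the
 * default traffic class.
 */
static inline int example_ip6_xmit_segment(struct sock *sk,
					   struct sk_buff *skb,
					   struct flowi6 *fl6)
{
	/* ip6_xmit() consumes the skb on both success and failure */
	return ip6_xmit(sk, skb, fl6, NULL, 0);
}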

static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* Unicast neighbour discovery messages destined
			 * to the proxied address must be passed to the
			 * input function so a reaction can be generated.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}

static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu || skb->local_df)
		return false;

	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We do not process RA packets; they are pushed to user
	 *	level AS IS, without any warranty that the application
	 *	will be able to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not the end node, so if the packet contains
	 *	AH/ESP we cannot do anything about it.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will travel along the same
	 *	path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	Check and decrement the hop limit.
	 */
	if (hdr->hop_limit <= 1) {
		/* Force the OUTPUT device to be used for the source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
					 IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* The IPv6 specs say nothing about it, but it is clear that we
	   cannot send redirects for source-routed frames.
	   We also don't send redirects for frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	The incoming and outgoing devices are the same;
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect).
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force the OUTPUT device to be used for the
		   source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Decrementing the hop limit is delayed until after the skb COW. */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}

int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb was not generated by a local socket.
	 */
	if (unlikely(!skb->local_df && skb->len > mtu) ||
		     (IP6CB(skb)->frag_max_size &&
		      IP6CB(skb)->frag_max_size > mtu)) {
		if (skb->sk && dst_allfrag(skb_dst(skb)))
			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes out. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Data left to send */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end,
		   then align the next start on an eight-byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      hroom + troom, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
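
/*
 * Illustrative sketch (not in the original file): the per-fragment
 * payload budget used by ip6_fragment() above.  The unfragmentable
 * header length and the fragment header are subtracted from the path
 * MTU, and every fragment except the last carries a multiple of eight
 * bytes, e.g. mtu = 1500, hlen = 40: (1500 - 40 - 8) & ~7 = 1448.
 */
static inline unsigned int example_frag_payload(unsigned int mtu,
						unsigned int hlen)
{
	return (mtu - hlen - sizeof(struct frag_hdr)) & ~7;
}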

static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the unconnected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE 		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If the route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may
	 *    check its validity using the saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save the whole address now,
	 *    (because the main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * If the dst entry we've looked up has a neighbour entry
	 * that is in the INCOMPLETE state and the source address
	 * from the flow is marked as OPTIMISTIC, we release the
	 * found dst entry and replace it with the dst entry of
	 * the nexthop router.
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead.
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
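
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): ip6_dst_lookup() returns an errno and hands the route back
 * through an output parameter, so the caller owns the dst reference
 * on success.
 */
static inline int example_dst_lookup(struct sock *sk, struct flowi6 *fl6)
{
	struct dst_entry *dst;
	int err = ip6_dst_lookup(sk, &dst, fl6);

	if (err)
		return err;	/* no route; dst was released for us */
	dst_release(dst);	/* a real caller would use the route */
	return 0;
}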

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
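
/*
 * Illustrative sketch (hypothetical caller): unlike ip6_dst_lookup(),
 * ip6_dst_lookup_flow() encodes failure in the returned pointer, so
 * the result must be tested with IS_ERR() before use.
 */
static inline struct dst_entry *example_dst_lookup_flow(struct sock *sk,
							struct flowi6 *fl6)
{
	struct dst_entry *dst = ip6_dst_lookup_flow(sk, fl6, NULL);

	if (IS_ERR(dst))
		return NULL;	/* PTR_ERR(dst) holds the errno */
	return dst;
}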

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			struct rt6_info *rt)

{
	struct sk_buff *skb;
	struct frag_hdr fhdr;
	int err;

	/* The network device supports UDP large send offload, so build
	 * one single skb containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb_reset_network_header(skb);

		/* initialize the protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		__skb_queue_tail(&sk->sk_write_queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	ipv6_select_ident(&fhdr, rt);
	skb_shinfo(skb)->ip6_frag_id = fhdr.identification;

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				bool pmtuprobe)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (skb == NULL) {
			/* first fragment, reserve header_len */
			*mtu = *mtu - rt->dst.header_len;

		} else {
			/*
			 * This fragment is not the first; the header
			 * space is regarded as data space.
			 */
			*mtu = min(*mtu, pmtuprobe ?
				   rt->dst.dev->mtu :
				   dst_mtu(rt->dst.path));
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}

int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;

	if (flags & MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above --miyazawa */
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		unsigned int maxnonfragsize, headersize;

		headersize = sizeof(struct ipv6hdr) +
			     (opt ? opt->tot_len : 0) +
			     (dst_allfrag(&rt->dst) ?
			      sizeof(struct frag_hdr) : 0) +
			     rt->rt6i_nfheader_len;

		if (ip6_sk_local_df(sk))
			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
		else
			maxnonfragsize = mtu;

		/* dontfrag active */
		if ((cork->length + length > mtu - headersize) && dontfrag &&
		    (sk->sk_protocol == IPPROTO_UDP ||
		     sk->sk_protocol == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
						   sizeof(struct ipv6hdr));
			goto emsgsize;
		}

		if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
			ipv6_local_error(sk, EMSGSIZE, fl6,
					 mtu - headersize +
					 sizeof(struct ipv6hdr));
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamping is enabled */
	if (sk->sk_type == SOCK_DGRAM)
		sock_tx_timestamp(sk, &tx_flags);

	/*
	 * Let's try using as much space as possible.
	 * Use the MTU if the total length of the message fits into it.
	 * Otherwise, we need to reserve a fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into the current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    np->pmtudisc >=
						    IPV6_PMTUDISC_PROBE);

			skb_prev = skb;

			/*
			 * If the remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * This is not the last fragment; the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for the fragment header.
			 * Note: this may be an overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			/* reserve space for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);

static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.base.dst) {
		dst_release(inet->cork.base.dst);
		inet->cork.base.dst = NULL;
		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data up to the IPv6 header, past any extension headers */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->local_df = ip6_sk_local_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
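
/*
 * Illustrative sketch (hypothetical caller, simplified error handling):
 * the corking sequence used by datagram protocols. Payload is queued
 * with ip6_append_data() and emitted with ip6_push_pending_frames(),
 * or dropped with ip6_flush_pending_frames() on failure.
 */
static inline int example_send_datagram(struct sock *sk,
					int getfrag(void *from, char *to,
						    int offset, int len,
						    int odd,
						    struct sk_buff *skb),
					void *from, int length,
					struct flowi6 *fl6,
					struct rt6_info *rt)
{
	int err = ip6_append_data(sk, getfrag, from, length,
				  0 /* transhdrlen */, -1 /* hlimit */,
				  0 /* tclass */, NULL /* opt */,
				  fl6, rt, 0 /* flags */, 0 /* dontfrag */);
	if (err) {
		ip6_flush_pending_frames(sk);
		return err;
	}
	return ip6_push_pending_frames(sk);
}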

void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1607