xref: /openbmc/linux/net/ipv6/ip6_output.c (revision 92a2c6b2)
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>

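/* Final transmit step: resolve (or create) the neighbour entry for the
 * route's nexthop and hand the skb to the device layer. Multicast
 * packets may additionally be looped back to local listeners, and are
 * dropped when their scope is too narrow to leave the host.
 */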
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			 * is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

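/* Fragment the packet if it exceeds the path MTU (and is not GSO),
 * otherwise pass it straight on to ip6_finish_output2().
 */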
static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}

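/* Entry point for locally generated packets after routing: discard if
 * IPv6 is administratively disabled on the device, then traverse the
 * netfilter POST_ROUTING hook unless the packet was already rerouted.
 */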
int ip6_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/*
 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now);
		 * MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);

			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
						     np->autoflowlabel));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);

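/* Deliver a Router Alert packet to every raw socket that registered
 * for this alert value: the last matching socket receives the original
 * skb, earlier ones receive clones. Returns 1 if at least one socket
 * took the packet.
 */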
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;

		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

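/* Decide what to do with a packet that matched a proxy neighbour
 * entry: 1 = hand it to local input (unicast NDISC messages),
 * -1 = discard (link-local destination), 0 = forward normally.
 */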
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* Unicast neighbour discovery messages destined
			 * to the proxied address are passed to the input
			 * function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

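/* MTU to use when forwarding: a locked route metric wins, otherwise
 * fall back to the interface MTU (mtu6).
 */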
static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}

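/* True if the packet cannot be sent over a path with the given MTU
 * without fragmentation by us, taking conntrack defrag hints, the
 * ignore_df flag and the GSO segment size into account.
 */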
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}

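/* Forward one packet: validate it (forwarding enabled, hop limit,
 * XFRM policy, source address sanity), emit redirects or ICMP errors
 * where required, decrement the hop limit and pass the skb through
 * the netfilter FORWARD hook to ip6_forward_finish().
 */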
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We do NOT process RA packets; we push them to user level
	 *	AS IS, without any warranty that the application will be
	 *	able to interpret them. The reason is that we cannot do
	 *	anything clever here.
	 *
	 *	We are not an end node, so if the packet contains AH/ESP
	 *	we cannot do anything. Defragmentation would also be a
	 *	mistake: RA packets must not be fragmented, because there
	 *	is no guarantee that different fragments will travel along
	 *	the same path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);

		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
					 IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* The IPv6 specs say nothing about it, but it is clear that we
	 * cannot send redirects for source-routed frames. We also don't
	 * send redirects for frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same:
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

		/* Limit redirects both by destination (here)
		 * and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling the hop limit is delayed until after the skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

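/* Propagate per-packet metadata (type, priority, mark, dst, netfilter
 * and security state) from the original skb to a fragment.
 */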
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}

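/* Split an oversized skb into fragments and feed each one to @output.
 * The fast path reuses an existing frag_list when its geometry fits;
 * otherwise the slow path allocates and fills a fresh skb per fragment.
 */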
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu) ||
		     (IP6CB(skb)->frag_max_size &&
		      IP6CB(skb)->frag_max_size > mtu)) {
		if (skb->sk && dst_allfrag(skb_dst(skb)))
			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down.
			 */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		 * then align the next start on an eight byte boundary
		 */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}

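/* Validate a socket-cached dst against the current flow: release and
 * return NULL if it belongs to another family, the cached destination
 * (or source, with subtrees) no longer matches, or the oif differs.
 */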
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the not-connected case
	 * is not very simple. Take into account that we do not
	 * support routing by source, TOS, and MSG_DONTROUTE
	 *						--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If the route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may check
	 *    its validity using the saved pointer to the
	 *    last used address: daddr_cache.
	 *    We do not want to save the whole address now
	 *    (because the main consumer of this service
	 *    is TCP, which does not have this problem),
	 *    so the last trick works only on connected sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

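/* Core of the dst lookup helpers below: resolve the route if needed,
 * pick a source address, and (with optimistic DAD) fall back to the
 * default router's dst while our address is still being validated.
 */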
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;

		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * If the dst entry we've looked up has a neighbour entry
	 * that is in the INCOMPLETE state and the source address
	 * from the flow is marked as OPTIMISTIC, we release the
	 * found dst entry and replace it with the dst entry of
	 * the nexthop router.
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

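/* Append data for UDP fragmentation offload: build (or extend) a
 * single large GSO skb and let the device segment it into
 * MTU-sized fragments on transmit.
 */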
static inline int ip6_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			struct rt6_info *rt)
{
	struct sk_buff *skb;
	struct frag_hdr fhdr;
	int err;

	/* The network device supports UDP large send offload, so
	 * create one single skb packet containing the complete
	 * UDP datagram.
	 */
	skb = skb_peek_tail(queue);
	if (skb == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	ipv6_select_ident(&fhdr, rt);
	skb_shinfo(skb)->ip6_frag_id = fhdr.identification;

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (skb == NULL) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not the first, so the header
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}

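/* Initialise the cork state for a sendmsg() sequence: duplicate the
 * supplied extension headers, pin the route, and record the hop limit,
 * traffic class and the fragment size derived from the path MTU.
 */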
static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork,
			  int hlimit, int tclass, struct ipv6_txoptions *opt,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
		if (unlikely(v6_cork->opt == NULL))
			return -ENOBUFS;

		v6_cork->opt->tot_len = opt->tot_len;
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = hlimit;
	v6_cork->tclass = tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	cork->base.fragsize = mtu;
	if (dst_allfrag(rt->dst.path))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	return 0;
}

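/* Workhorse behind ip6_append_data()/ip6_make_skb(): copy user data
 * into the queued skbs, growing the current one up to maxfraglen and
 * allocating new ones as needed, so that ip6_fragment() later has
 * correctly sized pieces to work with.
 */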
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, int dontfrag)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		unsigned int maxnonfragsize, headersize;

		headersize = sizeof(struct ipv6hdr) +
			     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
			     (dst_allfrag(&rt->dst) ?
			      sizeof(struct frag_hdr) : 0) +
			     rt->rt6i_nfheader_len;

		if (ip6_sk_ignore_df(sk))
			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
		else
			maxnonfragsize = mtu;

		/* dontfrag active */
		if ((cork->length + length > mtu - headersize) && dontfrag &&
		    (sk->sk_protocol == IPPROTO_UDP ||
		     sk->sk_protocol == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
						   sizeof(struct ipv6hdr));
			goto emsgsize;
		}

		if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
			ipv6_local_error(sk, EMSGSIZE, fl6,
					 mtu - headersize +
					 sizeof(struct ipv6hdr));
			return -EMSGSIZE;
		}
	}

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/* If this is the first and only packet and device
	 * supports checksum offloading, let's use it.
	 */
	if (!skb && sk->sk_protocol == IPPROTO_UDP &&
	    length + fragheaderlen < mtu &&
	    rt->dst.dev->features & NETIF_F_V6_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;
	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen, int hlimit,
		    int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
				     tclass, opt, rt, fl6);
		if (err)
			return err;

		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, dontfrag);
}
EXPORT_SYMBOL_GPL(ip6_append_data);

static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}

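/* Collapse the queued skbs into one packet with a frag_list, push the
 * extension headers and the IPv6 header, account the output stats and
 * release the cork. The result is ready for ip6_send_skb().
 */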
struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (skb == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					np->autoflowlabel));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}

int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}

void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);

struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     int hlimit, int tclass,
			     struct ipv6_txoptions *opt, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     int dontfrag)
{
	struct inet_cork_full cork;
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (opt ? opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.base.flags = 0;
	cork.base.addr = 0;
	cork.base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
	if (err)
		return ERR_PTR(err);

	if (dontfrag < 0)
		dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, dontfrag);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
}