xref: /openbmc/linux/net/ipv6/ip6_output.c (revision 0edbfea5)
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *			:       add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>

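/* Final transmit step: resolve (or create) the neighbour entry for the
 * route's next hop and hand the skb to it. Multicast packets may
 * additionally be looped back to local listeners and are accounted to
 * IPSTATS_MIB_OUTMCAST here before transmission.
 */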
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

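/* Fragment before the final transmit step when the packet exceeds the
 * path MTU and is not GSO, when the route demands fragmentation of all
 * packets (dst_allfrag), or when conntrack defrag recorded a smaller
 * maximum fragment size on input.
 */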
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(net, sk, skb, ip6_finish_output2);
	else
		return ip6_finish_output2(net, sk, skb);
}

int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note: the socket lock is not held for SYNACK packets, but the skb
 * might be modified by calls to skb_set_owner_w() and ipv6_local_error(),
 * which use proper atomic operations or spinlocks.
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: extension headers may take lots of space (~8K for now);
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (!skb2) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
			 * it is safe to call in our context (socket lock not held)
			 */
			skb_set_owner_w(skb, (struct sock *)sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
						     np->autoflowlabel, fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dst->dev,
			       dst_output);
	}

	skb->dev = dst->dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);

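/* Deliver a Router Alert packet to every raw socket registered for this
 * alert value: each listener but the last receives a clone, and the
 * final one consumes the original skb. Returns 1 when the packet was
 * delivered to at least one socket.
 */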
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

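/* Returns 1 if the packet is a unicast neighbour discovery message for
 * a proxied address and should be handed to the input path, -1 if it
 * must be dropped (link-local destination), and 0 if it may be
 * forwarded normally.
 */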
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* Unicast neighbour discovery messages destined
			 * to the proxied address are passed to the input
			 * function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	return dst_output(net, sk, skb);
}

static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}

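/* Oversized packets may still be forwarded when conntrack defrag set
 * ignore_df (and no received fragment exceeded the MTU), or when the
 * packet is GSO and each resulting segment fits within the MTU.
 */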
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}

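/* Forwarding path: check that forwarding is enabled and the packet is
 * eligible, intercept Router Alert packets, enforce the hop limit,
 * honour proxy NDP, emit redirects where appropriate, verify the path
 * MTU, then decrement hop_limit and pass the packet through the
 * NF_INET_FORWARD hook to ip6_forward_finish().
 */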
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (unlikely(skb->sk))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP, we cannot do anything.
	 *	Defragmentation would also be a mistake; RA packets
	 *	must not be fragmented, because there is no guarantee
	 *	that different fragments will travel along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement hop limit
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			__IP6_INC_STATS(net, ip6_dst_idev(dst),
					IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* The IPv6 specs say nothing about it, but it is clear that we
	   cannot send redirects to source-routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same:
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INTOOBIGERRORS);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling the hop limit is delayed until after the skb COW */

	hdr->hop_limit--;

	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}

int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb was not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);
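	/* "mtu" is now the payload budget per fragment. Illustrative
	 * arithmetic (assuming a 1500-byte path MTU and a bare 40-byte
	 * IPv6 header, i.e. hlen == 40): each fragment can carry
	 * 1500 - 40 - 8 = 1452 bytes after the IPv6 and fragment headers.
	 */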

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

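/* Slow path: allocate a fresh skb for every fragment and copy the
 * payload into it with skb_copy_bits(). Used whenever the frag_list
 * geometry is unsuitable for the fast path above.
 */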
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	skb->dev = skb_dst(skb)->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

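/* Returns nonzero when the cached route cannot be validated for
 * fl_addr: it is neither a host route for that address nor does the
 * saved last-used address (addr_cache) match.
 */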
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the non-connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If the route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may
	 *    check its validity using the saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save the whole address now,
	 *    (because the main consumer of this service
	 *    is TCP, which does not have this problem),
	 *    so the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	   (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;
	int flags = 0;

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source-specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
		struct rt6_info *rt;
		bool had_dst = *dst != NULL;

		if (!had_dst)
			*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if (!had_dst && (*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}

		if (fl6->flowi6_oif)
			flags |= RT6_LOOKUP_F_IFACE;
	}

	if (!*dst)
		*dst = ip6_route_output_flags(net, sk, fl6, flags);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here, if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it with the
	 * dst entry of the nexthop router.
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@net: network namespace to perform the lookup in
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer-encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer-encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

static inline int ip6_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int exthdrlen, int transhdrlen, int mtu,
			unsigned int flags, const struct flowi6 *fl6)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by the network
	 * device, so create one single skb packet containing the
	 * complete UDP datagram.
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (!skb)
			return err;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb_set_network_header(skb, exthdrlen);

		/* initialize the protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
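	/* Illustrative arithmetic (assuming mtu == 1500 and
	 * fragheaderlen == 40 for a bare IPv6 header):
	 * gso_size = (1500 - 40 - 8) & ~7 = 1448 payload bytes per
	 * software-generated fragment.
	 */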
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
							 &fl6->daddr,
							 &fl6->saddr);

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not the first; the header
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}

static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;
	struct ipv6_txoptions *opt = ipc6->opt;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
		if (unlikely(!v6_cork->opt))
			return -ENOBUFS;

		v6_cork->opt->tot_len = opt->tot_len;
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above --miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = ipc6->hlimit;
	v6_cork->tclass = ipc6->tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	cork->base.fragsize = mtu;
	if (dst_allfrag(rt->dst.path))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	return 0;
}

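/* Core of the corked-send machinery: append "length" bytes obtained
 * through getfrag() to the queue, by filling the tail skb, coalescing
 * into page frags when the device supports scatter-gather, or opening
 * new fragment-sized skbs. UFO-capable devices instead receive one
 * large skb built by ip6_ufo_append_data().
 */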
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6,
			     const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);
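	/* Illustrative arithmetic (assuming mtu == 1500 and
	 * fragheaderlen == 40): maxfraglen = ((1500 - 40) & ~7) + 40 - 8
	 * = 1488; a fragment skb of this length plus the 8-byte fragment
	 * header still fits within the MTU (1488 + 8 = 1496 <= 1500).
	 */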

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		ipv6_local_error(sk, EMSGSIZE, fl6,
				 mtu - headersize +
				 sizeof(struct ipv6hdr));
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length < mtu - headersize &&
	    !(flags & MSG_MORE) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) &&
	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen, exthdrlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into the current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment; the
				 * trailer space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags,
		    const struct sockcm_cookie *sockc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
				     ipc6, rt, fl6);
		if (err)
			return err;

		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, ipc6, sockc);
}
EXPORT_SYMBOL_GPL(ip6_append_data);
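
/* Sketch of the typical corked send sequence as seen from a datagram
 * protocol (illustrative only; real callers such as rawv6_sendmsg()
 * add per-protocol setup and error handling around it):
 *
 *	lock_sock(sk);
 *	err = ip6_append_data(sk, getfrag, msg, len, 0, &ipc6, &fl6,
 *			      rt, msg->msg_flags, &sockc);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk);
 *	release_sock(sk);
 */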

static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}

struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					np->autoflowlabel, fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}

int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}

void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);

struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     const struct sockcm_cookie *sockc)
{
	struct inet_cork_full cork;
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.base.flags = 0;
	cork.base.addr = 0;
	cork.base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
	if (err)
		return ERR_PTR(err);

	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, ipc6, sockc);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
}