xref: /openbmc/linux/net/ipv6/ip6_output.c (revision 6a551c11)
1 /*
2  *	IPv6 output functions
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on linux/net/ipv4/ip_output.c
9  *
10  *	This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  *
15  *	Changes:
16  *	A.N.Kuznetsov	:	arithmetic in fragmentation.
17  *				extension headers are implemented.
18  *				route changes now work.
19  *				ip6_forward does not confuse sniffers.
20  *				etc.
21  *
22  *      H. von Brand    :       Added missing #include <linux/string.h>
23  *	Imran Patel	:	frag id should be in NBO
24  *      Kazunori MIYAZAWA @USAGI
25  *			:       add ip6_append_data and related functions
26  *				for datagram xmit
27  */
28 
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
41 
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
44 
45 #include <net/sock.h>
46 #include <net/snmp.h>
47 
48 #include <net/ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
54 #include <net/icmp.h>
55 #include <net/xfrm.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
58 #include <net/l3mdev.h>
59 
60 static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
61 {
62 	struct dst_entry *dst = skb_dst(skb);
63 	struct net_device *dev = dst->dev;
64 	struct neighbour *neigh;
65 	struct in6_addr *nexthop;
66 	int ret;
67 
68 	skb->protocol = htons(ETH_P_IPV6);
69 	skb->dev = dev;
70 
71 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
72 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
73 
74 		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
75 		    ((mroute6_socket(net, skb) &&
76 		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
77 		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
78 					 &ipv6_hdr(skb)->saddr))) {
79 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
80 
81 			/* Do not check for IFF_ALLMULTI; multicast routing
82 			   is not supported in any case.
83 			 */
84 			if (newskb)
85 				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
86 					net, sk, newskb, NULL, newskb->dev,
87 					dev_loopback_xmit);
88 
89 			if (ipv6_hdr(skb)->hop_limit == 0) {
90 				IP6_INC_STATS(net, idev,
91 					      IPSTATS_MIB_OUTDISCARDS);
92 				kfree_skb(skb);
93 				return 0;
94 			}
95 		}
96 
97 		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
98 
99 		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
100 		    IPV6_ADDR_SCOPE_NODELOCAL &&
101 		    !(dev->flags & IFF_LOOPBACK)) {
102 			kfree_skb(skb);
103 			return 0;
104 		}
105 	}
106 
107 	rcu_read_lock_bh();
108 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
109 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 	if (unlikely(!neigh))
111 		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
112 	if (!IS_ERR(neigh)) {
113 		ret = dst_neigh_output(dst, neigh, skb);
114 		rcu_read_unlock_bh();
115 		return ret;
116 	}
117 	rcu_read_unlock_bh();
118 
119 	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
120 	kfree_skb(skb);
121 	return -EINVAL;
122 }
123 
124 static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
125 {
126 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
127 	    dst_allfrag(skb_dst(skb)) ||
128 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
129 		return ip6_fragment(net, sk, skb, ip6_finish_output2);
130 	else
131 		return ip6_finish_output2(net, sk, skb);
132 }
133 
134 int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
135 {
136 	struct net_device *dev = skb_dst(skb)->dev;
137 	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
138 
139 	if (unlikely(idev->cnf.disable_ipv6)) {
140 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
141 		kfree_skb(skb);
142 		return 0;
143 	}
144 
145 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
146 			    net, sk, skb, NULL, dev,
147 			    ip6_finish_output,
148 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
149 }
150 
151 /*
152  * xmit an sk_buff (used by TCP, SCTP and DCCP)
153  * Note: the socket lock is not held for SYNACK packets, but the socket
154  * might still be modified by calls to skb_set_owner_w() and
155  * ipv6_local_error(), which use proper atomic operations or spinlocks.
156  */
157 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
158 	     struct ipv6_txoptions *opt, int tclass)
159 {
160 	struct net *net = sock_net(sk);
161 	const struct ipv6_pinfo *np = inet6_sk(sk);
162 	struct in6_addr *first_hop = &fl6->daddr;
163 	struct dst_entry *dst = skb_dst(skb);
164 	struct ipv6hdr *hdr;
165 	u8  proto = fl6->flowi6_proto;
166 	int seg_len = skb->len;
167 	int hlimit = -1;
168 	u32 mtu;
169 
170 	if (opt) {
171 		unsigned int head_room;
172 
173 		/* First: extension headers may take lots of space
174 		   (~8K for now); MAX_HEADER is not enough.
175 		 */
176 		head_room = opt->opt_nflen + opt->opt_flen;
177 		seg_len += head_room;
178 		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
179 
180 		if (skb_headroom(skb) < head_room) {
181 			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
182 			if (!skb2) {
183 				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
184 					      IPSTATS_MIB_OUTDISCARDS);
185 				kfree_skb(skb);
186 				return -ENOBUFS;
187 			}
188 			consume_skb(skb);
189 			skb = skb2;
190 			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
191 			 * so it is safe to call in our context (socket lock not held)
192 			 */
193 			skb_set_owner_w(skb, (struct sock *)sk);
194 		}
195 		if (opt->opt_flen)
196 			ipv6_push_frag_opts(skb, opt, &proto);
197 		if (opt->opt_nflen)
198 			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
199 	}
200 
201 	skb_push(skb, sizeof(struct ipv6hdr));
202 	skb_reset_network_header(skb);
203 	hdr = ipv6_hdr(skb);
204 
205 	/*
206 	 *	Fill in the IPv6 header
207 	 */
208 	if (np)
209 		hlimit = np->hop_limit;
210 	if (hlimit < 0)
211 		hlimit = ip6_dst_hoplimit(dst);
212 
213 	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
214 						     np->autoflowlabel, fl6));
215 
216 	hdr->payload_len = htons(seg_len);
217 	hdr->nexthdr = proto;
218 	hdr->hop_limit = hlimit;
219 
220 	hdr->saddr = fl6->saddr;
221 	hdr->daddr = *first_hop;
222 
223 	skb->protocol = htons(ETH_P_IPV6);
224 	skb->priority = sk->sk_priority;
225 	skb->mark = sk->sk_mark;
226 
227 	mtu = dst_mtu(dst);
228 	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
229 		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
230 			      IPSTATS_MIB_OUT, skb->len);
231 		/* Hooks should never assume the socket lock is held;
232 		 * we promote our socket to non-const.
233 		 */
234 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
235 			       net, (struct sock *)sk, skb, NULL, dst->dev,
236 			       dst_output);
237 	}
238 
239 	skb->dev = dst->dev;
240 	/* ipv6_local_error() does not require the socket lock,
241 	 * so we promote our socket to non-const.
242 	 */
243 	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
244 
245 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
246 	kfree_skb(skb);
247 	return -EMSGSIZE;
248 }
249 EXPORT_SYMBOL(ip6_xmit);
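/*
 * Editor's sketch (not part of the original file): how a transport
 * protocol might hand a fully built segment to ip6_xmit(). The flow
 * setup below is an assumption for illustration; real callers such as
 * TCPv6 fill the flowi6 from request/socket state and usually pass
 * their cached ipv6_txoptions rather than NULL.
 */
static int example_transport_xmit(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = sk->sk_protocol;	/* e.g. IPPROTO_TCP */
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;

	/* ip6_xmit() reads the route from skb_dst(skb), so the caller
	 * must have attached a dst_entry to the skb beforehand.
	 */
	return ip6_xmit(sk, skb, &fl6, NULL, np->tclass);
}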
250 
251 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
252 {
253 	struct ip6_ra_chain *ra;
254 	struct sock *last = NULL;
255 
256 	read_lock(&ip6_ra_lock);
257 	for (ra = ip6_ra_chain; ra; ra = ra->next) {
258 		struct sock *sk = ra->sk;
259 		if (sk && ra->sel == sel &&
260 		    (!sk->sk_bound_dev_if ||
261 		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
262 			if (last) {
263 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
264 				if (skb2)
265 					rawv6_rcv(last, skb2);
266 			}
267 			last = sk;
268 		}
269 	}
270 
271 	if (last) {
272 		rawv6_rcv(last, skb);
273 		read_unlock(&ip6_ra_lock);
274 		return 1;
275 	}
276 	read_unlock(&ip6_ra_lock);
277 	return 0;
278 }
279 
280 static int ip6_forward_proxy_check(struct sk_buff *skb)
281 {
282 	struct ipv6hdr *hdr = ipv6_hdr(skb);
283 	u8 nexthdr = hdr->nexthdr;
284 	__be16 frag_off;
285 	int offset;
286 
287 	if (ipv6_ext_hdr(nexthdr)) {
288 		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
289 		if (offset < 0)
290 			return 0;
291 	} else
292 		offset = sizeof(struct ipv6hdr);
293 
294 	if (nexthdr == IPPROTO_ICMPV6) {
295 		struct icmp6hdr *icmp6;
296 
297 		if (!pskb_may_pull(skb, (skb_network_header(skb) +
298 					 offset + 1 - skb->data)))
299 			return 0;
300 
301 		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
302 
303 		switch (icmp6->icmp6_type) {
304 		case NDISC_ROUTER_SOLICITATION:
305 		case NDISC_ROUTER_ADVERTISEMENT:
306 		case NDISC_NEIGHBOUR_SOLICITATION:
307 		case NDISC_NEIGHBOUR_ADVERTISEMENT:
308 		case NDISC_REDIRECT:
309 			/* Pass a unicast neighbor discovery message
310 			 * destined to the proxied address to the
311 			 * input function.
312 			 */
313 			return 1;
314 		default:
315 			break;
316 		}
317 	}
318 
319 	/*
320 	 * The proxying router can't forward traffic sent to a link-local
321 	 * address, so signal the sender and discard the packet. This
322 	 * behavior is clarified by the MIPv6 specification.
323 	 */
324 	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
325 		dst_link_failure(skb);
326 		return -1;
327 	}
328 
329 	return 0;
330 }
331 
332 static inline int ip6_forward_finish(struct net *net, struct sock *sk,
333 				     struct sk_buff *skb)
334 {
335 	return dst_output(net, sk, skb);
336 }
337 
338 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
339 {
340 	unsigned int mtu;
341 	struct inet6_dev *idev;
342 
343 	if (dst_metric_locked(dst, RTAX_MTU)) {
344 		mtu = dst_metric_raw(dst, RTAX_MTU);
345 		if (mtu)
346 			return mtu;
347 	}
348 
349 	mtu = IPV6_MIN_MTU;
350 	rcu_read_lock();
351 	idev = __in6_dev_get(dst->dev);
352 	if (idev)
353 		mtu = idev->cnf.mtu6;
354 	rcu_read_unlock();
355 
356 	return mtu;
357 }
358 
359 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
360 {
361 	if (skb->len <= mtu)
362 		return false;
363 
364 	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
365 	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
366 		return true;
367 
368 	if (skb->ignore_df)
369 		return false;
370 
371 	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
372 		return false;
373 
374 	return true;
375 }
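/*
 * Editor's worked example of the checks above (illustrative only):
 * a 1600-byte, non-GSO forwarded packet against a 1500-byte route MTU
 * fails the first test (1600 > 1500), has no conntrack frag_max_size
 * set, does not carry ignore_df, and is not GSO, so ip6_pkt_too_big()
 * returns true and ip6_forward() answers with an ICMPv6 Packet Too Big
 * carrying MTU 1500 instead of fragmenting on behalf of the sender.
 */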
376 
377 int ip6_forward(struct sk_buff *skb)
378 {
379 	struct dst_entry *dst = skb_dst(skb);
380 	struct ipv6hdr *hdr = ipv6_hdr(skb);
381 	struct inet6_skb_parm *opt = IP6CB(skb);
382 	struct net *net = dev_net(dst->dev);
383 	u32 mtu;
384 
385 	if (net->ipv6.devconf_all->forwarding == 0)
386 		goto error;
387 
388 	if (skb->pkt_type != PACKET_HOST)
389 		goto drop;
390 
391 	if (unlikely(skb->sk))
392 		goto drop;
393 
394 	if (skb_warn_if_lro(skb))
395 		goto drop;
396 
397 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
398 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
399 				IPSTATS_MIB_INDISCARDS);
400 		goto drop;
401 	}
402 
403 	skb_forward_csum(skb);
404 
405 	/*
406 	 *	We do NOT do any processing on
407 	 *	RA packets; we push them to user level AS IS,
408 	 *	without any warranty that the application will
409 	 *	be able to interpret them. The reason is that we
410 	 *	cannot do anything clever here.
411 	 *
412 	 *	We are not an end node, so if the packet contains
413 	 *	AH/ESP we cannot do anything with it.
414 	 *	Defragmentation would also be a mistake; RA packets
415 	 *	cannot be fragmented, because there is no warranty
416 	 *	that different fragments will go along one path. --ANK
417 	 */
418 	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
419 		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
420 			return 0;
421 	}
422 
423 	/*
424 	 *	check and decrement ttl
425 	 */
426 	if (hdr->hop_limit <= 1) {
427 		/* Force OUTPUT device used as source address */
428 		skb->dev = dst->dev;
429 		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
430 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
431 				IPSTATS_MIB_INHDRERRORS);
432 
433 		kfree_skb(skb);
434 		return -ETIMEDOUT;
435 	}
436 
437 	/* XXX: idev->cnf.proxy_ndp? */
438 	if (net->ipv6.devconf_all->proxy_ndp &&
439 	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
440 		int proxied = ip6_forward_proxy_check(skb);
441 		if (proxied > 0)
442 			return ip6_input(skb);
443 		else if (proxied < 0) {
444 			__IP6_INC_STATS(net, ip6_dst_idev(dst),
445 					IPSTATS_MIB_INDISCARDS);
446 			goto drop;
447 		}
448 	}
449 
450 	if (!xfrm6_route_forward(skb)) {
451 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
452 				IPSTATS_MIB_INDISCARDS);
453 		goto drop;
454 	}
455 	dst = skb_dst(skb);
456 
457 	/* IPv6 specs say nothing about it, but it is clear that we cannot
458 	   send redirects to source-routed frames.
459 	   We don't send redirects to frames decapsulated from IPsec.
460 	 */
461 	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
462 		struct in6_addr *target = NULL;
463 		struct inet_peer *peer;
464 		struct rt6_info *rt;
465 
466 		/*
467 		 *	incoming and outgoing devices are the same
468 		 *	send a redirect.
469 		 */
470 
471 		rt = (struct rt6_info *) dst;
472 		if (rt->rt6i_flags & RTF_GATEWAY)
473 			target = &rt->rt6i_gateway;
474 		else
475 			target = &hdr->daddr;
476 
477 		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
478 
479 		/* Limit redirects both by destination (here)
480 		   and by source (inside ndisc_send_redirect)
481 		 */
482 		if (inet_peer_xrlim_allow(peer, 1*HZ))
483 			ndisc_send_redirect(skb, target);
484 		if (peer)
485 			inet_putpeer(peer);
486 	} else {
487 		int addrtype = ipv6_addr_type(&hdr->saddr);
488 
489 		/* This check is security critical. */
490 		if (addrtype == IPV6_ADDR_ANY ||
491 		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
492 			goto error;
493 		if (addrtype & IPV6_ADDR_LINKLOCAL) {
494 			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
495 				    ICMPV6_NOT_NEIGHBOUR, 0);
496 			goto error;
497 		}
498 	}
499 
500 	mtu = ip6_dst_mtu_forward(dst);
501 	if (mtu < IPV6_MIN_MTU)
502 		mtu = IPV6_MIN_MTU;
503 
504 	if (ip6_pkt_too_big(skb, mtu)) {
505 		/* Again, force OUTPUT device used as source address */
506 		skb->dev = dst->dev;
507 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
508 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
509 				IPSTATS_MIB_INTOOBIGERRORS);
510 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
511 				IPSTATS_MIB_FRAGFAILS);
512 		kfree_skb(skb);
513 		return -EMSGSIZE;
514 	}
515 
516 	if (skb_cow(skb, dst->dev->hard_header_len)) {
517 		__IP6_INC_STATS(net, ip6_dst_idev(dst),
518 				IPSTATS_MIB_OUTDISCARDS);
519 		goto drop;
520 	}
521 
522 	hdr = ipv6_hdr(skb);
523 
524 	/* Mangling hops number delayed to point after skb COW */
525 
526 	hdr->hop_limit--;
527 
528 	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
529 	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
530 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
531 		       net, NULL, skb, skb->dev, dst->dev,
532 		       ip6_forward_finish);
533 
534 error:
535 	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
536 drop:
537 	kfree_skb(skb);
538 	return -EINVAL;
539 }
540 
541 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
542 {
543 	to->pkt_type = from->pkt_type;
544 	to->priority = from->priority;
545 	to->protocol = from->protocol;
546 	skb_dst_drop(to);
547 	skb_dst_set(to, dst_clone(skb_dst(from)));
548 	to->dev = from->dev;
549 	to->mark = from->mark;
550 
551 #ifdef CONFIG_NET_SCHED
552 	to->tc_index = from->tc_index;
553 #endif
554 	nf_copy(to, from);
555 	skb_copy_secmark(to, from);
556 }
557 
558 int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
559 		 int (*output)(struct net *, struct sock *, struct sk_buff *))
560 {
561 	struct sk_buff *frag;
562 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
563 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
564 				inet6_sk(skb->sk) : NULL;
565 	struct ipv6hdr *tmp_hdr;
566 	struct frag_hdr *fh;
567 	unsigned int mtu, hlen, left, len;
568 	int hroom, troom;
569 	__be32 frag_id;
570 	int ptr, offset = 0, err = 0;
571 	u8 *prevhdr, nexthdr = 0;
572 
573 	hlen = ip6_find_1stfragopt(skb, &prevhdr);
574 	nexthdr = *prevhdr;
575 
576 	mtu = ip6_skb_dst_mtu(skb);
577 
578 	/* We must not fragment if the socket is set to force MTU discovery
579 	 * or if the skb was not generated by a local socket.
580 	 */
581 	if (unlikely(!skb->ignore_df && skb->len > mtu))
582 		goto fail_toobig;
583 
584 	if (IP6CB(skb)->frag_max_size) {
585 		if (IP6CB(skb)->frag_max_size > mtu)
586 			goto fail_toobig;
587 
588 		/* don't send fragments larger than what we received */
589 		mtu = IP6CB(skb)->frag_max_size;
590 		if (mtu < IPV6_MIN_MTU)
591 			mtu = IPV6_MIN_MTU;
592 	}
593 
594 	if (np && np->frag_size < mtu) {
595 		if (np->frag_size)
596 			mtu = np->frag_size;
597 	}
598 	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
599 		goto fail_toobig;
600 	mtu -= hlen + sizeof(struct frag_hdr);
601 
602 	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
603 				    &ipv6_hdr(skb)->saddr);
604 
605 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
606 	    (err = skb_checksum_help(skb)))
607 		goto fail;
608 
609 	hroom = LL_RESERVED_SPACE(rt->dst.dev);
610 	if (skb_has_frag_list(skb)) {
611 		int first_len = skb_pagelen(skb);
612 		struct sk_buff *frag2;
613 
614 		if (first_len - hlen > mtu ||
615 		    ((first_len - hlen) & 7) ||
616 		    skb_cloned(skb) ||
617 		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
618 			goto slow_path;
619 
620 		skb_walk_frags(skb, frag) {
621 			/* Correct geometry. */
622 			if (frag->len > mtu ||
623 			    ((frag->len & 7) && frag->next) ||
624 			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
625 				goto slow_path_clean;
626 
627 			/* Partially cloned skb? */
628 			if (skb_shared(frag))
629 				goto slow_path_clean;
630 
631 			BUG_ON(frag->sk);
632 			if (skb->sk) {
633 				frag->sk = skb->sk;
634 				frag->destructor = sock_wfree;
635 			}
636 			skb->truesize -= frag->truesize;
637 		}
638 
639 		err = 0;
640 		offset = 0;
641 		/* BUILD HEADER */
642 
643 		*prevhdr = NEXTHDR_FRAGMENT;
644 		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
645 		if (!tmp_hdr) {
646 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
647 				      IPSTATS_MIB_FRAGFAILS);
648 			err = -ENOMEM;
649 			goto fail;
650 		}
651 		frag = skb_shinfo(skb)->frag_list;
652 		skb_frag_list_init(skb);
653 
654 		__skb_pull(skb, hlen);
655 		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
656 		__skb_push(skb, hlen);
657 		skb_reset_network_header(skb);
658 		memcpy(skb_network_header(skb), tmp_hdr, hlen);
659 
660 		fh->nexthdr = nexthdr;
661 		fh->reserved = 0;
662 		fh->frag_off = htons(IP6_MF);
663 		fh->identification = frag_id;
664 
665 		first_len = skb_pagelen(skb);
666 		skb->data_len = first_len - skb_headlen(skb);
667 		skb->len = first_len;
668 		ipv6_hdr(skb)->payload_len = htons(first_len -
669 						   sizeof(struct ipv6hdr));
670 
671 		dst_hold(&rt->dst);
672 
673 		for (;;) {
674 			/* Prepare the header of the next frame
675 			 * before the previous one goes down. */
676 			if (frag) {
677 				frag->ip_summed = CHECKSUM_NONE;
678 				skb_reset_transport_header(frag);
679 				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
680 				__skb_push(frag, hlen);
681 				skb_reset_network_header(frag);
682 				memcpy(skb_network_header(frag), tmp_hdr,
683 				       hlen);
684 				offset += skb->len - hlen - sizeof(struct frag_hdr);
685 				fh->nexthdr = nexthdr;
686 				fh->reserved = 0;
687 				fh->frag_off = htons(offset);
688 				if (frag->next)
689 					fh->frag_off |= htons(IP6_MF);
690 				fh->identification = frag_id;
691 				ipv6_hdr(frag)->payload_len =
692 						htons(frag->len -
693 						      sizeof(struct ipv6hdr));
694 				ip6_copy_metadata(frag, skb);
695 			}
696 
697 			err = output(net, sk, skb);
698 			if (!err)
699 				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
700 					      IPSTATS_MIB_FRAGCREATES);
701 
702 			if (err || !frag)
703 				break;
704 
705 			skb = frag;
706 			frag = skb->next;
707 			skb->next = NULL;
708 		}
709 
710 		kfree(tmp_hdr);
711 
712 		if (err == 0) {
713 			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
714 				      IPSTATS_MIB_FRAGOKS);
715 			ip6_rt_put(rt);
716 			return 0;
717 		}
718 
719 		kfree_skb_list(frag);
720 
721 		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
722 			      IPSTATS_MIB_FRAGFAILS);
723 		ip6_rt_put(rt);
724 		return err;
725 
726 slow_path_clean:
727 		skb_walk_frags(skb, frag2) {
728 			if (frag2 == frag)
729 				break;
730 			frag2->sk = NULL;
731 			frag2->destructor = NULL;
732 			skb->truesize += frag2->truesize;
733 		}
734 	}
735 
736 slow_path:
737 	left = skb->len - hlen;		/* Space per frame */
738 	ptr = hlen;			/* Where to start from */
739 
740 	/*
741 	 *	Fragment the datagram.
742 	 */
743 
744 	*prevhdr = NEXTHDR_FRAGMENT;
745 	troom = rt->dst.dev->needed_tailroom;
746 
747 	/*
748 	 *	Keep copying data until we run out.
749 	 */
750 	while (left > 0)	{
751 		len = left;
752 		/* IF: it doesn't fit, use 'mtu' - the data space left */
753 		if (len > mtu)
754 			len = mtu;
755 		/* IF: we are not sending up to and including the packet end
756 		   then align the next start on an eight byte boundary */
757 		if (len < left)	{
758 			len &= ~7;
759 		}
760 
761 		/* Allocate buffer */
762 		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
763 				 hroom + troom, GFP_ATOMIC);
764 		if (!frag) {
765 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
766 				      IPSTATS_MIB_FRAGFAILS);
767 			err = -ENOMEM;
768 			goto fail;
769 		}
770 
771 		/*
772 		 *	Set up data on packet
773 		 */
774 
775 		ip6_copy_metadata(frag, skb);
776 		skb_reserve(frag, hroom);
777 		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
778 		skb_reset_network_header(frag);
779 		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
780 		frag->transport_header = (frag->network_header + hlen +
781 					  sizeof(struct frag_hdr));
782 
783 		/*
784 		 *	Charge the memory for the fragment to any owner
785 		 *	it might possess
786 		 */
787 		if (skb->sk)
788 			skb_set_owner_w(frag, skb->sk);
789 
790 		/*
791 		 *	Copy the packet header into the new buffer.
792 		 */
793 		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
794 
795 		/*
796 		 *	Build fragment header.
797 		 */
798 		fh->nexthdr = nexthdr;
799 		fh->reserved = 0;
800 		fh->identification = frag_id;
801 
802 		/*
803 		 *	Copy a block of the IP datagram.
804 		 */
805 		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
806 				     len));
807 		left -= len;
808 
809 		fh->frag_off = htons(offset);
810 		if (left > 0)
811 			fh->frag_off |= htons(IP6_MF);
812 		ipv6_hdr(frag)->payload_len = htons(frag->len -
813 						    sizeof(struct ipv6hdr));
814 
815 		ptr += len;
816 		offset += len;
817 
818 		/*
819 		 *	Put this fragment into the sending queue.
820 		 */
821 		err = output(net, sk, frag);
822 		if (err)
823 			goto fail;
824 
825 		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
826 			      IPSTATS_MIB_FRAGCREATES);
827 	}
828 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
829 		      IPSTATS_MIB_FRAGOKS);
830 	consume_skb(skb);
831 	return err;
832 
833 fail_toobig:
834 	if (skb->sk && dst_allfrag(skb_dst(skb)))
835 		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
836 
837 	skb->dev = skb_dst(skb)->dev;
838 	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
839 	err = -EMSGSIZE;
840 
841 fail:
842 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
843 		      IPSTATS_MIB_FRAGFAILS);
844 	kfree_skb(skb);
845 	return err;
846 }
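/*
 * Editor's worked example of the size arithmetic in ip6_fragment()
 * (illustrative assumption: a plain packet with no extension headers):
 *
 *	link MTU		= 1500
 *	hlen			= 40	(bare IPv6 header)
 *	mtu -= hlen + 8		-> 1452	(8 = sizeof(struct frag_hdr))
 *	len &= ~7		-> 1448	per fragment, except the last
 *
 * A 4000-byte payload therefore leaves as fragments carrying
 * 1448 + 1448 + 1104 bytes, each behind its own 40-byte IPv6 header
 * and 8-byte fragment header, with byte offsets that are always
 * multiples of 8 as the fragment-offset encoding requires.
 */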
847 
848 static inline int ip6_rt_check(const struct rt6key *rt_key,
849 			       const struct in6_addr *fl_addr,
850 			       const struct in6_addr *addr_cache)
851 {
852 	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
853 		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
854 }
855 
856 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
857 					  struct dst_entry *dst,
858 					  const struct flowi6 *fl6)
859 {
860 	struct ipv6_pinfo *np = inet6_sk(sk);
861 	struct rt6_info *rt;
862 
863 	if (!dst)
864 		goto out;
865 
866 	if (dst->ops->family != AF_INET6) {
867 		dst_release(dst);
868 		return NULL;
869 	}
870 
871 	rt = (struct rt6_info *)dst;
872 	/* Yes, checking route validity in the unconnected
873 	 * case is not very simple. Take into account
874 	 * that we do not support routing by source, TOS,
875 	 * or MSG_DONTROUTE		--ANK (980726)
876 	 *
877 	 * 1. ip6_rt_check(): If the route was a host route,
878 	 *    check that the cached destination is current.
879 	 *    If it is a network route, we can still
880 	 *    check its validity using a saved pointer
881 	 *    to the last used address: daddr_cache.
882 	 *    We do not want to save the whole address now
883 	 *    (because the main consumer of this service
884 	 *    is TCP, which does not have this problem),
885 	 *    so this last trick works only on connected
886 	 *    sockets.
887 	 * 2. The oif should also be the same.
888 	 */
889 	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
890 #ifdef CONFIG_IPV6_SUBTREES
891 	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
892 #endif
893 	   (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
894 	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
895 		dst_release(dst);
896 		dst = NULL;
897 	}
898 
899 out:
900 	return dst;
901 }
902 
903 static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
904 			       struct dst_entry **dst, struct flowi6 *fl6)
905 {
906 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
907 	struct neighbour *n;
908 	struct rt6_info *rt;
909 #endif
910 	int err;
911 	int flags = 0;
912 
913 	/* The correct way to handle this would be to do
914 	 * ip6_route_get_saddr, and then ip6_route_output; however,
915 	 * the route-specific preferred source forces the
916 	 * ip6_route_output call _before_ ip6_route_get_saddr.
917 	 *
918 	 * In source-specific routing (no src=any default route),
919 	 * ip6_route_output will fail given a src=any saddr, so
920 	 * we try it again later.
921 	 */
922 	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
923 		struct rt6_info *rt;
924 		bool had_dst = *dst != NULL;
925 
926 		if (!had_dst)
927 			*dst = ip6_route_output(net, sk, fl6);
928 		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
929 		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
930 					  sk ? inet6_sk(sk)->srcprefs : 0,
931 					  &fl6->saddr);
932 		if (err)
933 			goto out_err_release;
934 
935 		/* If we had an erroneous initial result, pretend it
936 		 * never existed and let the SA-enabled version take
937 		 * over.
938 		 */
939 		if (!had_dst && (*dst)->error) {
940 			dst_release(*dst);
941 			*dst = NULL;
942 		}
943 
944 		if (fl6->flowi6_oif)
945 			flags |= RT6_LOOKUP_F_IFACE;
946 	}
947 
948 	if (!*dst)
949 		*dst = ip6_route_output_flags(net, sk, fl6, flags);
950 
951 	err = (*dst)->error;
952 	if (err)
953 		goto out_err_release;
954 
955 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
956 	/*
957 	 * If the dst entry we've looked up
958 	 * has a neighbour entry that is in the INCOMPLETE
959 	 * state and the src address from the flow is
960 	 * marked as OPTIMISTIC, we release the found
961 	 * dst entry and replace it instead with the
962 	 * dst entry of the nexthop router.
963 	 */
964 	rt = (struct rt6_info *) *dst;
965 	rcu_read_lock_bh();
966 	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
967 				      rt6_nexthop(rt, &fl6->daddr));
968 	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
969 	rcu_read_unlock_bh();
970 
971 	if (err) {
972 		struct inet6_ifaddr *ifp;
973 		struct flowi6 fl_gw6;
974 		int redirect;
975 
976 		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
977 				      (*dst)->dev, 1);
978 
979 		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
980 		if (ifp)
981 			in6_ifa_put(ifp);
982 
983 		if (redirect) {
984 			/*
985 			 * We need to get the dst entry for the
986 			 * default router instead
987 			 */
988 			dst_release(*dst);
989 			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
990 			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
991 			*dst = ip6_route_output(net, sk, &fl_gw6);
992 			err = (*dst)->error;
993 			if (err)
994 				goto out_err_release;
995 		}
996 	}
997 #endif
998 
999 	return 0;
1000 
1001 out_err_release:
1002 	if (err == -ENETUNREACH)
1003 		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1004 	dst_release(*dst);
1005 	*dst = NULL;
1006 	return err;
1007 }
1008 
1009 /**
1010  *	ip6_dst_lookup - perform route lookup on flow
1011  *	@sk: socket which provides route info
1012  *	@dst: pointer to dst_entry * for result
1013  *	@fl6: flow to lookup
1014  *
1015  *	This function performs a route lookup on the given flow.
1016  *
1017  *	It returns zero on success, or a standard errno code on error.
1018  */
1019 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
1020 		   struct flowi6 *fl6)
1021 {
1022 	*dst = NULL;
1023 	return ip6_dst_lookup_tail(net, sk, dst, fl6);
1024 }
1025 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1026 
1027 /**
1028  *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1029  *	@sk: socket which provides route info
1030  *	@fl6: flow to lookup
1031  *	@final_dst: final destination address for ipsec lookup
1032  *
1033  *	This function performs a route lookup on the given flow.
1034  *
1035  *	It returns a valid dst pointer on success, or a pointer encoded
1036  *	error code.
1037  */
1038 struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
1039 				      const struct in6_addr *final_dst)
1040 {
1041 	struct dst_entry *dst = NULL;
1042 	int err;
1043 
1044 	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1045 	if (err)
1046 		return ERR_PTR(err);
1047 	if (final_dst)
1048 		fl6->daddr = *final_dst;
1049 	if (!fl6->flowi6_oif)
1050 		fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
1051 
1052 	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1053 }
1054 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
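/*
 * Editor's sketch (assumption, not from this file) of the ERR_PTR
 * convention documented above for ip6_dst_lookup_flow(): callers must
 * test the result with IS_ERR(), never against NULL, and release the
 * dst when done with it.
 */
static int example_flow_lookup(struct sock *sk, struct flowi6 *fl6)
{
	struct dst_entry *dst;

	dst = ip6_dst_lookup_flow(sk, fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);	/* e.g. -ENETUNREACH */

	/* ... attach the dst to an skb or cache it on the socket ... */
	dst_release(dst);
	return 0;
}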
1055 
1056 /**
1057  *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1058  *	@sk: socket which provides the dst cache and route info
1059  *	@fl6: flow to lookup
1060  *	@final_dst: final destination address for ipsec lookup
1061  *
1062  *	This function performs a route lookup on the given flow with the
1063  *	possibility of using the cached route in the socket if it is valid.
1064  *	It will take the socket dst lock when operating on the dst cache.
1065  *	As a result, this function can only be used in process context.
1066  *
1067  *	It returns a valid dst pointer on success, or a pointer encoded
1068  *	error code.
1069  */
1070 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1071 					 const struct in6_addr *final_dst)
1072 {
1073 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1074 
1075 	dst = ip6_sk_dst_check(sk, dst, fl6);
1076 	if (!dst)
1077 		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
1078 
1079 	return dst;
1080 }
1081 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1082 
1083 static inline int ip6_ufo_append_data(struct sock *sk,
1084 			struct sk_buff_head *queue,
1085 			int getfrag(void *from, char *to, int offset, int len,
1086 			int odd, struct sk_buff *skb),
1087 			void *from, int length, int hh_len, int fragheaderlen,
1088 			int exthdrlen, int transhdrlen, int mtu,
1089 			unsigned int flags, const struct flowi6 *fl6)
1090 
1091 {
1092 	struct sk_buff *skb;
1093 	int err;
1094 
1095 	/* The network device supports UDP large send offload,
1096 	 * so create one single skb containing the complete
1097 	 * UDP datagram.
1098 	 */
1099 	skb = skb_peek_tail(queue);
1100 	if (!skb) {
1101 		skb = sock_alloc_send_skb(sk,
1102 			hh_len + fragheaderlen + transhdrlen + 20,
1103 			(flags & MSG_DONTWAIT), &err);
1104 		if (!skb)
1105 			return err;
1106 
1107 		/* reserve space for Hardware header */
1108 		skb_reserve(skb, hh_len);
1109 
1110 		/* create space for UDP/IP header */
1111 		skb_put(skb, fragheaderlen + transhdrlen);
1112 
1113 		/* initialize network header pointer */
1114 		skb_set_network_header(skb, exthdrlen);
1115 
1116 		/* initialize protocol header pointer */
1117 		skb->transport_header = skb->network_header + fragheaderlen;
1118 
1119 		skb->protocol = htons(ETH_P_IPV6);
1120 		skb->csum = 0;
1121 
1122 		__skb_queue_tail(queue, skb);
1123 	} else if (skb_is_gso(skb)) {
1124 		goto append;
1125 	}
1126 
1127 	skb->ip_summed = CHECKSUM_PARTIAL;
1128 	/* Specify the length of each IPv6 datagram fragment.
1129 	 * It has to be a multiple of 8.
1130 	 */
1131 	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1132 				     sizeof(struct frag_hdr)) & ~7;
1133 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1134 	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
1135 							 &fl6->daddr,
1136 							 &fl6->saddr);
1137 
1138 append:
1139 	return skb_append_datato_frags(sk, skb, getfrag, from,
1140 				       (length - transhdrlen));
1141 }
1142 
1143 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1144 					       gfp_t gfp)
1145 {
1146 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1147 }
1148 
1149 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1150 						gfp_t gfp)
1151 {
1152 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1153 }
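/*
 * The (src->hdrlen + 1) * 8 in the two helpers above is the standard
 * IPv6 extension-header length encoding (RFC 2460): hdrlen counts
 * 8-octet units beyond the first 8 octets, so e.g. hdrlen == 1 means
 * a 16-byte header.
 */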
1154 
1155 static void ip6_append_data_mtu(unsigned int *mtu,
1156 				int *maxfraglen,
1157 				unsigned int fragheaderlen,
1158 				struct sk_buff *skb,
1159 				struct rt6_info *rt,
1160 				unsigned int orig_mtu)
1161 {
1162 	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1163 		if (!skb) {
1164 			/* first fragment, reserve header_len */
1165 			*mtu = orig_mtu - rt->dst.header_len;
1166 
1167 		} else {
1168 			/*
1169 			 * this fragment is not the first; the header
1170 			 * space is regarded as data space.
1171 			 */
1172 			*mtu = orig_mtu;
1173 		}
1174 		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
1175 			      + fragheaderlen - sizeof(struct frag_hdr);
1176 	}
1177 }
1178 
1179 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1180 			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
1181 			  struct rt6_info *rt, struct flowi6 *fl6)
1182 {
1183 	struct ipv6_pinfo *np = inet6_sk(sk);
1184 	unsigned int mtu;
1185 	struct ipv6_txoptions *opt = ipc6->opt;
1186 
1187 	/*
1188 	 * setup for corking
1189 	 */
1190 	if (opt) {
1191 		if (WARN_ON(v6_cork->opt))
1192 			return -EINVAL;
1193 
1194 		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
1195 		if (unlikely(!v6_cork->opt))
1196 			return -ENOBUFS;
1197 
1198 		v6_cork->opt->tot_len = opt->tot_len;
1199 		v6_cork->opt->opt_flen = opt->opt_flen;
1200 		v6_cork->opt->opt_nflen = opt->opt_nflen;
1201 
1202 		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1203 						    sk->sk_allocation);
1204 		if (opt->dst0opt && !v6_cork->opt->dst0opt)
1205 			return -ENOBUFS;
1206 
1207 		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1208 						    sk->sk_allocation);
1209 		if (opt->dst1opt && !v6_cork->opt->dst1opt)
1210 			return -ENOBUFS;
1211 
1212 		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
1213 						   sk->sk_allocation);
1214 		if (opt->hopopt && !v6_cork->opt->hopopt)
1215 			return -ENOBUFS;
1216 
1217 		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1218 						    sk->sk_allocation);
1219 		if (opt->srcrt && !v6_cork->opt->srcrt)
1220 			return -ENOBUFS;
1221 
1222 		/* need source address above --miyazawa */
1223 	}
1224 	dst_hold(&rt->dst);
1225 	cork->base.dst = &rt->dst;
1226 	cork->fl.u.ip6 = *fl6;
1227 	v6_cork->hop_limit = ipc6->hlimit;
1228 	v6_cork->tclass = ipc6->tclass;
1229 	if (rt->dst.flags & DST_XFRM_TUNNEL)
1230 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1231 		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
1232 	else
1233 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1234 		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1235 	if (np->frag_size < mtu) {
1236 		if (np->frag_size)
1237 			mtu = np->frag_size;
1238 	}
1239 	cork->base.fragsize = mtu;
1240 	if (dst_allfrag(rt->dst.path))
1241 		cork->base.flags |= IPCORK_ALLFRAG;
1242 	cork->base.length = 0;
1243 
1244 	return 0;
1245 }
1246 
1247 static int __ip6_append_data(struct sock *sk,
1248 			     struct flowi6 *fl6,
1249 			     struct sk_buff_head *queue,
1250 			     struct inet_cork *cork,
1251 			     struct inet6_cork *v6_cork,
1252 			     struct page_frag *pfrag,
1253 			     int getfrag(void *from, char *to, int offset,
1254 					 int len, int odd, struct sk_buff *skb),
1255 			     void *from, int length, int transhdrlen,
1256 			     unsigned int flags, struct ipcm6_cookie *ipc6,
1257 			     const struct sockcm_cookie *sockc)
1258 {
1259 	struct sk_buff *skb, *skb_prev = NULL;
1260 	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1261 	int exthdrlen = 0;
1262 	int dst_exthdrlen = 0;
1263 	int hh_len;
1264 	int copy;
1265 	int err;
1266 	int offset = 0;
1267 	__u8 tx_flags = 0;
1268 	u32 tskey = 0;
1269 	struct rt6_info *rt = (struct rt6_info *)cork->dst;
1270 	struct ipv6_txoptions *opt = v6_cork->opt;
1271 	int csummode = CHECKSUM_NONE;
1272 	unsigned int maxnonfragsize, headersize;
1273 
1274 	skb = skb_peek_tail(queue);
1275 	if (!skb) {
1276 		exthdrlen = opt ? opt->opt_flen : 0;
1277 		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1278 	}
1279 
1280 	mtu = cork->fragsize;
1281 	orig_mtu = mtu;
1282 
1283 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1284 
1285 	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1286 			(opt ? opt->opt_nflen : 0);
1287 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1288 		     sizeof(struct frag_hdr);
1289 
1290 	headersize = sizeof(struct ipv6hdr) +
1291 		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1292 		     (dst_allfrag(&rt->dst) ?
1293 		      sizeof(struct frag_hdr) : 0) +
1294 		     rt->rt6i_nfheader_len;
1295 
1296 	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
1297 	    (sk->sk_protocol == IPPROTO_UDP ||
1298 	     sk->sk_protocol == IPPROTO_RAW)) {
1299 		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1300 				sizeof(struct ipv6hdr));
1301 		goto emsgsize;
1302 	}
1303 
1304 	if (ip6_sk_ignore_df(sk))
1305 		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1306 	else
1307 		maxnonfragsize = mtu;
1308 
1309 	if (cork->length + length > maxnonfragsize - headersize) {
1310 emsgsize:
1311 		ipv6_local_error(sk, EMSGSIZE, fl6,
1312 				 mtu - headersize +
1313 				 sizeof(struct ipv6hdr));
1314 		return -EMSGSIZE;
1315 	}
1316 
1317 	/* CHECKSUM_PARTIAL only with no extension headers and when
1318 	 * we are not going to fragment
1319 	 */
1320 	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1321 	    headersize == sizeof(struct ipv6hdr) &&
1322 	    length < mtu - headersize &&
1323 	    !(flags & MSG_MORE) &&
1324 	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1325 		csummode = CHECKSUM_PARTIAL;
1326 
1327 	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
1328 		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
1329 		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
1330 		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1331 			tskey = sk->sk_tskey++;
1332 	}
1333 
1334 	/*
1335 	 * Let's try using as much space as possible.
1336 	 * Use MTU if total length of the message fits into the MTU.
1337 	 * Otherwise, we need to reserve fragment header and
1338 	 * fragment alignment (= 8-15 octets, in total).
1339 	 *
1340 	 * Note that we may need to "move" the data from the tail
1341 	 * of the buffer to the new fragment when we split
1342 	 * the message.
1343 	 *
1344 	 * FIXME: It may be fragmented into multiple chunks
1345 	 *        at once if non-fragmentable extension headers
1346 	 *        are too large.
1347 	 * --yoshfuji
1348 	 */
1349 
1350 	cork->length += length;
1351 	if (((length > mtu) ||
1352 	     (skb && skb_is_gso(skb))) &&
1353 	    (sk->sk_protocol == IPPROTO_UDP) &&
1354 	    (rt->dst.dev->features & NETIF_F_UFO) &&
1355 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1356 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1357 					  hh_len, fragheaderlen, exthdrlen,
1358 					  transhdrlen, mtu, flags, fl6);
1359 		if (err)
1360 			goto error;
1361 		return 0;
1362 	}
1363 
1364 	if (!skb)
1365 		goto alloc_new_skb;
1366 
1367 	while (length > 0) {
1368 		/* Check if the remaining data fits into current packet. */
1369 		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1370 		if (copy < length)
1371 			copy = maxfraglen - skb->len;
1372 
1373 		if (copy <= 0) {
1374 			char *data;
1375 			unsigned int datalen;
1376 			unsigned int fraglen;
1377 			unsigned int fraggap;
1378 			unsigned int alloclen;
1379 alloc_new_skb:
1380 			/* There's no room in the current skb */
1381 			if (skb)
1382 				fraggap = skb->len - maxfraglen;
1383 			else
1384 				fraggap = 0;
1385 			/* update mtu and maxfraglen if necessary */
1386 			if (!skb || !skb_prev)
1387 				ip6_append_data_mtu(&mtu, &maxfraglen,
1388 						    fragheaderlen, skb, rt,
1389 						    orig_mtu);
1390 
1391 			skb_prev = skb;
1392 
1393 			/*
1394 			 * If remaining data exceeds the mtu,
1395 			 * we know we need more fragment(s).
1396 			 */
1397 			datalen = length + fraggap;
1398 
1399 			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1400 				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1401 			if ((flags & MSG_MORE) &&
1402 			    !(rt->dst.dev->features&NETIF_F_SG))
1403 				alloclen = mtu;
1404 			else
1405 				alloclen = datalen + fragheaderlen;
1406 
1407 			alloclen += dst_exthdrlen;
1408 
1409 			if (datalen != length + fraggap) {
1410 				/*
1411 				 * this is not the last fragment; the trailer
1412 				 * space is regarded as data space.
1413 				 */
1414 				datalen += rt->dst.trailer_len;
1415 			}
1416 
1417 			alloclen += rt->dst.trailer_len;
1418 			fraglen = datalen + fragheaderlen;
1419 
1420 			/*
1421 			 * We just reserve space for the fragment header.
1422 			 * Note: this may be overallocation if the message
1423 			 * (without MSG_MORE) fits into the MTU.
1424 			 */
1425 			alloclen += sizeof(struct frag_hdr);
1426 
1427 			if (transhdrlen) {
1428 				skb = sock_alloc_send_skb(sk,
1429 						alloclen + hh_len,
1430 						(flags & MSG_DONTWAIT), &err);
1431 			} else {
1432 				skb = NULL;
1433 				if (atomic_read(&sk->sk_wmem_alloc) <=
1434 				    2 * sk->sk_sndbuf)
1435 					skb = sock_wmalloc(sk,
1436 							   alloclen + hh_len, 1,
1437 							   sk->sk_allocation);
1438 				if (unlikely(!skb))
1439 					err = -ENOBUFS;
1440 			}
1441 			if (!skb)
1442 				goto error;
1443 			/*
1444 			 *	Fill in the control structures
1445 			 */
1446 			skb->protocol = htons(ETH_P_IPV6);
1447 			skb->ip_summed = csummode;
1448 			skb->csum = 0;
1449 			/* reserve for fragmentation and ipsec header */
1450 			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1451 				    dst_exthdrlen);
1452 
1453 			/* Only the initial fragment is time stamped */
1454 			skb_shinfo(skb)->tx_flags = tx_flags;
1455 			tx_flags = 0;
1456 			skb_shinfo(skb)->tskey = tskey;
1457 			tskey = 0;
1458 
1459 			/*
1460 			 *	Find where to start putting bytes
1461 			 */
1462 			data = skb_put(skb, fraglen);
1463 			skb_set_network_header(skb, exthdrlen);
1464 			data += fragheaderlen;
1465 			skb->transport_header = (skb->network_header +
1466 						 fragheaderlen);
1467 			if (fraggap) {
1468 				skb->csum = skb_copy_and_csum_bits(
1469 					skb_prev, maxfraglen,
1470 					data + transhdrlen, fraggap, 0);
1471 				skb_prev->csum = csum_sub(skb_prev->csum,
1472 							  skb->csum);
1473 				data += fraggap;
1474 				pskb_trim_unique(skb_prev, maxfraglen);
1475 			}
1476 			copy = datalen - transhdrlen - fraggap;
1477 
1478 			if (copy < 0) {
1479 				err = -EINVAL;
1480 				kfree_skb(skb);
1481 				goto error;
1482 			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1483 				err = -EFAULT;
1484 				kfree_skb(skb);
1485 				goto error;
1486 			}
1487 
1488 			offset += copy;
1489 			length -= datalen - fraggap;
1490 			transhdrlen = 0;
1491 			exthdrlen = 0;
1492 			dst_exthdrlen = 0;
1493 
1494 			/*
1495 			 * Put the packet on the pending queue
1496 			 */
1497 			__skb_queue_tail(queue, skb);
1498 			continue;
1499 		}
1500 
1501 		if (copy > length)
1502 			copy = length;
1503 
1504 		if (!(rt->dst.dev->features&NETIF_F_SG)) {
1505 			unsigned int off;
1506 
1507 			off = skb->len;
1508 			if (getfrag(from, skb_put(skb, copy),
1509 						offset, copy, off, skb) < 0) {
1510 				__skb_trim(skb, off);
1511 				err = -EFAULT;
1512 				goto error;
1513 			}
1514 		} else {
1515 			int i = skb_shinfo(skb)->nr_frags;
1516 
1517 			err = -ENOMEM;
1518 			if (!sk_page_frag_refill(sk, pfrag))
1519 				goto error;
1520 
1521 			if (!skb_can_coalesce(skb, i, pfrag->page,
1522 					      pfrag->offset)) {
1523 				err = -EMSGSIZE;
1524 				if (i == MAX_SKB_FRAGS)
1525 					goto error;
1526 
1527 				__skb_fill_page_desc(skb, i, pfrag->page,
1528 						     pfrag->offset, 0);
1529 				skb_shinfo(skb)->nr_frags = ++i;
1530 				get_page(pfrag->page);
1531 			}
1532 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1533 			if (getfrag(from,
1534 				    page_address(pfrag->page) + pfrag->offset,
1535 				    offset, copy, skb->len, skb) < 0)
1536 				goto error_efault;
1537 
1538 			pfrag->offset += copy;
1539 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1540 			skb->len += copy;
1541 			skb->data_len += copy;
1542 			skb->truesize += copy;
1543 			atomic_add(copy, &sk->sk_wmem_alloc);
1544 		}
1545 		offset += copy;
1546 		length -= copy;
1547 	}
1548 
1549 	return 0;
1550 
1551 error_efault:
1552 	err = -EFAULT;
1553 error:
1554 	cork->length -= length;
1555 	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1556 	return err;
1557 }
1558 
1559 int ip6_append_data(struct sock *sk,
1560 		    int getfrag(void *from, char *to, int offset, int len,
1561 				int odd, struct sk_buff *skb),
1562 		    void *from, int length, int transhdrlen,
1563 		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1564 		    struct rt6_info *rt, unsigned int flags,
1565 		    const struct sockcm_cookie *sockc)
1566 {
1567 	struct inet_sock *inet = inet_sk(sk);
1568 	struct ipv6_pinfo *np = inet6_sk(sk);
1569 	int exthdrlen;
1570 	int err;
1571 
1572 	if (flags&MSG_PROBE)
1573 		return 0;
1574 	if (skb_queue_empty(&sk->sk_write_queue)) {
1575 		/*
1576 		 * setup for corking
1577 		 */
1578 		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
1579 				     ipc6, rt, fl6);
1580 		if (err)
1581 			return err;
1582 
1583 		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1584 		length += exthdrlen;
1585 		transhdrlen += exthdrlen;
1586 	} else {
1587 		fl6 = &inet->cork.fl.u.ip6;
1588 		transhdrlen = 0;
1589 	}
1590 
1591 	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
1592 				 &np->cork, sk_page_frag(sk), getfrag,
1593 				 from, length, transhdrlen, flags, ipc6, sockc);
1594 }
1595 EXPORT_SYMBOL_GPL(ip6_append_data);
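/*
 * Editor's sketch (assumption) of the corked send pattern built on
 * ip6_append_data(): queue one or more chunks, then push or flush.
 * Real users are udpv6_sendmsg(), rawv6_sendmsg() and ICMPv6; the
 * stock ip_generic_getfrag() callback copies from the msghdr
 * iterator. Real callers hold the socket lock around this sequence.
 */
static int example_corked_send(struct sock *sk, struct msghdr *msg,
			       size_t len, struct ipcm6_cookie *ipc6,
			       struct flowi6 *fl6, struct rt6_info *rt,
			       const struct sockcm_cookie *sockc)
{
	int err;

	err = ip6_append_data(sk, ip_generic_getfrag, msg, len, 0,
			      ipc6, fl6, rt, msg->msg_flags, sockc);
	if (err)
		ip6_flush_pending_frames(sk);	   /* drop queued data */
	else if (!(msg->msg_flags & MSG_MORE))
		err = ip6_push_pending_frames(sk); /* build and send */
	return err;
}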
1596 
1597 static void ip6_cork_release(struct inet_cork_full *cork,
1598 			     struct inet6_cork *v6_cork)
1599 {
1600 	if (v6_cork->opt) {
1601 		kfree(v6_cork->opt->dst0opt);
1602 		kfree(v6_cork->opt->dst1opt);
1603 		kfree(v6_cork->opt->hopopt);
1604 		kfree(v6_cork->opt->srcrt);
1605 		kfree(v6_cork->opt);
1606 		v6_cork->opt = NULL;
1607 	}
1608 
1609 	if (cork->base.dst) {
1610 		dst_release(cork->base.dst);
1611 		cork->base.dst = NULL;
1612 		cork->base.flags &= ~IPCORK_ALLFRAG;
1613 	}
1614 	memset(&cork->fl, 0, sizeof(cork->fl));
1615 }
1616 
1617 struct sk_buff *__ip6_make_skb(struct sock *sk,
1618 			       struct sk_buff_head *queue,
1619 			       struct inet_cork_full *cork,
1620 			       struct inet6_cork *v6_cork)
1621 {
1622 	struct sk_buff *skb, *tmp_skb;
1623 	struct sk_buff **tail_skb;
1624 	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1625 	struct ipv6_pinfo *np = inet6_sk(sk);
1626 	struct net *net = sock_net(sk);
1627 	struct ipv6hdr *hdr;
1628 	struct ipv6_txoptions *opt = v6_cork->opt;
1629 	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1630 	struct flowi6 *fl6 = &cork->fl.u.ip6;
1631 	unsigned char proto = fl6->flowi6_proto;
1632 
1633 	skb = __skb_dequeue(queue);
1634 	if (!skb)
1635 		goto out;
1636 	tail_skb = &(skb_shinfo(skb)->frag_list);
1637 
1638 	/* move skb->data to ip header from ext header */
1639 	if (skb->data < skb_network_header(skb))
1640 		__skb_pull(skb, skb_network_offset(skb));
1641 	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1642 		__skb_pull(tmp_skb, skb_network_header_len(skb));
1643 		*tail_skb = tmp_skb;
1644 		tail_skb = &(tmp_skb->next);
1645 		skb->len += tmp_skb->len;
1646 		skb->data_len += tmp_skb->len;
1647 		skb->truesize += tmp_skb->truesize;
1648 		tmp_skb->destructor = NULL;
1649 		tmp_skb->sk = NULL;
1650 	}
1651 
1652 	/* Allow local fragmentation. */
1653 	skb->ignore_df = ip6_sk_ignore_df(sk);
1654 
1655 	*final_dst = fl6->daddr;
1656 	__skb_pull(skb, skb_network_header_len(skb));
1657 	if (opt && opt->opt_flen)
1658 		ipv6_push_frag_opts(skb, opt, &proto);
1659 	if (opt && opt->opt_nflen)
1660 		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1661 
1662 	skb_push(skb, sizeof(struct ipv6hdr));
1663 	skb_reset_network_header(skb);
1664 	hdr = ipv6_hdr(skb);
1665 
1666 	ip6_flow_hdr(hdr, v6_cork->tclass,
1667 		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
1668 					np->autoflowlabel, fl6));
1669 	hdr->hop_limit = v6_cork->hop_limit;
1670 	hdr->nexthdr = proto;
1671 	hdr->saddr = fl6->saddr;
1672 	hdr->daddr = *final_dst;
1673 
1674 	skb->priority = sk->sk_priority;
1675 	skb->mark = sk->sk_mark;
1676 
1677 	skb_dst_set(skb, dst_clone(&rt->dst));
1678 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1679 	if (proto == IPPROTO_ICMPV6) {
1680 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1681 
1682 		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1683 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1684 	}
1685 
1686 	ip6_cork_release(cork, v6_cork);
1687 out:
1688 	return skb;
1689 }
1690 
1691 int ip6_send_skb(struct sk_buff *skb)
1692 {
1693 	struct net *net = sock_net(skb->sk);
1694 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1695 	int err;
1696 
1697 	err = ip6_local_out(net, skb->sk, skb);
1698 	if (err) {
1699 		if (err > 0)
1700 			err = net_xmit_errno(err);
1701 		if (err)
1702 			IP6_INC_STATS(net, rt->rt6i_idev,
1703 				      IPSTATS_MIB_OUTDISCARDS);
1704 	}
1705 
1706 	return err;
1707 }
1708 
1709 int ip6_push_pending_frames(struct sock *sk)
1710 {
1711 	struct sk_buff *skb;
1712 
1713 	skb = ip6_finish_skb(sk);
1714 	if (!skb)
1715 		return 0;
1716 
1717 	return ip6_send_skb(skb);
1718 }
1719 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1720 
1721 static void __ip6_flush_pending_frames(struct sock *sk,
1722 				       struct sk_buff_head *queue,
1723 				       struct inet_cork_full *cork,
1724 				       struct inet6_cork *v6_cork)
1725 {
1726 	struct sk_buff *skb;
1727 
1728 	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
1729 		if (skb_dst(skb))
1730 			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1731 				      IPSTATS_MIB_OUTDISCARDS);
1732 		kfree_skb(skb);
1733 	}
1734 
1735 	ip6_cork_release(cork, v6_cork);
1736 }
1737 
1738 void ip6_flush_pending_frames(struct sock *sk)
1739 {
1740 	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
1741 				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
1742 }
1743 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1744 
1745 struct sk_buff *ip6_make_skb(struct sock *sk,
1746 			     int getfrag(void *from, char *to, int offset,
1747 					 int len, int odd, struct sk_buff *skb),
1748 			     void *from, int length, int transhdrlen,
1749 			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1750 			     struct rt6_info *rt, unsigned int flags,
1751 			     const struct sockcm_cookie *sockc)
1752 {
1753 	struct inet_cork_full cork;
1754 	struct inet6_cork v6_cork;
1755 	struct sk_buff_head queue;
1756 	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1757 	int err;
1758 
1759 	if (flags & MSG_PROBE)
1760 		return NULL;
1761 
1762 	__skb_queue_head_init(&queue);
1763 
1764 	cork.base.flags = 0;
1765 	cork.base.addr = 0;
1766 	cork.base.opt = NULL;
1767 	v6_cork.opt = NULL;
1768 	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
1769 	if (err)
1770 		return ERR_PTR(err);
1771 
1772 	if (ipc6->dontfrag < 0)
1773 		ipc6->dontfrag = inet6_sk(sk)->dontfrag;
1774 
1775 	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
1776 				&current->task_frag, getfrag, from,
1777 				length + exthdrlen, transhdrlen + exthdrlen,
1778 				flags, ipc6, sockc);
1779 	if (err) {
1780 		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
1781 		return ERR_PTR(err);
1782 	}
1783 
1784 	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
1785 }
1786
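/*
 * Editor's sketch (assumption) of the lockless single-shot path built
 * on ip6_make_skb() + ip6_send_skb(), as udpv6 uses when the socket is
 * not corked: the datagram is assembled on a private queue and sent in
 * one call, leaving no pending state on the socket.
 */
static int example_single_shot_send(struct sock *sk, struct msghdr *msg,
				    size_t len, struct ipcm6_cookie *ipc6,
				    struct flowi6 *fl6, struct rt6_info *rt,
				    unsigned int flags,
				    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;

	skb = ip6_make_skb(sk, ip_generic_getfrag, msg, len, 0,
			   ipc6, fl6, rt, flags, sockc);
	if (IS_ERR_OR_NULL(skb))
		return PTR_ERR(skb);	/* NULL (0) only for MSG_PROBE */

	return ip6_send_skb(skb);
}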