xref: /openbmc/linux/net/ipv6/route.c (revision fd155792)
1 /*
2  *	Linux INET6 implementation
3  *	FIB front-end.
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13 
14 /*	Changes:
15  *
16  *	YOSHIFUJI Hideaki @USAGI
17  *		reworked default router selection.
18  *		- respect outgoing interface
19  *		- select from (probably) reachable routers (i.e.
20  *		routers in REACHABLE, STALE, DELAY or PROBE states).
21  *		- always select the same router if it is (probably)
22  *		reachable.  otherwise, round-robin the list.
23  *	Ville Nuorvala
24  *		Fixed routing subtrees.
25  */
26 
27 #define pr_fmt(fmt) "IPv6: " fmt
28 
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/xfrm.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
61 
62 #include <asm/uaccess.h>
63 
64 #ifdef CONFIG_SYSCTL
65 #include <linux/sysctl.h>
66 #endif
67 
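/*
 * Result of the neighbour reachability check done while scoring a
 * route (rt6_check_neigh()/rt6_score_route()).  Negative values denote
 * failures: find_match() skips a route entirely on RT6_NUD_FAIL_HARD,
 * and lets rt6_select() fall back to round-robin when the best
 * candidate only scored RT6_NUD_FAIL_DO_RR.
 */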
68 enum rt6_nud_state {
69 	RT6_NUD_FAIL_HARD = -3,
70 	RT6_NUD_FAIL_PROBE = -2,
71 	RT6_NUD_FAIL_DO_RR = -1,
72 	RT6_NUD_SUCCEED = 1
73 };
74 
75 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
76 				    const struct in6_addr *dest);
77 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
78 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
79 static unsigned int	 ip6_mtu(const struct dst_entry *dst);
80 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
81 static void		ip6_dst_destroy(struct dst_entry *);
82 static void		ip6_dst_ifdown(struct dst_entry *,
83 				       struct net_device *dev, int how);
84 static int		 ip6_dst_gc(struct dst_ops *ops);
85 
86 static int		ip6_pkt_discard(struct sk_buff *skb);
87 static int		ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
88 static int		ip6_pkt_prohibit(struct sk_buff *skb);
89 static int		ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
90 static void		ip6_link_failure(struct sk_buff *skb);
91 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
92 					   struct sk_buff *skb, u32 mtu);
93 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
94 					struct sk_buff *skb);
95 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
96 
97 #ifdef CONFIG_IPV6_ROUTE_INFO
98 static struct rt6_info *rt6_add_route_info(struct net *net,
99 					   const struct in6_addr *prefix, int prefixlen,
100 					   const struct in6_addr *gwaddr, int ifindex,
101 					   unsigned int pref);
102 static struct rt6_info *rt6_get_route_info(struct net *net,
103 					   const struct in6_addr *prefix, int prefixlen,
104 					   const struct in6_addr *gwaddr, int ifindex);
105 #endif
106 
107 static void rt6_bind_peer(struct rt6_info *rt, int create)
108 {
109 	struct inet_peer_base *base;
110 	struct inet_peer *peer;
111 
112 	base = inetpeer_base_ptr(rt->_rt6i_peer);
113 	if (!base)
114 		return;
115 
116 	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
117 	if (peer) {
118 		if (!rt6_set_peer(rt, peer))
119 			inet_putpeer(peer);
120 	}
121 }
122 
123 static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
124 {
125 	if (rt6_has_peer(rt))
126 		return rt6_peer_ptr(rt);
127 
128 	rt6_bind_peer(rt, create);
129 	return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
130 }
131 
132 static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
133 {
134 	return __rt6_get_peer(rt, 1);
135 }
136 
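/*
 * Copy-on-write metrics.  Non-host routes fall back to the generic
 * kmalloc'ed copy; for DST_HOST routes the writable metrics live in
 * the shared inet_peer entry for the destination, and the cmpxchg()
 * on dst->_metrics resolves a race with another CPU that installed
 * its metrics pointer first.
 */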
137 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
138 {
139 	struct rt6_info *rt = (struct rt6_info *) dst;
140 	struct inet_peer *peer;
141 	u32 *p = NULL;
142 
143 	if (!(rt->dst.flags & DST_HOST))
144 		return dst_cow_metrics_generic(dst, old);
145 
146 	peer = rt6_get_peer_create(rt);
147 	if (peer) {
148 		u32 *old_p = __DST_METRICS_PTR(old);
149 		unsigned long prev, new;
150 
151 		p = peer->metrics;
152 		if (inet_metrics_new(peer) ||
153 		    (old & DST_METRICS_FORCE_OVERWRITE))
154 			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
155 
156 		new = (unsigned long) p;
157 		prev = cmpxchg(&dst->_metrics, old, new);
158 
159 		if (prev != old) {
160 			p = __DST_METRICS_PTR(prev);
161 			if (prev & DST_METRICS_READ_ONLY)
162 				p = NULL;
163 		}
164 	}
165 	return p;
166 }
167 
168 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
169 					     struct sk_buff *skb,
170 					     const void *daddr)
171 {
172 	struct in6_addr *p = &rt->rt6i_gateway;
173 
174 	if (!ipv6_addr_any(p))
175 		return (const void *) p;
176 	else if (skb)
177 		return &ipv6_hdr(skb)->daddr;
178 	return daddr;
179 }
180 
181 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
182 					  struct sk_buff *skb,
183 					  const void *daddr)
184 {
185 	struct rt6_info *rt = (struct rt6_info *) dst;
186 	struct neighbour *n;
187 
188 	daddr = choose_neigh_daddr(rt, skb, daddr);
189 	n = __ipv6_neigh_lookup(dst->dev, daddr);
190 	if (n)
191 		return n;
192 	return neigh_create(&nd_tbl, daddr, dst->dev);
193 }
194 
195 static struct dst_ops ip6_dst_ops_template = {
196 	.family			=	AF_INET6,
197 	.gc			=	ip6_dst_gc,
198 	.gc_thresh		=	1024,
199 	.check			=	ip6_dst_check,
200 	.default_advmss		=	ip6_default_advmss,
201 	.mtu			=	ip6_mtu,
202 	.cow_metrics		=	ipv6_cow_metrics,
203 	.destroy		=	ip6_dst_destroy,
204 	.ifdown			=	ip6_dst_ifdown,
205 	.negative_advice	=	ip6_negative_advice,
206 	.link_failure		=	ip6_link_failure,
207 	.update_pmtu		=	ip6_rt_update_pmtu,
208 	.redirect		=	rt6_do_redirect,
209 	.local_out		=	__ip6_local_out,
210 	.neigh_lookup		=	ip6_neigh_lookup,
211 };
212 
213 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
214 {
215 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
216 
217 	return mtu ? : dst->dev->mtu;
218 }
219 
220 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
221 					 struct sk_buff *skb, u32 mtu)
222 {
223 }
224 
225 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
226 				      struct sk_buff *skb)
227 {
228 }
229 
230 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
231 					 unsigned long old)
232 {
233 	return NULL;
234 }
235 
236 static struct dst_ops ip6_dst_blackhole_ops = {
237 	.family			=	AF_INET6,
238 	.destroy		=	ip6_dst_destroy,
239 	.check			=	ip6_dst_check,
240 	.mtu			=	ip6_blackhole_mtu,
241 	.default_advmss		=	ip6_default_advmss,
242 	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
243 	.redirect		=	ip6_rt_blackhole_redirect,
244 	.cow_metrics		=	ip6_rt_blackhole_cow_metrics,
245 	.neigh_lookup		=	ip6_neigh_lookup,
246 };
247 
248 static const u32 ip6_template_metrics[RTAX_MAX] = {
249 	[RTAX_HOPLIMIT - 1] = 0,
250 };
251 
252 static const struct rt6_info ip6_null_entry_template = {
253 	.dst = {
254 		.__refcnt	= ATOMIC_INIT(1),
255 		.__use		= 1,
256 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
257 		.error		= -ENETUNREACH,
258 		.input		= ip6_pkt_discard,
259 		.output		= ip6_pkt_discard_out,
260 	},
261 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
262 	.rt6i_protocol  = RTPROT_KERNEL,
263 	.rt6i_metric	= ~(u32) 0,
264 	.rt6i_ref	= ATOMIC_INIT(1),
265 };
266 
267 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
268 
269 static const struct rt6_info ip6_prohibit_entry_template = {
270 	.dst = {
271 		.__refcnt	= ATOMIC_INIT(1),
272 		.__use		= 1,
273 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
274 		.error		= -EACCES,
275 		.input		= ip6_pkt_prohibit,
276 		.output		= ip6_pkt_prohibit_out,
277 	},
278 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
279 	.rt6i_protocol  = RTPROT_KERNEL,
280 	.rt6i_metric	= ~(u32) 0,
281 	.rt6i_ref	= ATOMIC_INIT(1),
282 };
283 
284 static const struct rt6_info ip6_blk_hole_entry_template = {
285 	.dst = {
286 		.__refcnt	= ATOMIC_INIT(1),
287 		.__use		= 1,
288 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
289 		.error		= -EINVAL,
290 		.input		= dst_discard,
291 		.output		= dst_discard_sk,
292 	},
293 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
294 	.rt6i_protocol  = RTPROT_KERNEL,
295 	.rt6i_metric	= ~(u32) 0,
296 	.rt6i_ref	= ATOMIC_INIT(1),
297 };
298 
299 #endif
300 
301 /* allocate dst with ip6_dst_ops */
302 static inline struct rt6_info *ip6_dst_alloc(struct net *net,
303 					     struct net_device *dev,
304 					     int flags,
305 					     struct fib6_table *table)
306 {
307 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
308 					0, DST_OBSOLETE_FORCE_CHK, flags);
309 
310 	if (rt) {
311 		struct dst_entry *dst = &rt->dst;
312 
313 		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
314 		rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
315 		INIT_LIST_HEAD(&rt->rt6i_siblings);
316 	}
317 	return rt;
318 }
319 
320 static void ip6_dst_destroy(struct dst_entry *dst)
321 {
322 	struct rt6_info *rt = (struct rt6_info *)dst;
323 	struct inet6_dev *idev = rt->rt6i_idev;
324 	struct dst_entry *from = dst->from;
325 
326 	if (!(rt->dst.flags & DST_HOST))
327 		dst_destroy_metrics_generic(dst);
328 
329 	if (idev) {
330 		rt->rt6i_idev = NULL;
331 		in6_dev_put(idev);
332 	}
333 
334 	dst->from = NULL;
335 	dst_release(from);
336 
337 	if (rt6_has_peer(rt)) {
338 		struct inet_peer *peer = rt6_peer_ptr(rt);
339 		inet_putpeer(peer);
340 	}
341 }
342 
343 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
344 			   int how)
345 {
346 	struct rt6_info *rt = (struct rt6_info *)dst;
347 	struct inet6_dev *idev = rt->rt6i_idev;
348 	struct net_device *loopback_dev =
349 		dev_net(dev)->loopback_dev;
350 
351 	if (dev != loopback_dev) {
352 		if (idev && idev->dev == dev) {
353 			struct inet6_dev *loopback_idev =
354 				in6_dev_get(loopback_dev);
355 			if (loopback_idev) {
356 				rt->rt6i_idev = loopback_idev;
357 				in6_dev_put(idev);
358 			}
359 		}
360 	}
361 }
362 
363 static bool rt6_check_expired(const struct rt6_info *rt)
364 {
365 	if (rt->rt6i_flags & RTF_EXPIRES) {
366 		if (time_after(jiffies, rt->dst.expires))
367 			return true;
368 	} else if (rt->dst.from) {
369 		return rt6_check_expired((struct rt6_info *) rt->dst.from);
370 	}
371 	return false;
372 }
373 
374 /* Multipath route selection:
375  *   Hash-based function using the packet header and flow label.
376  * Adapted from fib_info_hashfn()
377  */
378 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
379 			       const struct flowi6 *fl6)
380 {
381 	unsigned int val = fl6->flowi6_proto;
382 
383 	val ^= ipv6_addr_hash(&fl6->daddr);
384 	val ^= ipv6_addr_hash(&fl6->saddr);
385 
386 	/* Works only if this is not encapsulated */
387 	switch (fl6->flowi6_proto) {
388 	case IPPROTO_UDP:
389 	case IPPROTO_TCP:
390 	case IPPROTO_SCTP:
391 		val ^= (__force u16)fl6->fl6_sport;
392 		val ^= (__force u16)fl6->fl6_dport;
393 		break;
394 
395 	case IPPROTO_ICMPV6:
396 		val ^= (__force u16)fl6->fl6_icmp_type;
397 		val ^= (__force u16)fl6->fl6_icmp_code;
398 		break;
399 	}
400 	/* RFC 6438 recommends using the flow label */
401 	val ^= (__force u32)fl6->flowlabel;
402 
403 	/* Perhaps we need to tune this function? */
404 	val = val ^ (val >> 7) ^ (val >> 12);
405 	return val % candidate_count;
406 }
407 
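/*
 * rt6_multipath_select() - when the matched route has ECMP siblings,
 * use the hash above to pick one deterministically per flow.  Index 0
 * keeps the primary match; otherwise the N-th sibling is chosen,
 * unless it scores negative (unusable), in which case the original
 * match is kept.
 */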
408 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
409 					     struct flowi6 *fl6, int oif,
410 					     int strict)
411 {
412 	struct rt6_info *sibling, *next_sibling;
413 	int route_choosen;
414 
415 	route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
416 	/* Don't change the route if route_choosen == 0
417 	 * (the siblings list does not include ourselves)
418 	 */
419 	if (route_choosen)
420 		list_for_each_entry_safe(sibling, next_sibling,
421 				&match->rt6i_siblings, rt6i_siblings) {
422 			route_choosen--;
423 			if (route_choosen == 0) {
424 				if (rt6_score_route(sibling, oif, strict) < 0)
425 					break;
426 				match = sibling;
427 				break;
428 			}
429 		}
430 	return match;
431 }
432 
433 /*
434  *	Route lookup. The relevant table->tb6_lock is assumed to be held by the caller.
435  */
436 
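/*
 * rt6_device_match() - walk the routes sharing this prefix and return
 * the first one whose device matches @oif (or, with no @oif, whose
 * device owns @saddr).  Routes on the loopback device are remembered
 * as a fallback; with RT6_LOOKUP_F_IFACE and no match, the null entry
 * is returned instead of an arbitrary route.
 */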
437 static inline struct rt6_info *rt6_device_match(struct net *net,
438 						    struct rt6_info *rt,
439 						    const struct in6_addr *saddr,
440 						    int oif,
441 						    int flags)
442 {
443 	struct rt6_info *local = NULL;
444 	struct rt6_info *sprt;
445 
446 	if (!oif && ipv6_addr_any(saddr))
447 		goto out;
448 
449 	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
450 		struct net_device *dev = sprt->dst.dev;
451 
452 		if (oif) {
453 			if (dev->ifindex == oif)
454 				return sprt;
455 			if (dev->flags & IFF_LOOPBACK) {
456 				if (!sprt->rt6i_idev ||
457 				    sprt->rt6i_idev->dev->ifindex != oif) {
458 					if (flags & RT6_LOOKUP_F_IFACE && oif)
459 						continue;
460 					if (local && (!oif ||
461 						      local->rt6i_idev->dev->ifindex == oif))
462 						continue;
463 				}
464 				local = sprt;
465 			}
466 		} else {
467 			if (ipv6_chk_addr(net, saddr, dev,
468 					  flags & RT6_LOOKUP_F_IFACE))
469 				return sprt;
470 		}
471 	}
472 
473 	if (oif) {
474 		if (local)
475 			return local;
476 
477 		if (flags & RT6_LOOKUP_F_IFACE)
478 			return net->ipv6.ip6_null_entry;
479 	}
480 out:
481 	return rt;
482 }
483 
484 #ifdef CONFIG_IPV6_ROUTER_PREF
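/*
 * Router reachability probing: if a gateway route is selected while
 * its neighbour entry is not known to be valid, queue a work item
 * that sends a Neighbour Solicitation to the gateway's solicited-node
 * multicast address.  The transmission is deferred to a workqueue,
 * and probes are rate-limited via neigh->updated and
 * rtr_probe_interval.
 */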
485 struct __rt6_probe_work {
486 	struct work_struct work;
487 	struct in6_addr target;
488 	struct net_device *dev;
489 };
490 
491 static void rt6_probe_deferred(struct work_struct *w)
492 {
493 	struct in6_addr mcaddr;
494 	struct __rt6_probe_work *work =
495 		container_of(w, struct __rt6_probe_work, work);
496 
497 	addrconf_addr_solict_mult(&work->target, &mcaddr);
498 	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
499 	dev_put(work->dev);
500 	kfree(work);
501 }
502 
503 static void rt6_probe(struct rt6_info *rt)
504 {
505 	struct neighbour *neigh;
506 	/*
507 	 * Okay, this does not seem to be appropriate
508 	 * for now; however, we need to check whether it
509 	 * really is, aka Router Reachability Probing.
510 	 *
511 	 * Router Reachability Probe MUST be rate-limited
512 	 * to no more than one per minute.
513 	 */
514 	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
515 		return;
516 	rcu_read_lock_bh();
517 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
518 	if (neigh) {
519 		write_lock(&neigh->lock);
520 		if (neigh->nud_state & NUD_VALID)
521 			goto out;
522 	}
523 
524 	if (!neigh ||
525 	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
526 		struct __rt6_probe_work *work;
527 
528 		work = kmalloc(sizeof(*work), GFP_ATOMIC);
529 
530 		if (neigh && work)
531 			__neigh_set_probe_once(neigh);
532 
533 		if (neigh)
534 			write_unlock(&neigh->lock);
535 
536 		if (work) {
537 			INIT_WORK(&work->work, rt6_probe_deferred);
538 			work->target = rt->rt6i_gateway;
539 			dev_hold(rt->dst.dev);
540 			work->dev = rt->dst.dev;
541 			schedule_work(&work->work);
542 		}
543 	} else {
544 out:
545 		write_unlock(&neigh->lock);
546 	}
547 	rcu_read_unlock_bh();
548 }
549 #else
550 static inline void rt6_probe(struct rt6_info *rt)
551 {
552 }
553 #endif
554 
555 /*
556  * Default Router Selection (RFC 2461 6.3.6)
557  */
558 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
559 {
560 	struct net_device *dev = rt->dst.dev;
561 	if (!oif || dev->ifindex == oif)
562 		return 2;
563 	if ((dev->flags & IFF_LOOPBACK) &&
564 	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
565 		return 1;
566 	return 0;
567 }
568 
569 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
570 {
571 	struct neighbour *neigh;
572 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
573 
574 	if (rt->rt6i_flags & RTF_NONEXTHOP ||
575 	    !(rt->rt6i_flags & RTF_GATEWAY))
576 		return RT6_NUD_SUCCEED;
577 
578 	rcu_read_lock_bh();
579 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
580 	if (neigh) {
581 		read_lock(&neigh->lock);
582 		if (neigh->nud_state & NUD_VALID)
583 			ret = RT6_NUD_SUCCEED;
584 #ifdef CONFIG_IPV6_ROUTER_PREF
585 		else if (!(neigh->nud_state & NUD_FAILED))
586 			ret = RT6_NUD_SUCCEED;
587 		else
588 			ret = RT6_NUD_FAIL_PROBE;
589 #endif
590 		read_unlock(&neigh->lock);
591 	} else {
592 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
593 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
594 	}
595 	rcu_read_unlock_bh();
596 
597 	return ret;
598 }
599 
600 static int rt6_score_route(struct rt6_info *rt, int oif,
601 			   int strict)
602 {
603 	int m;
604 
605 	m = rt6_check_dev(rt, oif);
606 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
607 		return RT6_NUD_FAIL_HARD;
608 #ifdef CONFIG_IPV6_ROUTER_PREF
609 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
610 #endif
611 	if (strict & RT6_LOOKUP_F_REACHABLE) {
612 		int n = rt6_check_neigh(rt);
613 		if (n < 0)
614 			return n;
615 	}
616 	return m;
617 }
618 
619 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
620 				   int *mpri, struct rt6_info *match,
621 				   bool *do_rr)
622 {
623 	int m;
624 	bool match_do_rr = false;
625 
626 	if (rt6_check_expired(rt))
627 		goto out;
628 
629 	m = rt6_score_route(rt, oif, strict);
630 	if (m == RT6_NUD_FAIL_DO_RR) {
631 		match_do_rr = true;
632 		m = 0; /* lowest valid score */
633 	} else if (m == RT6_NUD_FAIL_HARD) {
634 		goto out;
635 	}
636 
637 	if (strict & RT6_LOOKUP_F_REACHABLE)
638 		rt6_probe(rt);
639 
640 	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
641 	if (m > *mpri) {
642 		*do_rr = match_do_rr;
643 		*mpri = m;
644 		match = rt;
645 	}
646 out:
647 	return match;
648 }
649 
650 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
651 				     struct rt6_info *rr_head,
652 				     u32 metric, int oif, int strict,
653 				     bool *do_rr)
654 {
655 	struct rt6_info *rt, *match;
656 	int mpri = -1;
657 
658 	match = NULL;
659 	for (rt = rr_head; rt && rt->rt6i_metric == metric;
660 	     rt = rt->dst.rt6_next)
661 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
662 	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
663 	     rt = rt->dst.rt6_next)
664 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
665 
666 	return match;
667 }
668 
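/*
 * rt6_select() - default router selection.  Score every route with
 * the same metric, starting from fn->rr_ptr, and return the best one.
 * If the winner was only usable because its neighbour state was
 * unknown (RT6_NUD_FAIL_DO_RR), advance rr_ptr so the next lookup
 * starts from a different router, giving the round-robin behaviour
 * described in the changelog at the top of this file.
 */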
669 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
670 {
671 	struct rt6_info *match, *rt0;
672 	struct net *net;
673 	bool do_rr = false;
674 
675 	rt0 = fn->rr_ptr;
676 	if (!rt0)
677 		fn->rr_ptr = rt0 = fn->leaf;
678 
679 	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
680 			     &do_rr);
681 
682 	if (do_rr) {
683 		struct rt6_info *next = rt0->dst.rt6_next;
684 
685 		/* no entries matched; do round-robin */
686 		if (!next || next->rt6i_metric != rt0->rt6i_metric)
687 			next = fn->leaf;
688 
689 		if (next != rt0)
690 			fn->rr_ptr = next;
691 	}
692 
693 	net = dev_net(rt0->dst.dev);
694 	return match ? match : net->ipv6.ip6_null_entry;
695 }
696 
697 #ifdef CONFIG_IPV6_ROUTE_INFO
698 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
699 		  const struct in6_addr *gwaddr)
700 {
701 	struct net *net = dev_net(dev);
702 	struct route_info *rinfo = (struct route_info *) opt;
703 	struct in6_addr prefix_buf, *prefix;
704 	unsigned int pref;
705 	unsigned long lifetime;
706 	struct rt6_info *rt;
707 
708 	if (len < sizeof(struct route_info)) {
709 		return -EINVAL;
710 	}
711 
712 	/* Sanity check for prefix_len and length */
713 	if (rinfo->length > 3) {
714 		return -EINVAL;
715 	} else if (rinfo->prefix_len > 128) {
716 		return -EINVAL;
717 	} else if (rinfo->prefix_len > 64) {
718 		if (rinfo->length < 2) {
719 			return -EINVAL;
720 		}
721 	} else if (rinfo->prefix_len > 0) {
722 		if (rinfo->length < 1) {
723 			return -EINVAL;
724 		}
725 	}
726 
727 	pref = rinfo->route_pref;
728 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
729 		return -EINVAL;
730 
731 	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
732 
733 	if (rinfo->length == 3)
734 		prefix = (struct in6_addr *)rinfo->prefix;
735 	else {
736 		/* this function is safe */
737 		ipv6_addr_prefix(&prefix_buf,
738 				 (struct in6_addr *)rinfo->prefix,
739 				 rinfo->prefix_len);
740 		prefix = &prefix_buf;
741 	}
742 
743 	if (rinfo->prefix_len == 0)
744 		rt = rt6_get_dflt_router(gwaddr, dev);
745 	else
746 		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
747 					gwaddr, dev->ifindex);
748 
749 	if (rt && !lifetime) {
750 		ip6_del_rt(rt);
751 		rt = NULL;
752 	}
753 
754 	if (!rt && lifetime)
755 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
756 					pref);
757 	else if (rt)
758 		rt->rt6i_flags = RTF_ROUTEINFO |
759 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
760 
761 	if (rt) {
762 		if (!addrconf_finite_timeout(lifetime))
763 			rt6_clean_expires(rt);
764 		else
765 			rt6_set_expires(rt, jiffies + HZ * lifetime);
766 
767 		ip6_rt_put(rt);
768 	}
769 	return 0;
770 }
771 #endif
772 
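/*
 * fib6_backtrack() - walk back up the fib trie after a lookup that
 * produced no usable route, so a less specific prefix can be tried.
 * Source-routing subtrees are re-consulted on the way up; NULL is
 * returned once the top-level root is reached.
 */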
773 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
774 					struct in6_addr *saddr)
775 {
776 	struct fib6_node *pn;
777 	while (1) {
778 		if (fn->fn_flags & RTN_TL_ROOT)
779 			return NULL;
780 		pn = fn->parent;
781 		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
782 			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
783 		else
784 			fn = pn;
785 		if (fn->fn_flags & RTN_RTINFO)
786 			return fn;
787 	}
788 }
789 
790 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
791 					     struct fib6_table *table,
792 					     struct flowi6 *fl6, int flags)
793 {
794 	struct fib6_node *fn;
795 	struct rt6_info *rt;
796 
797 	read_lock_bh(&table->tb6_lock);
798 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
799 restart:
800 	rt = fn->leaf;
801 	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
802 	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
803 		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
804 	if (rt == net->ipv6.ip6_null_entry) {
805 		fn = fib6_backtrack(fn, &fl6->saddr);
806 		if (fn)
807 			goto restart;
808 	}
809 	dst_use(&rt->dst, jiffies);
810 	read_unlock_bh(&table->tb6_lock);
811 	return rt;
812 
813 }
814 
815 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
816 				    int flags)
817 {
818 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
819 }
820 EXPORT_SYMBOL_GPL(ip6_route_lookup);
821 
822 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
823 			    const struct in6_addr *saddr, int oif, int strict)
824 {
825 	struct flowi6 fl6 = {
826 		.flowi6_oif = oif,
827 		.daddr = *daddr,
828 	};
829 	struct dst_entry *dst;
830 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
831 
832 	if (saddr) {
833 		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
834 		flags |= RT6_LOOKUP_F_HAS_SADDR;
835 	}
836 
837 	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
838 	if (dst->error == 0)
839 		return (struct rt6_info *) dst;
840 
841 	dst_release(dst);
842 
843 	return NULL;
844 }
845 EXPORT_SYMBOL(rt6_lookup);
846 
847 /* ip6_ins_rt is called with table->tb6_lock NOT held (free).
848    It takes a new route entry; if the addition fails for any reason,
849    the route is freed. In any case, if the caller does not hold a
850    reference to it, it may be destroyed.
851  */
852 
853 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
854 			struct mx6_config *mxc)
855 {
856 	int err;
857 	struct fib6_table *table;
858 
859 	table = rt->rt6i_table;
860 	write_lock_bh(&table->tb6_lock);
861 	err = fib6_add(&table->tb6_root, rt, info, mxc);
862 	write_unlock_bh(&table->tb6_lock);
863 
864 	return err;
865 }
866 
867 int ip6_ins_rt(struct rt6_info *rt)
868 {
869 	struct nl_info info = {	.nl_net = dev_net(rt->dst.dev), };
870 	struct mx6_config mxc = { .mx = NULL, };
871 
872 	return __ip6_ins_rt(rt, &info, &mxc);
873 }
874 
875 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
876 				      const struct in6_addr *daddr,
877 				      const struct in6_addr *saddr)
878 {
879 	struct rt6_info *rt;
880 
881 	/*
882 	 *	Clone the route.
883 	 */
884 
885 	rt = ip6_rt_copy(ort, daddr);
886 
887 	if (rt) {
888 		if (ort->rt6i_dst.plen != 128 &&
889 		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
890 			rt->rt6i_flags |= RTF_ANYCAST;
891 
892 		rt->rt6i_flags |= RTF_CACHE;
893 
894 #ifdef CONFIG_IPV6_SUBTREES
895 		if (rt->rt6i_src.plen && saddr) {
896 			rt->rt6i_src.addr = *saddr;
897 			rt->rt6i_src.plen = 128;
898 		}
899 #endif
900 	}
901 
902 	return rt;
903 }
904 
905 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
906 					const struct in6_addr *daddr)
907 {
908 	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
909 
910 	if (rt)
911 		rt->rt6i_flags |= RTF_CACHE;
912 	return rt;
913 }
914 
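/*
 * ip6_pol_route() - common input/output policy lookup.  Select a
 * route (retrying with less specific prefixes, and without the
 * REACHABLE requirement if nothing reachable was found), return it
 * directly if it is already an RTF_CACHE clone, and otherwise create
 * a per-destination clone with rt6_alloc_cow()/rt6_alloc_clone() and
 * insert it, retrying a few times if another CPU won the insert race.
 */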
915 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
916 				      struct flowi6 *fl6, int flags)
917 {
918 	struct fib6_node *fn, *saved_fn;
919 	struct rt6_info *rt, *nrt;
920 	int strict = 0;
921 	int attempts = 3;
922 	int err;
923 
924 	strict |= flags & RT6_LOOKUP_F_IFACE;
925 	if (net->ipv6.devconf_all->forwarding == 0)
926 		strict |= RT6_LOOKUP_F_REACHABLE;
927 
928 redo_fib6_lookup_lock:
929 	read_lock_bh(&table->tb6_lock);
930 
931 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
932 	saved_fn = fn;
933 
934 redo_rt6_select:
935 	rt = rt6_select(fn, oif, strict);
936 	if (rt->rt6i_nsiblings)
937 		rt = rt6_multipath_select(rt, fl6, oif, strict);
938 	if (rt == net->ipv6.ip6_null_entry) {
939 		fn = fib6_backtrack(fn, &fl6->saddr);
940 		if (fn)
941 			goto redo_rt6_select;
942 		else if (strict & RT6_LOOKUP_F_REACHABLE) {
943 			/* also consider unreachable route */
944 			strict &= ~RT6_LOOKUP_F_REACHABLE;
945 			fn = saved_fn;
946 			goto redo_rt6_select;
947 		} else {
948 			dst_hold(&rt->dst);
949 			read_unlock_bh(&table->tb6_lock);
950 			goto out2;
951 		}
952 	}
953 
954 	dst_hold(&rt->dst);
955 	read_unlock_bh(&table->tb6_lock);
956 
957 	if (rt->rt6i_flags & RTF_CACHE)
958 		goto out2;
959 
960 	if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
961 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
962 	else if (!(rt->dst.flags & DST_HOST))
963 		nrt = rt6_alloc_clone(rt, &fl6->daddr);
964 	else
965 		goto out2;
966 
967 	ip6_rt_put(rt);
968 	rt = nrt ? : net->ipv6.ip6_null_entry;
969 
970 	dst_hold(&rt->dst);
971 	if (nrt) {
972 		err = ip6_ins_rt(nrt);
973 		if (!err)
974 			goto out2;
975 	}
976 
977 	if (--attempts <= 0)
978 		goto out2;
979 
980 	/*
981 	 * Race condition! In the gap while table->tb6_lock was
982 	 * released, someone else could have inserted this route. Relookup.
983 	 */
984 	ip6_rt_put(rt);
985 	goto redo_fib6_lookup_lock;
986 
987 out2:
988 	rt->dst.lastuse = jiffies;
989 	rt->dst.__use++;
990 
991 	return rt;
992 }
993 
994 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
995 					    struct flowi6 *fl6, int flags)
996 {
997 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
998 }
999 
1000 static struct dst_entry *ip6_route_input_lookup(struct net *net,
1001 						struct net_device *dev,
1002 						struct flowi6 *fl6, int flags)
1003 {
1004 	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1005 		flags |= RT6_LOOKUP_F_IFACE;
1006 
1007 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1008 }
1009 
1010 void ip6_route_input(struct sk_buff *skb)
1011 {
1012 	const struct ipv6hdr *iph = ipv6_hdr(skb);
1013 	struct net *net = dev_net(skb->dev);
1014 	int flags = RT6_LOOKUP_F_HAS_SADDR;
1015 	struct flowi6 fl6 = {
1016 		.flowi6_iif = skb->dev->ifindex,
1017 		.daddr = iph->daddr,
1018 		.saddr = iph->saddr,
1019 		.flowlabel = ip6_flowinfo(iph),
1020 		.flowi6_mark = skb->mark,
1021 		.flowi6_proto = iph->nexthdr,
1022 	};
1023 
1024 	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1025 }
1026 
1027 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1028 					     struct flowi6 *fl6, int flags)
1029 {
1030 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1031 }
1032 
1033 struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
1034 				    struct flowi6 *fl6)
1035 {
1036 	int flags = 0;
1037 
1038 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
1039 
1040 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
1041 		flags |= RT6_LOOKUP_F_IFACE;
1042 
1043 	if (!ipv6_addr_any(&fl6->saddr))
1044 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1045 	else if (sk)
1046 		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1047 
1048 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1049 }
1050 EXPORT_SYMBOL(ip6_route_output);
1051 
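/*
 * ip6_blackhole_route() - make a standalone copy of @dst_orig backed
 * by ip6_dst_blackhole_ops: it keeps the original's metrics, device
 * and addresses, but discards anything sent through it and is never
 * attached to the fib.  The reference on @dst_orig is dropped.
 */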
1052 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1053 {
1054 	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1055 	struct dst_entry *new = NULL;
1056 
1057 	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1058 	if (rt) {
1059 		new = &rt->dst;
1060 
1061 		memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1062 		rt6_init_peer(rt, net->ipv6.peers);
1063 
1064 		new->__use = 1;
1065 		new->input = dst_discard;
1066 		new->output = dst_discard_sk;
1067 
1068 		if (dst_metrics_read_only(&ort->dst))
1069 			new->_metrics = ort->dst._metrics;
1070 		else
1071 			dst_copy_metrics(new, &ort->dst);
1072 		rt->rt6i_idev = ort->rt6i_idev;
1073 		if (rt->rt6i_idev)
1074 			in6_dev_hold(rt->rt6i_idev);
1075 
1076 		rt->rt6i_gateway = ort->rt6i_gateway;
1077 		rt->rt6i_flags = ort->rt6i_flags;
1078 		rt->rt6i_metric = 0;
1079 
1080 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1081 #ifdef CONFIG_IPV6_SUBTREES
1082 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1083 #endif
1084 
1085 		dst_free(new);
1086 	}
1087 
1088 	dst_release(dst_orig);
1089 	return new ? new : ERR_PTR(-ENOMEM);
1090 }
1091 
1092 /*
1093  *	Destination cache support functions
1094  */
1095 
1096 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1097 {
1098 	struct rt6_info *rt;
1099 
1100 	rt = (struct rt6_info *) dst;
1101 
1102 	/* All IPv6 dsts are created with ->obsolete set to the value
1103 	 * DST_OBSOLETE_FORCE_CHK, which forces validation calls down
1104 	 * into this function always.
1105 	 */
1106 	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1107 		return NULL;
1108 
1109 	if (rt6_check_expired(rt))
1110 		return NULL;
1111 
1112 	return dst;
1113 }
1114 
1115 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1116 {
1117 	struct rt6_info *rt = (struct rt6_info *) dst;
1118 
1119 	if (rt) {
1120 		if (rt->rt6i_flags & RTF_CACHE) {
1121 			if (rt6_check_expired(rt)) {
1122 				ip6_del_rt(rt);
1123 				dst = NULL;
1124 			}
1125 		} else {
1126 			dst_release(dst);
1127 			dst = NULL;
1128 		}
1129 	}
1130 	return dst;
1131 }
1132 
1133 static void ip6_link_failure(struct sk_buff *skb)
1134 {
1135 	struct rt6_info *rt;
1136 
1137 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1138 
1139 	rt = (struct rt6_info *) skb_dst(skb);
1140 	if (rt) {
1141 		if (rt->rt6i_flags & RTF_CACHE) {
1142 			dst_hold(&rt->dst);
1143 			if (ip6_del_rt(rt))
1144 				dst_free(&rt->dst);
1145 		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1146 			rt->rt6i_node->fn_sernum = -1;
1147 		}
1148 	}
1149 }
1150 
1151 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1152 			       struct sk_buff *skb, u32 mtu)
1153 {
1154 	struct rt6_info *rt6 = (struct rt6_info *)dst;
1155 
1156 	dst_confirm(dst);
1157 	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1158 		struct net *net = dev_net(dst->dev);
1159 
1160 		rt6->rt6i_flags |= RTF_MODIFIED;
1161 		if (mtu < IPV6_MIN_MTU)
1162 			mtu = IPV6_MIN_MTU;
1163 
1164 		dst_metric_set(dst, RTAX_MTU, mtu);
1165 		rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1166 	}
1167 }
1168 
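/*
 * ip6_update_pmtu() - build a flow from the IPv6 header embedded in
 * @skb, look up the matching route and apply the new path MTU to it.
 * Note that @mtu is __be32 (network byte order, as carried in an
 * ICMPv6 Packet Too Big message); a caller holding a host-order MTU
 * would do, roughly (illustrative only):
 *
 *	ip6_update_pmtu(skb, net, htonl(new_mtu), 0, 0);
 */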
1169 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1170 		     int oif, u32 mark)
1171 {
1172 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1173 	struct dst_entry *dst;
1174 	struct flowi6 fl6;
1175 
1176 	memset(&fl6, 0, sizeof(fl6));
1177 	fl6.flowi6_oif = oif;
1178 	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1179 	fl6.daddr = iph->daddr;
1180 	fl6.saddr = iph->saddr;
1181 	fl6.flowlabel = ip6_flowinfo(iph);
1182 
1183 	dst = ip6_route_output(net, NULL, &fl6);
1184 	if (!dst->error)
1185 		ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1186 	dst_release(dst);
1187 }
1188 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1189 
1190 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1191 {
1192 	ip6_update_pmtu(skb, sock_net(sk), mtu,
1193 			sk->sk_bound_dev_if, sk->sk_mark);
1194 }
1195 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1196 
1197 /* Handle redirects */
1198 struct ip6rd_flowi {
1199 	struct flowi6 fl6;
1200 	struct in6_addr gateway;
1201 };
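/*
 * The redirect lookup piggybacks the advertising router's address on
 * the flow (struct ip6rd_flowi above) so that __ip6_route_redirect()
 * can check, per RFC 4861, that the redirect came from the current
 * first-hop router for the destination.
 */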
1202 
1203 static struct rt6_info *__ip6_route_redirect(struct net *net,
1204 					     struct fib6_table *table,
1205 					     struct flowi6 *fl6,
1206 					     int flags)
1207 {
1208 	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1209 	struct rt6_info *rt;
1210 	struct fib6_node *fn;
1211 
1212 	/* Get the "current" route for this destination and
1213 	 * check if the redirect has come from the appropriate router.
1214 	 *
1215 	 * RFC 4861 specifies that redirects should only be
1216 	 * accepted if they come from the nexthop to the target.
1217 	 * Due to the way the routes are chosen, this notion
1218 	 * is a bit fuzzy and one might need to check all possible
1219 	 * routes.
1220 	 */
1221 
1222 	read_lock_bh(&table->tb6_lock);
1223 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1224 restart:
1225 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1226 		if (rt6_check_expired(rt))
1227 			continue;
1228 		if (rt->dst.error)
1229 			break;
1230 		if (!(rt->rt6i_flags & RTF_GATEWAY))
1231 			continue;
1232 		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1233 			continue;
1234 		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1235 			continue;
1236 		break;
1237 	}
1238 
1239 	if (!rt)
1240 		rt = net->ipv6.ip6_null_entry;
1241 	else if (rt->dst.error) {
1242 		rt = net->ipv6.ip6_null_entry;
1243 		goto out;
1244 	}
1245 
1246 	if (rt == net->ipv6.ip6_null_entry) {
1247 		fn = fib6_backtrack(fn, &fl6->saddr);
1248 		if (fn)
1249 			goto restart;
1250 	}
1251 
1252 out:
1253 	dst_hold(&rt->dst);
1254 
1255 	read_unlock_bh(&table->tb6_lock);
1256 
1257 	return rt;
1258 };
1259 
1260 static struct dst_entry *ip6_route_redirect(struct net *net,
1261 					const struct flowi6 *fl6,
1262 					const struct in6_addr *gateway)
1263 {
1264 	int flags = RT6_LOOKUP_F_HAS_SADDR;
1265 	struct ip6rd_flowi rdfl;
1266 
1267 	rdfl.fl6 = *fl6;
1268 	rdfl.gateway = *gateway;
1269 
1270 	return fib6_rule_lookup(net, &rdfl.fl6,
1271 				flags, __ip6_route_redirect);
1272 }
1273 
1274 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1275 {
1276 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1277 	struct dst_entry *dst;
1278 	struct flowi6 fl6;
1279 
1280 	memset(&fl6, 0, sizeof(fl6));
1281 	fl6.flowi6_iif = LOOPBACK_IFINDEX;
1282 	fl6.flowi6_oif = oif;
1283 	fl6.flowi6_mark = mark;
1284 	fl6.daddr = iph->daddr;
1285 	fl6.saddr = iph->saddr;
1286 	fl6.flowlabel = ip6_flowinfo(iph);
1287 
1288 	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1289 	rt6_do_redirect(dst, NULL, skb);
1290 	dst_release(dst);
1291 }
1292 EXPORT_SYMBOL_GPL(ip6_redirect);
1293 
1294 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1295 			    u32 mark)
1296 {
1297 	const struct ipv6hdr *iph = ipv6_hdr(skb);
1298 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1299 	struct dst_entry *dst;
1300 	struct flowi6 fl6;
1301 
1302 	memset(&fl6, 0, sizeof(fl6));
1303 	fl6.flowi6_iif = LOOPBACK_IFINDEX;
1304 	fl6.flowi6_oif = oif;
1305 	fl6.flowi6_mark = mark;
1306 	fl6.daddr = msg->dest;
1307 	fl6.saddr = iph->daddr;
1308 
1309 	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1310 	rt6_do_redirect(dst, NULL, skb);
1311 	dst_release(dst);
1312 }
1313 
1314 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1315 {
1316 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1317 }
1318 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1319 
1320 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1321 {
1322 	struct net_device *dev = dst->dev;
1323 	unsigned int mtu = dst_mtu(dst);
1324 	struct net *net = dev_net(dev);
1325 
1326 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1327 
1328 	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1329 		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1330 
1331 	/*
1332 	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1333 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1334 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
1335 	 * rely only on pmtu discovery"
1336 	 */
1337 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1338 		mtu = IPV6_MAXPLEN;
1339 	return mtu;
1340 }
1341 
1342 static unsigned int ip6_mtu(const struct dst_entry *dst)
1343 {
1344 	struct inet6_dev *idev;
1345 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1346 
1347 	if (mtu)
1348 		goto out;
1349 
1350 	mtu = IPV6_MIN_MTU;
1351 
1352 	rcu_read_lock();
1353 	idev = __in6_dev_get(dst->dev);
1354 	if (idev)
1355 		mtu = idev->cnf.mtu6;
1356 	rcu_read_unlock();
1357 
1358 out:
1359 	return min_t(unsigned int, mtu, IP6_MAX_MTU);
1360 }
1361 
1362 static struct dst_entry *icmp6_dst_gc_list;
1363 static DEFINE_SPINLOCK(icmp6_dst_lock);
1364 
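/*
 * dst entries used for locally generated ICMPv6/ndisc packets are not
 * inserted into the fib; instead they are chained on icmp6_dst_gc_list
 * and reclaimed by icmp6_dst_gc() once their refcount drops to zero
 * (icmp6_clean_all() walks the same list with a caller-supplied
 * predicate).
 */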
1365 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1366 				  struct flowi6 *fl6)
1367 {
1368 	struct dst_entry *dst;
1369 	struct rt6_info *rt;
1370 	struct inet6_dev *idev = in6_dev_get(dev);
1371 	struct net *net = dev_net(dev);
1372 
1373 	if (unlikely(!idev))
1374 		return ERR_PTR(-ENODEV);
1375 
1376 	rt = ip6_dst_alloc(net, dev, 0, NULL);
1377 	if (unlikely(!rt)) {
1378 		in6_dev_put(idev);
1379 		dst = ERR_PTR(-ENOMEM);
1380 		goto out;
1381 	}
1382 
1383 	rt->dst.flags |= DST_HOST;
1384 	rt->dst.output  = ip6_output;
1385 	atomic_set(&rt->dst.__refcnt, 1);
1386 	rt->rt6i_gateway  = fl6->daddr;
1387 	rt->rt6i_dst.addr = fl6->daddr;
1388 	rt->rt6i_dst.plen = 128;
1389 	rt->rt6i_idev     = idev;
1390 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1391 
1392 	spin_lock_bh(&icmp6_dst_lock);
1393 	rt->dst.next = icmp6_dst_gc_list;
1394 	icmp6_dst_gc_list = &rt->dst;
1395 	spin_unlock_bh(&icmp6_dst_lock);
1396 
1397 	fib6_force_start_gc(net);
1398 
1399 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1400 
1401 out:
1402 	return dst;
1403 }
1404 
1405 int icmp6_dst_gc(void)
1406 {
1407 	struct dst_entry *dst, **pprev;
1408 	int more = 0;
1409 
1410 	spin_lock_bh(&icmp6_dst_lock);
1411 	pprev = &icmp6_dst_gc_list;
1412 
1413 	while ((dst = *pprev) != NULL) {
1414 		if (!atomic_read(&dst->__refcnt)) {
1415 			*pprev = dst->next;
1416 			dst_free(dst);
1417 		} else {
1418 			pprev = &dst->next;
1419 			++more;
1420 		}
1421 	}
1422 
1423 	spin_unlock_bh(&icmp6_dst_lock);
1424 
1425 	return more;
1426 }
1427 
1428 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1429 			    void *arg)
1430 {
1431 	struct dst_entry *dst, **pprev;
1432 
1433 	spin_lock_bh(&icmp6_dst_lock);
1434 	pprev = &icmp6_dst_gc_list;
1435 	while ((dst = *pprev) != NULL) {
1436 		struct rt6_info *rt = (struct rt6_info *) dst;
1437 		if (func(rt, arg)) {
1438 			*pprev = dst->next;
1439 			dst_free(dst);
1440 		} else {
1441 			pprev = &dst->next;
1442 		}
1443 	}
1444 	spin_unlock_bh(&icmp6_dst_lock);
1445 }
1446 
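/*
 * ip6_dst_gc() - dst garbage collection.  Skip the expensive fib walk
 * unless the minimum GC interval has elapsed or we are over
 * ip6_rt_max_size.  ip6_rt_gc_expire grows on consecutive passes (so
 * GC gets more aggressive under pressure) and decays by the configured
 * elasticity; a nonzero return means we are still over the limit.
 */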
1447 static int ip6_dst_gc(struct dst_ops *ops)
1448 {
1449 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1450 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1451 	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1452 	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1453 	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1454 	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1455 	int entries;
1456 
1457 	entries = dst_entries_get_fast(ops);
1458 	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1459 	    entries <= rt_max_size)
1460 		goto out;
1461 
1462 	net->ipv6.ip6_rt_gc_expire++;
1463 	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1464 	entries = dst_entries_get_slow(ops);
1465 	if (entries < ops->gc_thresh)
1466 		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1467 out:
1468 	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1469 	return entries > rt_max_size;
1470 }
1471 
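/*
 * ip6_convert_metrics() - translate the RTA_METRICS netlink attributes
 * from a fib6_config into the mx6_config array consumed by fib6_add().
 * RTAX_CC_ALGO arrives as a congestion-control name and is converted
 * to its key; out-of-range types or unknown algorithm names fail with
 * -EINVAL.
 */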
1472 static int ip6_convert_metrics(struct mx6_config *mxc,
1473 			       const struct fib6_config *cfg)
1474 {
1475 	struct nlattr *nla;
1476 	int remaining;
1477 	u32 *mp;
1478 
1479 	if (!cfg->fc_mx)
1480 		return 0;
1481 
1482 	mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1483 	if (unlikely(!mp))
1484 		return -ENOMEM;
1485 
1486 	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1487 		int type = nla_type(nla);
1488 
1489 		if (type) {
1490 			u32 val;
1491 
1492 			if (unlikely(type > RTAX_MAX))
1493 				goto err;
1494 			if (type == RTAX_CC_ALGO) {
1495 				char tmp[TCP_CA_NAME_MAX];
1496 
1497 				nla_strlcpy(tmp, nla, sizeof(tmp));
1498 				val = tcp_ca_get_key_by_name(tmp);
1499 				if (val == TCP_CA_UNSPEC)
1500 					goto err;
1501 			} else {
1502 				val = nla_get_u32(nla);
1503 			}
1504 
1505 			mp[type - 1] = val;
1506 			__set_bit(type - 1, mxc->mx_valid);
1507 		}
1508 	}
1509 
1510 	mxc->mx = mp;
1511 
1512 	return 0;
1513  err:
1514 	kfree(mp);
1515 	return -EINVAL;
1516 }
1517 
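/*
 * ip6_route_add() - turn a fib6_config (from netlink, ioctl or
 * addrconf) into a new rt6_info and insert it into the right table.
 * Reject-type routes (RTN_BLACKHOLE/RTN_PROHIBIT/RTN_THROW and plain
 * RTF_REJECT) are promoted onto the loopback device; RTF_GATEWAY
 * routes require a usable, non-loopback nexthop device.
 */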
1518 int ip6_route_add(struct fib6_config *cfg)
1519 {
1520 	int err;
1521 	struct net *net = cfg->fc_nlinfo.nl_net;
1522 	struct rt6_info *rt = NULL;
1523 	struct net_device *dev = NULL;
1524 	struct inet6_dev *idev = NULL;
1525 	struct fib6_table *table;
1526 	struct mx6_config mxc = { .mx = NULL, };
1527 	int addr_type;
1528 
1529 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1530 		return -EINVAL;
1531 #ifndef CONFIG_IPV6_SUBTREES
1532 	if (cfg->fc_src_len)
1533 		return -EINVAL;
1534 #endif
1535 	if (cfg->fc_ifindex) {
1536 		err = -ENODEV;
1537 		dev = dev_get_by_index(net, cfg->fc_ifindex);
1538 		if (!dev)
1539 			goto out;
1540 		idev = in6_dev_get(dev);
1541 		if (!idev)
1542 			goto out;
1543 	}
1544 
1545 	if (cfg->fc_metric == 0)
1546 		cfg->fc_metric = IP6_RT_PRIO_USER;
1547 
1548 	err = -ENOBUFS;
1549 	if (cfg->fc_nlinfo.nlh &&
1550 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1551 		table = fib6_get_table(net, cfg->fc_table);
1552 		if (!table) {
1553 			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1554 			table = fib6_new_table(net, cfg->fc_table);
1555 		}
1556 	} else {
1557 		table = fib6_new_table(net, cfg->fc_table);
1558 	}
1559 
1560 	if (!table)
1561 		goto out;
1562 
1563 	rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
1564 
1565 	if (!rt) {
1566 		err = -ENOMEM;
1567 		goto out;
1568 	}
1569 
1570 	if (cfg->fc_flags & RTF_EXPIRES)
1571 		rt6_set_expires(rt, jiffies +
1572 				clock_t_to_jiffies(cfg->fc_expires));
1573 	else
1574 		rt6_clean_expires(rt);
1575 
1576 	if (cfg->fc_protocol == RTPROT_UNSPEC)
1577 		cfg->fc_protocol = RTPROT_BOOT;
1578 	rt->rt6i_protocol = cfg->fc_protocol;
1579 
1580 	addr_type = ipv6_addr_type(&cfg->fc_dst);
1581 
1582 	if (addr_type & IPV6_ADDR_MULTICAST)
1583 		rt->dst.input = ip6_mc_input;
1584 	else if (cfg->fc_flags & RTF_LOCAL)
1585 		rt->dst.input = ip6_input;
1586 	else
1587 		rt->dst.input = ip6_forward;
1588 
1589 	rt->dst.output = ip6_output;
1590 
1591 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1592 	rt->rt6i_dst.plen = cfg->fc_dst_len;
1593 	if (rt->rt6i_dst.plen == 128) {
1594 		rt->dst.flags |= DST_HOST;
1595 		dst_metrics_set_force_overwrite(&rt->dst);
1596 	}
1597 
1598 #ifdef CONFIG_IPV6_SUBTREES
1599 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1600 	rt->rt6i_src.plen = cfg->fc_src_len;
1601 #endif
1602 
1603 	rt->rt6i_metric = cfg->fc_metric;
1604 
1605 	/* We cannot add true routes via loopback here;
1606 	   they would result in kernel looping. Promote them to reject routes.
1607 	 */
1608 	if ((cfg->fc_flags & RTF_REJECT) ||
1609 	    (dev && (dev->flags & IFF_LOOPBACK) &&
1610 	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
1611 	     !(cfg->fc_flags & RTF_LOCAL))) {
1612 		/* hold loopback dev/idev if we haven't done so. */
1613 		if (dev != net->loopback_dev) {
1614 			if (dev) {
1615 				dev_put(dev);
1616 				in6_dev_put(idev);
1617 			}
1618 			dev = net->loopback_dev;
1619 			dev_hold(dev);
1620 			idev = in6_dev_get(dev);
1621 			if (!idev) {
1622 				err = -ENODEV;
1623 				goto out;
1624 			}
1625 		}
1626 		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1627 		switch (cfg->fc_type) {
1628 		case RTN_BLACKHOLE:
1629 			rt->dst.error = -EINVAL;
1630 			rt->dst.output = dst_discard_sk;
1631 			rt->dst.input = dst_discard;
1632 			break;
1633 		case RTN_PROHIBIT:
1634 			rt->dst.error = -EACCES;
1635 			rt->dst.output = ip6_pkt_prohibit_out;
1636 			rt->dst.input = ip6_pkt_prohibit;
1637 			break;
1638 		case RTN_THROW:
1639 		default:
1640 			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1641 					: -ENETUNREACH;
1642 			rt->dst.output = ip6_pkt_discard_out;
1643 			rt->dst.input = ip6_pkt_discard;
1644 			break;
1645 		}
1646 		goto install_route;
1647 	}
1648 
1649 	if (cfg->fc_flags & RTF_GATEWAY) {
1650 		const struct in6_addr *gw_addr;
1651 		int gwa_type;
1652 
1653 		gw_addr = &cfg->fc_gateway;
1654 		rt->rt6i_gateway = *gw_addr;
1655 		gwa_type = ipv6_addr_type(gw_addr);
1656 
1657 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1658 			struct rt6_info *grt;
1659 
1660 			/* IPv6 strictly prohibits using non-link-local
1661 			   addresses as the nexthop address.
1662 			   Otherwise, the router will not be able to send redirects.
1663 			   That is very good, but in some (rare!) circumstances
1664 			   (SIT, PtP, NBMA NOARP links) it is handy to allow
1665 			   some exceptions. --ANK
1666 			 */
1667 			err = -EINVAL;
1668 			if (!(gwa_type & IPV6_ADDR_UNICAST))
1669 				goto out;
1670 
1671 			grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1672 
1673 			err = -EHOSTUNREACH;
1674 			if (!grt)
1675 				goto out;
1676 			if (dev) {
1677 				if (dev != grt->dst.dev) {
1678 					ip6_rt_put(grt);
1679 					goto out;
1680 				}
1681 			} else {
1682 				dev = grt->dst.dev;
1683 				idev = grt->rt6i_idev;
1684 				dev_hold(dev);
1685 				in6_dev_hold(grt->rt6i_idev);
1686 			}
1687 			if (!(grt->rt6i_flags & RTF_GATEWAY))
1688 				err = 0;
1689 			ip6_rt_put(grt);
1690 
1691 			if (err)
1692 				goto out;
1693 		}
1694 		err = -EINVAL;
1695 		if (!dev || (dev->flags & IFF_LOOPBACK))
1696 			goto out;
1697 	}
1698 
1699 	err = -ENODEV;
1700 	if (!dev)
1701 		goto out;
1702 
1703 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1704 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1705 			err = -EINVAL;
1706 			goto out;
1707 		}
1708 		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1709 		rt->rt6i_prefsrc.plen = 128;
1710 	} else
1711 		rt->rt6i_prefsrc.plen = 0;
1712 
1713 	rt->rt6i_flags = cfg->fc_flags;
1714 
1715 install_route:
1716 	rt->dst.dev = dev;
1717 	rt->rt6i_idev = idev;
1718 	rt->rt6i_table = table;
1719 
1720 	cfg->fc_nlinfo.nl_net = dev_net(dev);
1721 
1722 	err = ip6_convert_metrics(&mxc, cfg);
1723 	if (err)
1724 		goto out;
1725 
1726 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
1727 
1728 	kfree(mxc.mx);
1729 	return err;
1730 out:
1731 	if (dev)
1732 		dev_put(dev);
1733 	if (idev)
1734 		in6_dev_put(idev);
1735 	if (rt)
1736 		dst_free(&rt->dst);
1737 	return err;
1738 }
1739 
1740 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1741 {
1742 	int err;
1743 	struct fib6_table *table;
1744 	struct net *net = dev_net(rt->dst.dev);
1745 
1746 	if (rt == net->ipv6.ip6_null_entry) {
1747 		err = -ENOENT;
1748 		goto out;
1749 	}
1750 
1751 	table = rt->rt6i_table;
1752 	write_lock_bh(&table->tb6_lock);
1753 	err = fib6_del(rt, info);
1754 	write_unlock_bh(&table->tb6_lock);
1755 
1756 out:
1757 	ip6_rt_put(rt);
1758 	return err;
1759 }
1760 
1761 int ip6_del_rt(struct rt6_info *rt)
1762 {
1763 	struct nl_info info = {
1764 		.nl_net = dev_net(rt->dst.dev),
1765 	};
1766 	return __ip6_del_rt(rt, &info);
1767 }
1768 
1769 static int ip6_route_del(struct fib6_config *cfg)
1770 {
1771 	struct fib6_table *table;
1772 	struct fib6_node *fn;
1773 	struct rt6_info *rt;
1774 	int err = -ESRCH;
1775 
1776 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1777 	if (!table)
1778 		return err;
1779 
1780 	read_lock_bh(&table->tb6_lock);
1781 
1782 	fn = fib6_locate(&table->tb6_root,
1783 			 &cfg->fc_dst, cfg->fc_dst_len,
1784 			 &cfg->fc_src, cfg->fc_src_len);
1785 
1786 	if (fn) {
1787 		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1788 			if (cfg->fc_ifindex &&
1789 			    (!rt->dst.dev ||
1790 			     rt->dst.dev->ifindex != cfg->fc_ifindex))
1791 				continue;
1792 			if (cfg->fc_flags & RTF_GATEWAY &&
1793 			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1794 				continue;
1795 			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1796 				continue;
1797 			dst_hold(&rt->dst);
1798 			read_unlock_bh(&table->tb6_lock);
1799 
1800 			return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1801 		}
1802 	}
1803 	read_unlock_bh(&table->tb6_lock);
1804 
1805 	return err;
1806 }
1807 
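/*
 * rt6_do_redirect() - handle an ICMPv6 Redirect.  The message is
 * validated per RFC 4861 (sane length, unicast destination, link-local
 * unicast target unless on-link, forwarding disabled and redirects
 * accepted, valid ND options), the neighbour entry for the new first
 * hop is updated, and an RTF_CACHE|RTF_DYNAMIC clone pointing at the
 * new gateway is installed in place of the old cached route.
 */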
1808 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1809 {
1810 	struct net *net = dev_net(skb->dev);
1811 	struct netevent_redirect netevent;
1812 	struct rt6_info *rt, *nrt = NULL;
1813 	struct ndisc_options ndopts;
1814 	struct inet6_dev *in6_dev;
1815 	struct neighbour *neigh;
1816 	struct rd_msg *msg;
1817 	int optlen, on_link;
1818 	u8 *lladdr;
1819 
1820 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1821 	optlen -= sizeof(*msg);
1822 
1823 	if (optlen < 0) {
1824 		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1825 		return;
1826 	}
1827 
1828 	msg = (struct rd_msg *)icmp6_hdr(skb);
1829 
1830 	if (ipv6_addr_is_multicast(&msg->dest)) {
1831 		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1832 		return;
1833 	}
1834 
1835 	on_link = 0;
1836 	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1837 		on_link = 1;
1838 	} else if (ipv6_addr_type(&msg->target) !=
1839 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1840 		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1841 		return;
1842 	}
1843 
1844 	in6_dev = __in6_dev_get(skb->dev);
1845 	if (!in6_dev)
1846 		return;
1847 	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1848 		return;
1849 
1850 	/* RFC2461 8.1:
1851 	 *	The IP source address of the Redirect MUST be the same as the current
1852 	 *	first-hop router for the specified ICMP Destination Address.
1853 	 */
1854 
1855 	if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1856 		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1857 		return;
1858 	}
1859 
1860 	lladdr = NULL;
1861 	if (ndopts.nd_opts_tgt_lladdr) {
1862 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1863 					     skb->dev);
1864 		if (!lladdr) {
1865 			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1866 			return;
1867 		}
1868 	}
1869 
1870 	rt = (struct rt6_info *) dst;
1871 	if (rt == net->ipv6.ip6_null_entry) {
1872 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1873 		return;
1874 	}
1875 
1876 	/* Redirect received -> path was valid.
1877 	 * Look, redirects are sent only in response to data packets,
1878 	 * so this nexthop apparently is reachable. --ANK
1879 	 */
1880 	dst_confirm(&rt->dst);
1881 
1882 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1883 	if (!neigh)
1884 		return;
1885 
1886 	/*
1887 	 *	We have finally decided to accept it.
1888 	 */
1889 
1890 	neigh_update(neigh, lladdr, NUD_STALE,
1891 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
1892 		     NEIGH_UPDATE_F_OVERRIDE|
1893 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1894 				     NEIGH_UPDATE_F_ISROUTER))
1895 		     );
1896 
1897 	nrt = ip6_rt_copy(rt, &msg->dest);
1898 	if (!nrt)
1899 		goto out;
1900 
1901 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1902 	if (on_link)
1903 		nrt->rt6i_flags &= ~RTF_GATEWAY;
1904 
1905 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1906 
1907 	if (ip6_ins_rt(nrt))
1908 		goto out;
1909 
1910 	netevent.old = &rt->dst;
1911 	netevent.new = &nrt->dst;
1912 	netevent.daddr = &msg->dest;
1913 	netevent.neigh = neigh;
1914 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1915 
1916 	if (rt->rt6i_flags & RTF_CACHE) {
1917 		rt = (struct rt6_info *) dst_clone(&rt->dst);
1918 		ip6_del_rt(rt);
1919 	}
1920 
1921 out:
1922 	neigh_release(neigh);
1923 }
1924 
1925 /*
1926  *	Misc support functions
1927  */
1928 
1929 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1930 				    const struct in6_addr *dest)
1931 {
1932 	struct net *net = dev_net(ort->dst.dev);
1933 	struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1934 					    ort->rt6i_table);
1935 
1936 	if (rt) {
1937 		rt->dst.input = ort->dst.input;
1938 		rt->dst.output = ort->dst.output;
1939 		rt->dst.flags |= DST_HOST;
1940 
1941 		rt->rt6i_dst.addr = *dest;
1942 		rt->rt6i_dst.plen = 128;
1943 		dst_copy_metrics(&rt->dst, &ort->dst);
1944 		rt->dst.error = ort->dst.error;
1945 		rt->rt6i_idev = ort->rt6i_idev;
1946 		if (rt->rt6i_idev)
1947 			in6_dev_hold(rt->rt6i_idev);
1948 		rt->dst.lastuse = jiffies;
1949 
1950 		if (ort->rt6i_flags & RTF_GATEWAY)
1951 			rt->rt6i_gateway = ort->rt6i_gateway;
1952 		else
1953 			rt->rt6i_gateway = *dest;
1954 		rt->rt6i_flags = ort->rt6i_flags;
1955 		rt6_set_from(rt, ort);
1956 		rt->rt6i_metric = 0;
1957 
1958 #ifdef CONFIG_IPV6_SUBTREES
1959 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1960 #endif
1961 		memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1962 		rt->rt6i_table = ort->rt6i_table;
1963 	}
1964 	return rt;
1965 }
1966 
1967 #ifdef CONFIG_IPV6_ROUTE_INFO
1968 static struct rt6_info *rt6_get_route_info(struct net *net,
1969 					   const struct in6_addr *prefix, int prefixlen,
1970 					   const struct in6_addr *gwaddr, int ifindex)
1971 {
1972 	struct fib6_node *fn;
1973 	struct rt6_info *rt = NULL;
1974 	struct fib6_table *table;
1975 
1976 	table = fib6_get_table(net, RT6_TABLE_INFO);
1977 	if (!table)
1978 		return NULL;
1979 
1980 	read_lock_bh(&table->tb6_lock);
1981 	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
1982 	if (!fn)
1983 		goto out;
1984 
1985 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1986 		if (rt->dst.dev->ifindex != ifindex)
1987 			continue;
1988 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1989 			continue;
1990 		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1991 			continue;
1992 		dst_hold(&rt->dst);
1993 		break;
1994 	}
1995 out:
1996 	read_unlock_bh(&table->tb6_lock);
1997 	return rt;
1998 }
1999 
2000 static struct rt6_info *rt6_add_route_info(struct net *net,
2001 					   const struct in6_addr *prefix, int prefixlen,
2002 					   const struct in6_addr *gwaddr, int ifindex,
2003 					   unsigned int pref)
2004 {
2005 	struct fib6_config cfg = {
2006 		.fc_table	= RT6_TABLE_INFO,
2007 		.fc_metric	= IP6_RT_PRIO_USER,
2008 		.fc_ifindex	= ifindex,
2009 		.fc_dst_len	= prefixlen,
2010 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2011 				  RTF_UP | RTF_PREF(pref),
2012 		.fc_nlinfo.portid = 0,
2013 		.fc_nlinfo.nlh = NULL,
2014 		.fc_nlinfo.nl_net = net,
2015 	};
2016 
2017 	cfg.fc_dst = *prefix;
2018 	cfg.fc_gateway = *gwaddr;
2019 
2020 	/* We should treat it as a default route if prefix length is 0. */
2021 	if (!prefixlen)
2022 		cfg.fc_flags |= RTF_DEFAULT;
2023 
2024 	ip6_route_add(&cfg);
2025 
2026 	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
2027 }
2028 #endif
2029 
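/* Default routers learned from Router Advertisements live in
 * RT6_TABLE_DFLT with RTF_ADDRCONF | RTF_DEFAULT set.  The helpers below
 * look one up by gateway and device, add a new one, and purge them all
 * (except on interfaces with accept_ra == 2).
 */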
2030 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2031 {
2032 	struct rt6_info *rt;
2033 	struct fib6_table *table;
2034 
2035 	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
2036 	if (!table)
2037 		return NULL;
2038 
2039 	read_lock_bh(&table->tb6_lock);
2040 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2041 		if (dev == rt->dst.dev &&
2042 		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2043 		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
2044 			break;
2045 	}
2046 	if (rt)
2047 		dst_hold(&rt->dst);
2048 	read_unlock_bh(&table->tb6_lock);
2049 	return rt;
2050 }
2051 
2052 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2053 				     struct net_device *dev,
2054 				     unsigned int pref)
2055 {
2056 	struct fib6_config cfg = {
2057 		.fc_table	= RT6_TABLE_DFLT,
2058 		.fc_metric	= IP6_RT_PRIO_USER,
2059 		.fc_ifindex	= dev->ifindex,
2060 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2061 				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2062 		.fc_nlinfo.portid = 0,
2063 		.fc_nlinfo.nlh = NULL,
2064 		.fc_nlinfo.nl_net = dev_net(dev),
2065 	};
2066 
2067 	cfg.fc_gateway = *gwaddr;
2068 
2069 	ip6_route_add(&cfg);
2070 
2071 	return rt6_get_dflt_router(gwaddr, dev);
2072 }
2073 
2074 void rt6_purge_dflt_routers(struct net *net)
2075 {
2076 	struct rt6_info *rt;
2077 	struct fib6_table *table;
2078 
2079 	/* NOTE: Keep consistent with rt6_get_dflt_router */
2080 	table = fib6_get_table(net, RT6_TABLE_DFLT);
2081 	if (!table)
2082 		return;
2083 
2084 restart:
2085 	read_lock_bh(&table->tb6_lock);
2086 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2087 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2088 		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2089 			dst_hold(&rt->dst);
2090 			read_unlock_bh(&table->tb6_lock);
2091 			ip6_del_rt(rt);
2092 			goto restart;
2093 		}
2094 	}
2095 	read_unlock_bh(&table->tb6_lock);
2096 }
2097 
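/* Translate the legacy ioctl struct in6_rtmsg into the common
 * struct fib6_config consumed by ip6_route_add()/ip6_route_del().
 */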
2098 static void rtmsg_to_fib6_config(struct net *net,
2099 				 struct in6_rtmsg *rtmsg,
2100 				 struct fib6_config *cfg)
2101 {
2102 	memset(cfg, 0, sizeof(*cfg));
2103 
2104 	cfg->fc_table = RT6_TABLE_MAIN;
2105 	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2106 	cfg->fc_metric = rtmsg->rtmsg_metric;
2107 	cfg->fc_expires = rtmsg->rtmsg_info;
2108 	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2109 	cfg->fc_src_len = rtmsg->rtmsg_src_len;
2110 	cfg->fc_flags = rtmsg->rtmsg_flags;
2111 
2112 	cfg->fc_nlinfo.nl_net = net;
2113 
2114 	cfg->fc_dst = rtmsg->rtmsg_dst;
2115 	cfg->fc_src = rtmsg->rtmsg_src;
2116 	cfg->fc_gateway = rtmsg->rtmsg_gateway;
2117 }
2118 
2119 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2120 {
2121 	struct fib6_config cfg;
2122 	struct in6_rtmsg rtmsg;
2123 	int err;
2124 
2125 	switch (cmd) {
2126 	case SIOCADDRT:		/* Add a route */
2127 	case SIOCDELRT:		/* Delete a route */
2128 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2129 			return -EPERM;
2130 		err = copy_from_user(&rtmsg, arg,
2131 				     sizeof(struct in6_rtmsg));
2132 		if (err)
2133 			return -EFAULT;
2134 
2135 		rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2136 
2137 		rtnl_lock();
2138 		switch (cmd) {
2139 		case SIOCADDRT:
2140 			err = ip6_route_add(&cfg);
2141 			break;
2142 		case SIOCDELRT:
2143 			err = ip6_route_del(&cfg);
2144 			break;
2145 		default:
2146 			err = -EINVAL;
2147 		}
2148 		rtnl_unlock();
2149 
2150 		return err;
2151 	}
2152 
2153 	return -EINVAL;
2154 }
2155 
2156 /*
2157  *	Drop the packet on the floor
2158  */
2159 
2160 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2161 {
2162 	int type;
2163 	struct dst_entry *dst = skb_dst(skb);
2164 	switch (ipstats_mib_noroutes) {
2165 	case IPSTATS_MIB_INNOROUTES:
2166 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2167 		if (type == IPV6_ADDR_ANY) {
2168 			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2169 				      IPSTATS_MIB_INADDRERRORS);
2170 			break;
2171 		}
2172 		/* FALLTHROUGH */
2173 	case IPSTATS_MIB_OUTNOROUTES:
2174 		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2175 			      ipstats_mib_noroutes);
2176 		break;
2177 	}
2178 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2179 	kfree_skb(skb);
2180 	return 0;
2181 }
2182 
2183 static int ip6_pkt_discard(struct sk_buff *skb)
2184 {
2185 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2186 }
2187 
2188 static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
2189 {
2190 	skb->dev = skb_dst(skb)->dev;
2191 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2192 }
2193 
2194 static int ip6_pkt_prohibit(struct sk_buff *skb)
2195 {
2196 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2197 }
2198 
2199 static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
2200 {
2201 	skb->dev = skb_dst(skb)->dev;
2202 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2203 }
2204 
2205 /*
2206  *	Allocate a dst for local (unicast / anycast) address.
2207  */
2208 
2209 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2210 				    const struct in6_addr *addr,
2211 				    bool anycast)
2212 {
2213 	struct net *net = dev_net(idev->dev);
2214 	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2215 					    DST_NOCOUNT, NULL);
2216 	if (!rt)
2217 		return ERR_PTR(-ENOMEM);
2218 
2219 	in6_dev_hold(idev);
2220 
2221 	rt->dst.flags |= DST_HOST;
2222 	rt->dst.input = ip6_input;
2223 	rt->dst.output = ip6_output;
2224 	rt->rt6i_idev = idev;
2225 
2226 	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2227 	if (anycast)
2228 		rt->rt6i_flags |= RTF_ANYCAST;
2229 	else
2230 		rt->rt6i_flags |= RTF_LOCAL;
2231 
2232 	rt->rt6i_gateway  = *addr;
2233 	rt->rt6i_dst.addr = *addr;
2234 	rt->rt6i_dst.plen = 128;
2235 	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2236 
2237 	atomic_set(&rt->dst.__refcnt, 1);
2238 
2239 	return rt;
2240 }
2241 
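/* Pick a source address for @daddr: prefer the route's preferred source
 * (rt6i_prefsrc) if one is set, otherwise fall back to the generic
 * source address selection in ipv6_dev_get_saddr().
 */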
2242 int ip6_route_get_saddr(struct net *net,
2243 			struct rt6_info *rt,
2244 			const struct in6_addr *daddr,
2245 			unsigned int prefs,
2246 			struct in6_addr *saddr)
2247 {
2248 	struct inet6_dev *idev =
2249 		rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
2250 	int err = 0;
2251 	if (rt && rt->rt6i_prefsrc.plen)
2252 		*saddr = rt->rt6i_prefsrc.addr;
2253 	else
2254 		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2255 					 daddr, prefs, saddr);
2256 	return err;
2257 }
2258 
2259 /* Remove a deleted address from prefsrc entries. */
2260 struct arg_dev_net_ip {
2261 	struct net_device *dev;
2262 	struct net *net;
2263 	struct in6_addr *addr;
2264 };
2265 
2266 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2267 {
2268 	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2269 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2270 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2271 
2272 	if (((void *)rt->dst.dev == dev || !dev) &&
2273 	    rt != net->ipv6.ip6_null_entry &&
2274 	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2275 		/* remove prefsrc entry */
2276 		rt->rt6i_prefsrc.plen = 0;
2277 	}
2278 	return 0;
2279 }
2280 
2281 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2282 {
2283 	struct net *net = dev_net(ifp->idev->dev);
2284 	struct arg_dev_net_ip adni = {
2285 		.dev = ifp->idev->dev,
2286 		.net = net,
2287 		.addr = &ifp->addr,
2288 	};
2289 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2290 }
2291 
2292 #define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2293 #define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
2294 
2295 /* Remove routers and update dst entries when the gateway turns into a host. */
2296 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2297 {
2298 	struct in6_addr *gateway = (struct in6_addr *)arg;
2299 
2300 	if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2301 	     ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2302 	     ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2303 		return -1;
2304 	}
2305 	return 0;
2306 }
2307 
2308 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2309 {
2310 	fib6_clean_all(net, fib6_clean_tohost, gateway);
2311 }
2312 
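/* rt6_ifdown(): drop every route that goes through @dev (or all device
 * routes when @dev is NULL), except the per-netns null entry.
 */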
2313 struct arg_dev_net {
2314 	struct net_device *dev;
2315 	struct net *net;
2316 };
2317 
2318 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2319 {
2320 	const struct arg_dev_net *adn = arg;
2321 	const struct net_device *dev = adn->dev;
2322 
2323 	if ((rt->dst.dev == dev || !dev) &&
2324 	    rt != adn->net->ipv6.ip6_null_entry)
2325 		return -1;
2326 
2327 	return 0;
2328 }
2329 
2330 void rt6_ifdown(struct net *net, struct net_device *dev)
2331 {
2332 	struct arg_dev_net adn = {
2333 		.dev = dev,
2334 		.net = net,
2335 	};
2336 
2337 	fib6_clean_all(net, fib6_ifdown, &adn);
2338 	icmp6_clean_all(fib6_ifdown, &adn);
2339 }
2340 
2341 struct rt6_mtu_change_arg {
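/* Device MTU change handling: rt6_mtu_change() walks the FIB and lets
 * rt6_mtu_change_route() update the cached RTAX_MTU metric of routes
 * that go through the changed device.
 */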
2342 	struct net_device *dev;
2343 	unsigned int mtu;
2344 };
2345 
2346 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2347 {
2348 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2349 	struct inet6_dev *idev;
2350 
2351 	/* In IPv6, PMTU discovery is not optional,
2352 	   so the RTAX_MTU lock cannot disable it.
2353 	   We still use this lock to block changes
2354 	   caused by addrconf/ndisc.
2355 	*/
2356 
2357 	idev = __in6_dev_get(arg->dev);
2358 	if (!idev)
2359 		return 0;
2360 
2361 	/* For an administrative MTU increase, there is no way to discover
2362 	   an IPv6 PMTU increase, so the PMTU increase must be handled here.
2363 	   Since RFC 1981 doesn't cover administrative MTU increases,
2364 	   updating the PMTU on increase is a MUST here (e.g. jumbo frames).
2365 	 */
2366 	/*
2367 	   If the new MTU is less than the route PMTU, the new MTU will be
2368 	   the lowest MTU in the path; update the route PMTU to reflect the
2369 	   decrease.  If the new MTU is greater than the route PMTU, and the
2370 	   old MTU was the lowest MTU in the path, update the route PMTU to
2371 	   reflect the increase.  In that case, if other nodes on the path
2372 	   still have the lowest MTU, a Packet Too Big message will trigger
2373 	   PMTU discovery again.
2374 	 */
2375 	if (rt->dst.dev == arg->dev &&
2376 	    !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2377 	    (dst_mtu(&rt->dst) >= arg->mtu ||
2378 	     (dst_mtu(&rt->dst) < arg->mtu &&
2379 	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2380 		dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2381 	}
2382 	return 0;
2383 }
2384 
2385 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2386 {
2387 	struct rt6_mtu_change_arg arg = {
2388 		.dev = dev,
2389 		.mtu = mtu,
2390 	};
2391 
2392 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2393 }
2394 
2395 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2396 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
2397 	[RTA_OIF]               = { .type = NLA_U32 },
2398 	[RTA_IIF]		= { .type = NLA_U32 },
2399 	[RTA_PRIORITY]          = { .type = NLA_U32 },
2400 	[RTA_METRICS]           = { .type = NLA_NESTED },
2401 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
2402 	[RTA_PREF]              = { .type = NLA_U8 },
2403 };
2404 
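/* Parse an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * struct fib6_config, after validating its attributes against
 * rtm_ipv6_policy above.
 */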
2405 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2406 			      struct fib6_config *cfg)
2407 {
2408 	struct rtmsg *rtm;
2409 	struct nlattr *tb[RTA_MAX+1];
2410 	unsigned int pref;
2411 	int err;
2412 
2413 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2414 	if (err < 0)
2415 		goto errout;
2416 
2417 	err = -EINVAL;
2418 	rtm = nlmsg_data(nlh);
2419 	memset(cfg, 0, sizeof(*cfg));
2420 
2421 	cfg->fc_table = rtm->rtm_table;
2422 	cfg->fc_dst_len = rtm->rtm_dst_len;
2423 	cfg->fc_src_len = rtm->rtm_src_len;
2424 	cfg->fc_flags = RTF_UP;
2425 	cfg->fc_protocol = rtm->rtm_protocol;
2426 	cfg->fc_type = rtm->rtm_type;
2427 
2428 	if (rtm->rtm_type == RTN_UNREACHABLE ||
2429 	    rtm->rtm_type == RTN_BLACKHOLE ||
2430 	    rtm->rtm_type == RTN_PROHIBIT ||
2431 	    rtm->rtm_type == RTN_THROW)
2432 		cfg->fc_flags |= RTF_REJECT;
2433 
2434 	if (rtm->rtm_type == RTN_LOCAL)
2435 		cfg->fc_flags |= RTF_LOCAL;
2436 
2437 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2438 	cfg->fc_nlinfo.nlh = nlh;
2439 	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2440 
2441 	if (tb[RTA_GATEWAY]) {
2442 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
2443 		cfg->fc_flags |= RTF_GATEWAY;
2444 	}
2445 
2446 	if (tb[RTA_DST]) {
2447 		int plen = (rtm->rtm_dst_len + 7) >> 3;
2448 
2449 		if (nla_len(tb[RTA_DST]) < plen)
2450 			goto errout;
2451 
2452 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2453 	}
2454 
2455 	if (tb[RTA_SRC]) {
2456 		int plen = (rtm->rtm_src_len + 7) >> 3;
2457 
2458 		if (nla_len(tb[RTA_SRC]) < plen)
2459 			goto errout;
2460 
2461 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2462 	}
2463 
2464 	if (tb[RTA_PREFSRC])
2465 		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
2466 
2467 	if (tb[RTA_OIF])
2468 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2469 
2470 	if (tb[RTA_PRIORITY])
2471 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2472 
2473 	if (tb[RTA_METRICS]) {
2474 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2475 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2476 	}
2477 
2478 	if (tb[RTA_TABLE])
2479 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2480 
2481 	if (tb[RTA_MULTIPATH]) {
2482 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2483 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2484 	}
2485 
2486 	if (tb[RTA_PREF]) {
2487 		pref = nla_get_u8(tb[RTA_PREF]);
2488 		if (pref != ICMPV6_ROUTER_PREF_LOW &&
2489 		    pref != ICMPV6_ROUTER_PREF_HIGH)
2490 			pref = ICMPV6_ROUTER_PREF_MEDIUM;
2491 		cfg->fc_flags |= RTF_PREF(pref);
2492 	}
2493 
2494 	err = 0;
2495 errout:
2496 	return err;
2497 }
2498 
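/* Add or delete one route per nexthop in an RTA_MULTIPATH attribute.
 * On a failed add, the nexthops that were already installed are rolled
 * back by re-running the loop in delete mode.
 */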
2499 static int ip6_route_multipath(struct fib6_config *cfg, int add)
2500 {
2501 	struct fib6_config r_cfg;
2502 	struct rtnexthop *rtnh;
2503 	int remaining;
2504 	int attrlen;
2505 	int err = 0, last_err = 0;
2506 
2507 beginning:
2508 	rtnh = (struct rtnexthop *)cfg->fc_mp;
2509 	remaining = cfg->fc_mp_len;
2510 
2511 	/* Parse a Multipath Entry */
2512 	while (rtnh_ok(rtnh, remaining)) {
2513 		memcpy(&r_cfg, cfg, sizeof(*cfg));
2514 		if (rtnh->rtnh_ifindex)
2515 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2516 
2517 		attrlen = rtnh_attrlen(rtnh);
2518 		if (attrlen > 0) {
2519 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2520 
2521 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2522 			if (nla) {
2523 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
2524 				r_cfg.fc_flags |= RTF_GATEWAY;
2525 			}
2526 		}
2527 		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2528 		if (err) {
2529 			last_err = err;
2530 			/* If we are trying to remove a route, do not stop the
2531 			 * loop when ip6_route_del() fails (because the next hop
2532 			 * is already gone); we should try to remove all next hops.
2533 			 */
2534 			if (add) {
2535 				/* If add fails, we should try to delete all
2536 				 * next hops that have already been added.
2537 				 */
2538 				add = 0;
2539 				goto beginning;
2540 			}
2541 		}
2542 		/* Because each route is added as a single route, we remove
2543 		 * this flag after the first nexthop (if there is a collision,
2544 		 * we have already failed to add the first nexthop:
2545 		 * fib6_add_rt2node() has rejected it).
2546 		 */
2547 		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2548 		rtnh = rtnh_next(rtnh, &remaining);
2549 	}
2550 
2551 	return last_err;
2552 }
2553 
2554 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2555 {
2556 	struct fib6_config cfg;
2557 	int err;
2558 
2559 	err = rtm_to_fib6_config(skb, nlh, &cfg);
2560 	if (err < 0)
2561 		return err;
2562 
2563 	if (cfg.fc_mp)
2564 		return ip6_route_multipath(&cfg, 0);
2565 	else
2566 		return ip6_route_del(&cfg);
2567 }
2568 
2569 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2570 {
2571 	struct fib6_config cfg;
2572 	int err;
2573 
2574 	err = rtm_to_fib6_config(skb, nlh, &cfg);
2575 	if (err < 0)
2576 		return err;
2577 
2578 	if (cfg.fc_mp)
2579 		return ip6_route_multipath(&cfg, 1);
2580 	else
2581 		return ip6_route_add(&cfg);
2582 }
2583 
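/* Worst-case size of an RTM_NEWROUTE notification; used to size the
 * skb allocated in inet6_rt_notify().
 */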
2584 static inline size_t rt6_nlmsg_size(void)
2585 {
2586 	return NLMSG_ALIGN(sizeof(struct rtmsg))
2587 	       + nla_total_size(16) /* RTA_SRC */
2588 	       + nla_total_size(16) /* RTA_DST */
2589 	       + nla_total_size(16) /* RTA_GATEWAY */
2590 	       + nla_total_size(16) /* RTA_PREFSRC */
2591 	       + nla_total_size(4) /* RTA_TABLE */
2592 	       + nla_total_size(4) /* RTA_IIF */
2593 	       + nla_total_size(4) /* RTA_OIF */
2594 	       + nla_total_size(4) /* RTA_PRIORITY */
2595 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2596 	       + nla_total_size(sizeof(struct rta_cacheinfo))
2597 	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
2598 	       + nla_total_size(1); /* RTA_PREF */
2599 }
2600 
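/* Fill an rtnetlink route message for @rt.  Returns 1 (and puts nothing)
 * when @prefix is set and this is not a prefix route, 0 on success, or
 * -EMSGSIZE if the message does not fit in @skb.
 */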
2601 static int rt6_fill_node(struct net *net,
2602 			 struct sk_buff *skb, struct rt6_info *rt,
2603 			 struct in6_addr *dst, struct in6_addr *src,
2604 			 int iif, int type, u32 portid, u32 seq,
2605 			 int prefix, int nowait, unsigned int flags)
2606 {
2607 	struct rtmsg *rtm;
2608 	struct nlmsghdr *nlh;
2609 	long expires;
2610 	u32 table;
2611 
2612 	if (prefix) {	/* user wants prefix routes only */
2613 		if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2614 			/* success since this is not a prefix route */
2615 			return 1;
2616 		}
2617 	}
2618 
2619 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2620 	if (!nlh)
2621 		return -EMSGSIZE;
2622 
2623 	rtm = nlmsg_data(nlh);
2624 	rtm->rtm_family = AF_INET6;
2625 	rtm->rtm_dst_len = rt->rt6i_dst.plen;
2626 	rtm->rtm_src_len = rt->rt6i_src.plen;
2627 	rtm->rtm_tos = 0;
2628 	if (rt->rt6i_table)
2629 		table = rt->rt6i_table->tb6_id;
2630 	else
2631 		table = RT6_TABLE_UNSPEC;
2632 	rtm->rtm_table = table;
2633 	if (nla_put_u32(skb, RTA_TABLE, table))
2634 		goto nla_put_failure;
2635 	if (rt->rt6i_flags & RTF_REJECT) {
2636 		switch (rt->dst.error) {
2637 		case -EINVAL:
2638 			rtm->rtm_type = RTN_BLACKHOLE;
2639 			break;
2640 		case -EACCES:
2641 			rtm->rtm_type = RTN_PROHIBIT;
2642 			break;
2643 		case -EAGAIN:
2644 			rtm->rtm_type = RTN_THROW;
2645 			break;
2646 		default:
2647 			rtm->rtm_type = RTN_UNREACHABLE;
2648 			break;
2649 		}
2650 	}
2651 	else if (rt->rt6i_flags & RTF_LOCAL)
2652 		rtm->rtm_type = RTN_LOCAL;
2653 	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2654 		rtm->rtm_type = RTN_LOCAL;
2655 	else
2656 		rtm->rtm_type = RTN_UNICAST;
2657 	rtm->rtm_flags = 0;
2658 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2659 	rtm->rtm_protocol = rt->rt6i_protocol;
2660 	if (rt->rt6i_flags & RTF_DYNAMIC)
2661 		rtm->rtm_protocol = RTPROT_REDIRECT;
2662 	else if (rt->rt6i_flags & RTF_ADDRCONF) {
2663 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2664 			rtm->rtm_protocol = RTPROT_RA;
2665 		else
2666 			rtm->rtm_protocol = RTPROT_KERNEL;
2667 	}
2668 
2669 	if (rt->rt6i_flags & RTF_CACHE)
2670 		rtm->rtm_flags |= RTM_F_CLONED;
2671 
2672 	if (dst) {
2673 		if (nla_put_in6_addr(skb, RTA_DST, dst))
2674 			goto nla_put_failure;
2675 		rtm->rtm_dst_len = 128;
2676 	} else if (rtm->rtm_dst_len)
2677 		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
2678 			goto nla_put_failure;
2679 #ifdef CONFIG_IPV6_SUBTREES
2680 	if (src) {
2681 		if (nla_put_in6_addr(skb, RTA_SRC, src))
2682 			goto nla_put_failure;
2683 		rtm->rtm_src_len = 128;
2684 	} else if (rtm->rtm_src_len &&
2685 		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
2686 		goto nla_put_failure;
2687 #endif
2688 	if (iif) {
2689 #ifdef CONFIG_IPV6_MROUTE
2690 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2691 			int err = ip6mr_get_route(net, skb, rtm, nowait);
2692 			if (err <= 0) {
2693 				if (!nowait) {
2694 					if (err == 0)
2695 						return 0;
2696 					goto nla_put_failure;
2697 				} else {
2698 					if (err == -EMSGSIZE)
2699 						goto nla_put_failure;
2700 				}
2701 			}
2702 		} else
2703 #endif
2704 			if (nla_put_u32(skb, RTA_IIF, iif))
2705 				goto nla_put_failure;
2706 	} else if (dst) {
2707 		struct in6_addr saddr_buf;
2708 		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2709 		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
2710 			goto nla_put_failure;
2711 	}
2712 
2713 	if (rt->rt6i_prefsrc.plen) {
2714 		struct in6_addr saddr_buf;
2715 		saddr_buf = rt->rt6i_prefsrc.addr;
2716 		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
2717 			goto nla_put_failure;
2718 	}
2719 
2720 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2721 		goto nla_put_failure;
2722 
2723 	if (rt->rt6i_flags & RTF_GATEWAY) {
2724 		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
2725 			goto nla_put_failure;
2726 	}
2727 
2728 	if (rt->dst.dev &&
2729 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2730 		goto nla_put_failure;
2731 	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2732 		goto nla_put_failure;
2733 
2734 	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2735 
2736 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2737 		goto nla_put_failure;
2738 
2739 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
2740 		goto nla_put_failure;
2741 
2742 	nlmsg_end(skb, nlh);
2743 	return 0;
2744 
2745 nla_put_failure:
2746 	nlmsg_cancel(skb, nlh);
2747 	return -EMSGSIZE;
2748 }
2749 
2750 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
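/* Per-route callback used while dumping the routing table over netlink;
 * honours the RTM_F_PREFIX filter supplied in the dump request.
 */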
2751 {
2752 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2753 	int prefix;
2754 
2755 	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2756 		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2757 		prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2758 	} else
2759 		prefix = 0;
2760 
2761 	return rt6_fill_node(arg->net,
2762 		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2763 		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2764 		     prefix, 0, NLM_F_MULTI);
2765 }
2766 
2767 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
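/* Handle a single RTM_GETROUTE request: perform an input or output route
 * lookup for the given addresses/interfaces and return the result to the
 * requester as an RTM_NEWROUTE message.
 */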
2768 {
2769 	struct net *net = sock_net(in_skb->sk);
2770 	struct nlattr *tb[RTA_MAX+1];
2771 	struct rt6_info *rt;
2772 	struct sk_buff *skb;
2773 	struct rtmsg *rtm;
2774 	struct flowi6 fl6;
2775 	int err, iif = 0, oif = 0;
2776 
2777 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2778 	if (err < 0)
2779 		goto errout;
2780 
2781 	err = -EINVAL;
2782 	memset(&fl6, 0, sizeof(fl6));
2783 
2784 	if (tb[RTA_SRC]) {
2785 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2786 			goto errout;
2787 
2788 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2789 	}
2790 
2791 	if (tb[RTA_DST]) {
2792 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2793 			goto errout;
2794 
2795 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2796 	}
2797 
2798 	if (tb[RTA_IIF])
2799 		iif = nla_get_u32(tb[RTA_IIF]);
2800 
2801 	if (tb[RTA_OIF])
2802 		oif = nla_get_u32(tb[RTA_OIF]);
2803 
2804 	if (tb[RTA_MARK])
2805 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
2806 
2807 	if (iif) {
2808 		struct net_device *dev;
2809 		int flags = 0;
2810 
2811 		dev = __dev_get_by_index(net, iif);
2812 		if (!dev) {
2813 			err = -ENODEV;
2814 			goto errout;
2815 		}
2816 
2817 		fl6.flowi6_iif = iif;
2818 
2819 		if (!ipv6_addr_any(&fl6.saddr))
2820 			flags |= RT6_LOOKUP_F_HAS_SADDR;
2821 
2822 		rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2823 							       flags);
2824 	} else {
2825 		fl6.flowi6_oif = oif;
2826 
2827 		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2828 	}
2829 
2830 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2831 	if (!skb) {
2832 		ip6_rt_put(rt);
2833 		err = -ENOBUFS;
2834 		goto errout;
2835 	}
2836 
2837 	/* Reserve room for dummy headers; this skb can pass
2838 	   through a good chunk of the routing engine.
2839 	 */
2840 	skb_reset_mac_header(skb);
2841 	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2842 
2843 	skb_dst_set(skb, &rt->dst);
2844 
2845 	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2846 			    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2847 			    nlh->nlmsg_seq, 0, 0, 0);
2848 	if (err < 0) {
2849 		kfree_skb(skb);
2850 		goto errout;
2851 	}
2852 
2853 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2854 errout:
2855 	return err;
2856 }
2857 
2858 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
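/* Broadcast a route change event to rtnetlink listeners on
 * RTNLGRP_IPV6_ROUTE.
 */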
2859 {
2860 	struct sk_buff *skb;
2861 	struct net *net = info->nl_net;
2862 	u32 seq;
2863 	int err;
2864 
2865 	err = -ENOBUFS;
2866 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2867 
2868 	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2869 	if (!skb)
2870 		goto errout;
2871 
2872 	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2873 				event, info->portid, seq, 0, 0, 0);
2874 	if (err < 0) {
2875 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2876 		WARN_ON(err == -EMSGSIZE);
2877 		kfree_skb(skb);
2878 		goto errout;
2879 	}
2880 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2881 		    info->nlh, gfp_any());
2882 	return;
2883 errout:
2884 	if (err < 0)
2885 		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2886 }
2887 
2888 static int ip6_route_dev_notify(struct notifier_block *this,
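/* Netdevice notifier: once the loopback device registers, point the
 * per-netns null (and, with multiple tables, prohibit/blackhole)
 * template routes at it.
 */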
2889 				unsigned long event, void *ptr)
2890 {
2891 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2892 	struct net *net = dev_net(dev);
2893 
2894 	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2895 		net->ipv6.ip6_null_entry->dst.dev = dev;
2896 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2897 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2898 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2899 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2900 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2901 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2902 #endif
2903 	}
2904 
2905 	return NOTIFY_OK;
2906 }
2907 
2908 /*
2909  *	/proc
2910  */
2911 
2912 #ifdef CONFIG_PROC_FS
2913 
2914 static const struct file_operations ipv6_route_proc_fops = {
2915 	.owner		= THIS_MODULE,
2916 	.open		= ipv6_route_open,
2917 	.read		= seq_read,
2918 	.llseek		= seq_lseek,
2919 	.release	= seq_release_net,
2920 };
2921 
2922 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2923 {
2924 	struct net *net = (struct net *)seq->private;
2925 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2926 		   net->ipv6.rt6_stats->fib_nodes,
2927 		   net->ipv6.rt6_stats->fib_route_nodes,
2928 		   net->ipv6.rt6_stats->fib_rt_alloc,
2929 		   net->ipv6.rt6_stats->fib_rt_entries,
2930 		   net->ipv6.rt6_stats->fib_rt_cache,
2931 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2932 		   net->ipv6.rt6_stats->fib_discarded_routes);
2933 
2934 	return 0;
2935 }
2936 
2937 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2938 {
2939 	return single_open_net(inode, file, rt6_stats_seq_show);
2940 }
2941 
2942 static const struct file_operations rt6_stats_seq_fops = {
2943 	.owner	 = THIS_MODULE,
2944 	.open	 = rt6_stats_seq_open,
2945 	.read	 = seq_read,
2946 	.llseek	 = seq_lseek,
2947 	.release = single_release_net,
2948 };
2949 #endif	/* CONFIG_PROC_FS */
2950 
2951 #ifdef CONFIG_SYSCTL
2952 
2953 static
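/* net.ipv6.route.flush is write-only: a write forces an immediate
 * garbage-collection pass via fib6_run_gc().
 */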
2954 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
2955 			      void __user *buffer, size_t *lenp, loff_t *ppos)
2956 {
2957 	struct net *net;
2958 	int delay;
2959 	if (!write)
2960 		return -EINVAL;
2961 
2962 	net = (struct net *)ctl->extra1;
2963 	delay = net->ipv6.sysctl.flush_delay;
2964 	proc_dointvec(ctl, write, buffer, lenp, ppos);
2965 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
2966 	return 0;
2967 }
2968 
2969 struct ctl_table ipv6_route_table_template[] = {
2970 	{
2971 		.procname	=	"flush",
2972 		.data		=	&init_net.ipv6.sysctl.flush_delay,
2973 		.maxlen		=	sizeof(int),
2974 		.mode		=	0200,
2975 		.proc_handler	=	ipv6_sysctl_rtcache_flush
2976 	},
2977 	{
2978 		.procname	=	"gc_thresh",
2979 		.data		=	&ip6_dst_ops_template.gc_thresh,
2980 		.maxlen		=	sizeof(int),
2981 		.mode		=	0644,
2982 		.proc_handler	=	proc_dointvec,
2983 	},
2984 	{
2985 		.procname	=	"max_size",
2986 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
2987 		.maxlen		=	sizeof(int),
2988 		.mode		=	0644,
2989 		.proc_handler	=	proc_dointvec,
2990 	},
2991 	{
2992 		.procname	=	"gc_min_interval",
2993 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2994 		.maxlen		=	sizeof(int),
2995 		.mode		=	0644,
2996 		.proc_handler	=	proc_dointvec_jiffies,
2997 	},
2998 	{
2999 		.procname	=	"gc_timeout",
3000 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
3001 		.maxlen		=	sizeof(int),
3002 		.mode		=	0644,
3003 		.proc_handler	=	proc_dointvec_jiffies,
3004 	},
3005 	{
3006 		.procname	=	"gc_interval",
3007 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
3008 		.maxlen		=	sizeof(int),
3009 		.mode		=	0644,
3010 		.proc_handler	=	proc_dointvec_jiffies,
3011 	},
3012 	{
3013 		.procname	=	"gc_elasticity",
3014 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
3015 		.maxlen		=	sizeof(int),
3016 		.mode		=	0644,
3017 		.proc_handler	=	proc_dointvec,
3018 	},
3019 	{
3020 		.procname	=	"mtu_expires",
3021 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
3022 		.maxlen		=	sizeof(int),
3023 		.mode		=	0644,
3024 		.proc_handler	=	proc_dointvec_jiffies,
3025 	},
3026 	{
3027 		.procname	=	"min_adv_mss",
3028 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
3029 		.maxlen		=	sizeof(int),
3030 		.mode		=	0644,
3031 		.proc_handler	=	proc_dointvec,
3032 	},
3033 	{
3034 		.procname	=	"gc_min_interval_ms",
3035 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3036 		.maxlen		=	sizeof(int),
3037 		.mode		=	0644,
3038 		.proc_handler	=	proc_dointvec_ms_jiffies,
3039 	},
3040 	{ }
3041 };
3042 
3043 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
3044 {
3045 	struct ctl_table *table;
3046 
3047 	table = kmemdup(ipv6_route_table_template,
3048 			sizeof(ipv6_route_table_template),
3049 			GFP_KERNEL);
3050 
3051 	if (table) {
3052 		table[0].data = &net->ipv6.sysctl.flush_delay;
3053 		table[0].extra1 = net;
3054 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
3055 		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
3056 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3057 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
3058 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
3059 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
3060 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
3061 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
3062 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3063 
3064 		/* Don't export sysctls to unprivileged users */
3065 		if (net->user_ns != &init_user_ns)
3066 			table[0].procname = NULL;
3067 	}
3068 
3069 	return table;
3070 }
3071 #endif
3072 
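/* Per-namespace setup: clone the dst_ops template and the null (and,
 * with CONFIG_IPV6_MULTIPLE_TABLES, prohibit/blackhole) route templates,
 * then initialise the per-netns route sysctl defaults.
 */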
3073 static int __net_init ip6_route_net_init(struct net *net)
3074 {
3075 	int ret = -ENOMEM;
3076 
3077 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3078 	       sizeof(net->ipv6.ip6_dst_ops));
3079 
3080 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3081 		goto out_ip6_dst_ops;
3082 
3083 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3084 					   sizeof(*net->ipv6.ip6_null_entry),
3085 					   GFP_KERNEL);
3086 	if (!net->ipv6.ip6_null_entry)
3087 		goto out_ip6_dst_entries;
3088 	net->ipv6.ip6_null_entry->dst.path =
3089 		(struct dst_entry *)net->ipv6.ip6_null_entry;
3090 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3091 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3092 			 ip6_template_metrics, true);
3093 
3094 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3095 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3096 					       sizeof(*net->ipv6.ip6_prohibit_entry),
3097 					       GFP_KERNEL);
3098 	if (!net->ipv6.ip6_prohibit_entry)
3099 		goto out_ip6_null_entry;
3100 	net->ipv6.ip6_prohibit_entry->dst.path =
3101 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3102 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3103 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3104 			 ip6_template_metrics, true);
3105 
3106 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3107 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
3108 					       GFP_KERNEL);
3109 	if (!net->ipv6.ip6_blk_hole_entry)
3110 		goto out_ip6_prohibit_entry;
3111 	net->ipv6.ip6_blk_hole_entry->dst.path =
3112 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3113 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3114 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3115 			 ip6_template_metrics, true);
3116 #endif
3117 
3118 	net->ipv6.sysctl.flush_delay = 0;
3119 	net->ipv6.sysctl.ip6_rt_max_size = 4096;
3120 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3121 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3122 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3123 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3124 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3125 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3126 
3127 	net->ipv6.ip6_rt_gc_expire = 30*HZ;
3128 
3129 	ret = 0;
3130 out:
3131 	return ret;
3132 
3133 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3134 out_ip6_prohibit_entry:
3135 	kfree(net->ipv6.ip6_prohibit_entry);
3136 out_ip6_null_entry:
3137 	kfree(net->ipv6.ip6_null_entry);
3138 #endif
3139 out_ip6_dst_entries:
3140 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3141 out_ip6_dst_ops:
3142 	goto out;
3143 }
3144 
3145 static void __net_exit ip6_route_net_exit(struct net *net)
3146 {
3147 	kfree(net->ipv6.ip6_null_entry);
3148 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3149 	kfree(net->ipv6.ip6_prohibit_entry);
3150 	kfree(net->ipv6.ip6_blk_hole_entry);
3151 #endif
3152 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3153 }
3154 
3155 static int __net_init ip6_route_net_init_late(struct net *net)
3156 {
3157 #ifdef CONFIG_PROC_FS
3158 	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3159 	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3160 #endif
3161 	return 0;
3162 }
3163 
3164 static void __net_exit ip6_route_net_exit_late(struct net *net)
3165 {
3166 #ifdef CONFIG_PROC_FS
3167 	remove_proc_entry("ipv6_route", net->proc_net);
3168 	remove_proc_entry("rt6_stats", net->proc_net);
3169 #endif
3170 }
3171 
3172 static struct pernet_operations ip6_route_net_ops = {
3173 	.init = ip6_route_net_init,
3174 	.exit = ip6_route_net_exit,
3175 };
3176 
3177 static int __net_init ipv6_inetpeer_init(struct net *net)
3178 {
3179 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3180 
3181 	if (!bp)
3182 		return -ENOMEM;
3183 	inet_peer_base_init(bp);
3184 	net->ipv6.peers = bp;
3185 	return 0;
3186 }
3187 
3188 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3189 {
3190 	struct inet_peer_base *bp = net->ipv6.peers;
3191 
3192 	net->ipv6.peers = NULL;
3193 	inetpeer_invalidate_tree(bp);
3194 	kfree(bp);
3195 }
3196 
3197 static struct pernet_operations ipv6_inetpeer_ops = {
3198 	.init	=	ipv6_inetpeer_init,
3199 	.exit	=	ipv6_inetpeer_exit,
3200 };
3201 
3202 static struct pernet_operations ip6_route_net_late_ops = {
3203 	.init = ip6_route_net_init_late,
3204 	.exit = ip6_route_net_exit_late,
3205 };
3206 
3207 static struct notifier_block ip6_route_dev_notifier = {
3208 	.notifier_call = ip6_route_dev_notify,
3209 	.priority = 0,
3210 };
3211 
3212 int __init ip6_route_init(void)
3213 {
3214 	int ret;
3215 
3216 	ret = -ENOMEM;
3217 	ip6_dst_ops_template.kmem_cachep =
3218 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3219 				  SLAB_HWCACHE_ALIGN, NULL);
3220 	if (!ip6_dst_ops_template.kmem_cachep)
3221 		goto out;
3222 
3223 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
3224 	if (ret)
3225 		goto out_kmem_cache;
3226 
3227 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3228 	if (ret)
3229 		goto out_dst_entries;
3230 
3231 	ret = register_pernet_subsys(&ip6_route_net_ops);
3232 	if (ret)
3233 		goto out_register_inetpeer;
3234 
3235 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3236 
3237 	/* The loopback device is registered before this portion of code
3238 	 * runs, so the loopback reference in rt6_info is not taken
3239 	 * automatically; take it manually for init_net. */
3240 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3241 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3242   #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3243 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3244 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3245 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3246 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3247   #endif
3248 	ret = fib6_init();
3249 	if (ret)
3250 		goto out_register_subsys;
3251 
3252 	ret = xfrm6_init();
3253 	if (ret)
3254 		goto out_fib6_init;
3255 
3256 	ret = fib6_rules_init();
3257 	if (ret)
3258 		goto xfrm6_init;
3259 
3260 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
3261 	if (ret)
3262 		goto fib6_rules_init;
3263 
3264 	ret = -ENOBUFS;
3265 	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3266 	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3267 	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3268 		goto out_register_late_subsys;
3269 
3270 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3271 	if (ret)
3272 		goto out_register_late_subsys;
3273 
3274 out:
3275 	return ret;
3276 
3277 out_register_late_subsys:
3278 	unregister_pernet_subsys(&ip6_route_net_late_ops);
3279 fib6_rules_init:
3280 	fib6_rules_cleanup();
3281 xfrm6_init:
3282 	xfrm6_fini();
3283 out_fib6_init:
3284 	fib6_gc_cleanup();
3285 out_register_subsys:
3286 	unregister_pernet_subsys(&ip6_route_net_ops);
3287 out_register_inetpeer:
3288 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
3289 out_dst_entries:
3290 	dst_entries_destroy(&ip6_dst_blackhole_ops);
3291 out_kmem_cache:
3292 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3293 	goto out;
3294 }
3295 
3296 void ip6_route_cleanup(void)
3297 {
3298 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
3299 	unregister_pernet_subsys(&ip6_route_net_late_ops);
3300 	fib6_rules_cleanup();
3301 	xfrm6_fini();
3302 	fib6_gc_cleanup();
3303 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
3304 	unregister_pernet_subsys(&ip6_route_net_ops);
3305 	dst_entries_destroy(&ip6_dst_blackhole_ops);
3306 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3307 }
3308