xref: /openbmc/linux/net/ipv6/route.c (revision b34081f1)
1 /*
2  *	Linux INET6 implementation
3  *	FIB front-end.
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13 
14 /*	Changes:
15  *
16  *	YOSHIFUJI Hideaki @USAGI
17  *		reworked default router selection.
18  *		- respect outgoing interface
19  *		- select from (probably) reachable routers (i.e.
20  *		routers in REACHABLE, STALE, DELAY or PROBE states).
21  *		- always select the same router if it is (probably)
22  *		reachable.  otherwise, round-robin the list.
23  *	Ville Nuorvala
24  *		Fixed routing subtrees.
25  */
26 
27 #define pr_fmt(fmt) "IPv6: " fmt
28 
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/xfrm.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
61 
62 #include <asm/uaccess.h>
63 
64 #ifdef CONFIG_SYSCTL
65 #include <linux/sysctl.h>
66 #endif
67 
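/*
 * Neighbour reachability results used when scoring candidate routes
 * (see rt6_check_neigh() and rt6_score_route() below):
 * RT6_NUD_FAIL_HARD  - the route must not be used at all,
 * RT6_NUD_FAIL_SOFT  - the nexthop is unresolved; the route is only
 *			usable as a round-robin fallback in find_match(),
 * RT6_NUD_SUCCEED    - the nexthop is considered reachable.
 */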
68 enum rt6_nud_state {
69 	RT6_NUD_FAIL_HARD = -2,
70 	RT6_NUD_FAIL_SOFT = -1,
71 	RT6_NUD_SUCCEED = 1
72 };
73 
74 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
75 				    const struct in6_addr *dest);
76 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
77 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
78 static unsigned int	 ip6_mtu(const struct dst_entry *dst);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void		ip6_dst_destroy(struct dst_entry *);
81 static void		ip6_dst_ifdown(struct dst_entry *,
82 				       struct net_device *dev, int how);
83 static int		 ip6_dst_gc(struct dst_ops *ops);
84 
85 static int		ip6_pkt_discard(struct sk_buff *skb);
86 static int		ip6_pkt_discard_out(struct sk_buff *skb);
87 static void		ip6_link_failure(struct sk_buff *skb);
88 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
89 					   struct sk_buff *skb, u32 mtu);
90 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
91 					struct sk_buff *skb);
92 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
93 
94 #ifdef CONFIG_IPV6_ROUTE_INFO
95 static struct rt6_info *rt6_add_route_info(struct net *net,
96 					   const struct in6_addr *prefix, int prefixlen,
97 					   const struct in6_addr *gwaddr, int ifindex,
98 					   unsigned int pref);
99 static struct rt6_info *rt6_get_route_info(struct net *net,
100 					   const struct in6_addr *prefix, int prefixlen,
101 					   const struct in6_addr *gwaddr, int ifindex);
102 #endif
103 
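/*
 * Copy-on-write for dst metrics: host routes start out sharing a
 * read-only metrics array.  On the first write the metrics are copied
 * into the inet_peer entry for this destination and dst->_metrics is
 * switched over with cmpxchg(), so concurrent writers end up agreeing
 * on a single writable copy.
 */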
104 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
105 {
106 	struct rt6_info *rt = (struct rt6_info *) dst;
107 	struct inet_peer *peer;
108 	u32 *p = NULL;
109 
110 	if (!(rt->dst.flags & DST_HOST))
111 		return NULL;
112 
113 	peer = rt6_get_peer_create(rt);
114 	if (peer) {
115 		u32 *old_p = __DST_METRICS_PTR(old);
116 		unsigned long prev, new;
117 
118 		p = peer->metrics;
119 		if (inet_metrics_new(peer))
120 			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
121 
122 		new = (unsigned long) p;
123 		prev = cmpxchg(&dst->_metrics, old, new);
124 
125 		if (prev != old) {
126 			p = __DST_METRICS_PTR(prev);
127 			if (prev & DST_METRICS_READ_ONLY)
128 				p = NULL;
129 		}
130 	}
131 	return p;
132 }
133 
134 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
135 					     struct sk_buff *skb,
136 					     const void *daddr)
137 {
138 	struct in6_addr *p = &rt->rt6i_gateway;
139 
140 	if (!ipv6_addr_any(p))
141 		return (const void *) p;
142 	else if (skb)
143 		return &ipv6_hdr(skb)->daddr;
144 	return daddr;
145 }
146 
147 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
148 					  struct sk_buff *skb,
149 					  const void *daddr)
150 {
151 	struct rt6_info *rt = (struct rt6_info *) dst;
152 	struct neighbour *n;
153 
154 	daddr = choose_neigh_daddr(rt, skb, daddr);
155 	n = __ipv6_neigh_lookup(dst->dev, daddr);
156 	if (n)
157 		return n;
158 	return neigh_create(&nd_tbl, daddr, dst->dev);
159 }
160 
161 static struct dst_ops ip6_dst_ops_template = {
162 	.family			=	AF_INET6,
163 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
164 	.gc			=	ip6_dst_gc,
165 	.gc_thresh		=	1024,
166 	.check			=	ip6_dst_check,
167 	.default_advmss		=	ip6_default_advmss,
168 	.mtu			=	ip6_mtu,
169 	.cow_metrics		=	ipv6_cow_metrics,
170 	.destroy		=	ip6_dst_destroy,
171 	.ifdown			=	ip6_dst_ifdown,
172 	.negative_advice	=	ip6_negative_advice,
173 	.link_failure		=	ip6_link_failure,
174 	.update_pmtu		=	ip6_rt_update_pmtu,
175 	.redirect		=	rt6_do_redirect,
176 	.local_out		=	__ip6_local_out,
177 	.neigh_lookup		=	ip6_neigh_lookup,
178 };
179 
180 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
181 {
182 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
183 
184 	return mtu ? : dst->dev->mtu;
185 }
186 
187 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
188 					 struct sk_buff *skb, u32 mtu)
189 {
190 }
191 
192 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
193 				      struct sk_buff *skb)
194 {
195 }
196 
197 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
198 					 unsigned long old)
199 {
200 	return NULL;
201 }
202 
203 static struct dst_ops ip6_dst_blackhole_ops = {
204 	.family			=	AF_INET6,
205 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
206 	.destroy		=	ip6_dst_destroy,
207 	.check			=	ip6_dst_check,
208 	.mtu			=	ip6_blackhole_mtu,
209 	.default_advmss		=	ip6_default_advmss,
210 	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
211 	.redirect		=	ip6_rt_blackhole_redirect,
212 	.cow_metrics		=	ip6_rt_blackhole_cow_metrics,
213 	.neigh_lookup		=	ip6_neigh_lookup,
214 };
215 
216 static const u32 ip6_template_metrics[RTAX_MAX] = {
217 	[RTAX_HOPLIMIT - 1] = 0,
218 };
219 
220 static const struct rt6_info ip6_null_entry_template = {
221 	.dst = {
222 		.__refcnt	= ATOMIC_INIT(1),
223 		.__use		= 1,
224 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
225 		.error		= -ENETUNREACH,
226 		.input		= ip6_pkt_discard,
227 		.output		= ip6_pkt_discard_out,
228 	},
229 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
230 	.rt6i_protocol  = RTPROT_KERNEL,
231 	.rt6i_metric	= ~(u32) 0,
232 	.rt6i_ref	= ATOMIC_INIT(1),
233 };
234 
235 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
236 
237 static int ip6_pkt_prohibit(struct sk_buff *skb);
238 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
239 
240 static const struct rt6_info ip6_prohibit_entry_template = {
241 	.dst = {
242 		.__refcnt	= ATOMIC_INIT(1),
243 		.__use		= 1,
244 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
245 		.error		= -EACCES,
246 		.input		= ip6_pkt_prohibit,
247 		.output		= ip6_pkt_prohibit_out,
248 	},
249 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
250 	.rt6i_protocol  = RTPROT_KERNEL,
251 	.rt6i_metric	= ~(u32) 0,
252 	.rt6i_ref	= ATOMIC_INIT(1),
253 };
254 
255 static const struct rt6_info ip6_blk_hole_entry_template = {
256 	.dst = {
257 		.__refcnt	= ATOMIC_INIT(1),
258 		.__use		= 1,
259 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
260 		.error		= -EINVAL,
261 		.input		= dst_discard,
262 		.output		= dst_discard,
263 	},
264 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
265 	.rt6i_protocol  = RTPROT_KERNEL,
266 	.rt6i_metric	= ~(u32) 0,
267 	.rt6i_ref	= ATOMIC_INIT(1),
268 };
269 
270 #endif
271 
272 /* allocate dst with ip6_dst_ops */
273 static inline struct rt6_info *ip6_dst_alloc(struct net *net,
274 					     struct net_device *dev,
275 					     int flags,
276 					     struct fib6_table *table)
277 {
278 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
279 					0, DST_OBSOLETE_FORCE_CHK, flags);
280 
281 	if (rt) {
282 		struct dst_entry *dst = &rt->dst;
283 
284 		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
285 		rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
286 		rt->rt6i_genid = rt_genid_ipv6(net);
287 		INIT_LIST_HEAD(&rt->rt6i_siblings);
288 	}
289 	return rt;
290 }
291 
292 static void ip6_dst_destroy(struct dst_entry *dst)
293 {
294 	struct rt6_info *rt = (struct rt6_info *)dst;
295 	struct inet6_dev *idev = rt->rt6i_idev;
296 	struct dst_entry *from = dst->from;
297 
298 	if (!(rt->dst.flags & DST_HOST))
299 		dst_destroy_metrics_generic(dst);
300 
301 	if (idev) {
302 		rt->rt6i_idev = NULL;
303 		in6_dev_put(idev);
304 	}
305 
306 	dst->from = NULL;
307 	dst_release(from);
308 
309 	if (rt6_has_peer(rt)) {
310 		struct inet_peer *peer = rt6_peer_ptr(rt);
311 		inet_putpeer(peer);
312 	}
313 }
314 
315 void rt6_bind_peer(struct rt6_info *rt, int create)
316 {
317 	struct inet_peer_base *base;
318 	struct inet_peer *peer;
319 
320 	base = inetpeer_base_ptr(rt->_rt6i_peer);
321 	if (!base)
322 		return;
323 
324 	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
325 	if (peer) {
326 		if (!rt6_set_peer(rt, peer))
327 			inet_putpeer(peer);
328 	}
329 }
330 
331 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
332 			   int how)
333 {
334 	struct rt6_info *rt = (struct rt6_info *)dst;
335 	struct inet6_dev *idev = rt->rt6i_idev;
336 	struct net_device *loopback_dev =
337 		dev_net(dev)->loopback_dev;
338 
339 	if (dev != loopback_dev) {
340 		if (idev && idev->dev == dev) {
341 			struct inet6_dev *loopback_idev =
342 				in6_dev_get(loopback_dev);
343 			if (loopback_idev) {
344 				rt->rt6i_idev = loopback_idev;
345 				in6_dev_put(idev);
346 			}
347 		}
348 	}
349 }
350 
351 static bool rt6_check_expired(const struct rt6_info *rt)
352 {
353 	if (rt->rt6i_flags & RTF_EXPIRES) {
354 		if (time_after(jiffies, rt->dst.expires))
355 			return true;
356 	} else if (rt->dst.from) {
357 		return rt6_check_expired((struct rt6_info *) rt->dst.from);
358 	}
359 	return false;
360 }
361 
362 static bool rt6_need_strict(const struct in6_addr *daddr)
363 {
364 	return ipv6_addr_type(daddr) &
365 		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
366 }
367 
368 /* Multipath route selection:
369  *   Hash-based function using packet header fields and the flow label.
370  * Adapted from fib_info_hashfn()
371  */
372 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
373 			       const struct flowi6 *fl6)
374 {
375 	unsigned int val = fl6->flowi6_proto;
376 
377 	val ^= ipv6_addr_hash(&fl6->daddr);
378 	val ^= ipv6_addr_hash(&fl6->saddr);
379 
380 	/* Works only if this is not encapsulated */
381 	switch (fl6->flowi6_proto) {
382 	case IPPROTO_UDP:
383 	case IPPROTO_TCP:
384 	case IPPROTO_SCTP:
385 		val ^= (__force u16)fl6->fl6_sport;
386 		val ^= (__force u16)fl6->fl6_dport;
387 		break;
388 
389 	case IPPROTO_ICMPV6:
390 		val ^= (__force u16)fl6->fl6_icmp_type;
391 		val ^= (__force u16)fl6->fl6_icmp_code;
392 		break;
393 	}
394 	/* RFC 6438 recommends using the flow label */
395 	val ^= (__force u32)fl6->flowlabel;
396 
397 	/* Perhaps we need to tune this function? */
398 	val = val ^ (val >> 7) ^ (val >> 12);
399 	return val % candidate_count;
400 }
401 
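/*
 * rt6_multipath_select() picks one of the equal-cost siblings of
 * 'match' using the hash above: rt6_info_hash_nhsfn(nsiblings + 1, fl6)
 * yields a value in [0, nsiblings]; 0 keeps 'match' itself, a value of
 * k selects the k-th entry of the sibling list (falling back to 'match'
 * if that sibling scores negatively), so a given flow keeps hitting the
 * same nexthop as long as the set of siblings is unchanged.
 */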
402 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
403 					     struct flowi6 *fl6, int oif,
404 					     int strict)
405 {
406 	struct rt6_info *sibling, *next_sibling;
407 	int route_choosen;
408 
409 	route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
410 	/* Don't change the route if route_choosen == 0
411 	 * (the sibling list does not include this route itself)
412 	 */
413 	if (route_choosen)
414 		list_for_each_entry_safe(sibling, next_sibling,
415 				&match->rt6i_siblings, rt6i_siblings) {
416 			route_choosen--;
417 			if (route_choosen == 0) {
418 				if (rt6_score_route(sibling, oif, strict) < 0)
419 					break;
420 				match = sibling;
421 				break;
422 			}
423 		}
424 	return match;
425 }
426 
427 /*
428  *	Route lookup. The helpers below run with the relevant table->tb6_lock held.
429  */
430 
431 static inline struct rt6_info *rt6_device_match(struct net *net,
432 						    struct rt6_info *rt,
433 						    const struct in6_addr *saddr,
434 						    int oif,
435 						    int flags)
436 {
437 	struct rt6_info *local = NULL;
438 	struct rt6_info *sprt;
439 
440 	if (!oif && ipv6_addr_any(saddr))
441 		goto out;
442 
443 	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
444 		struct net_device *dev = sprt->dst.dev;
445 
446 		if (oif) {
447 			if (dev->ifindex == oif)
448 				return sprt;
449 			if (dev->flags & IFF_LOOPBACK) {
450 				if (!sprt->rt6i_idev ||
451 				    sprt->rt6i_idev->dev->ifindex != oif) {
452 					if (flags & RT6_LOOKUP_F_IFACE && oif)
453 						continue;
454 					if (local && (!oif ||
455 						      local->rt6i_idev->dev->ifindex == oif))
456 						continue;
457 				}
458 				local = sprt;
459 			}
460 		} else {
461 			if (ipv6_chk_addr(net, saddr, dev,
462 					  flags & RT6_LOOKUP_F_IFACE))
463 				return sprt;
464 		}
465 	}
466 
467 	if (oif) {
468 		if (local)
469 			return local;
470 
471 		if (flags & RT6_LOOKUP_F_IFACE)
472 			return net->ipv6.ip6_null_entry;
473 	}
474 out:
475 	return rt;
476 }
477 
478 #ifdef CONFIG_IPV6_ROUTER_PREF
479 static void rt6_probe(struct rt6_info *rt)
480 {
481 	struct neighbour *neigh;
482 	/*
483 	 * Okay, this does not seem to be appropriate
484 	 * for now, however, we need to check if it
485 	 * is really so; aka Router Reachability Probing.
486 	 *
487 	 * Router Reachability Probe MUST be rate-limited
488 	 * to no more than one per minute.
489 	 */
490 	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
491 		return;
492 	rcu_read_lock_bh();
493 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
494 	if (neigh) {
495 		write_lock(&neigh->lock);
496 		if (neigh->nud_state & NUD_VALID)
497 			goto out;
498 	}
499 
500 	if (!neigh ||
501 	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
502 		struct in6_addr mcaddr;
503 		struct in6_addr *target;
504 
505 		if (neigh) {
506 			neigh->updated = jiffies;
507 			write_unlock(&neigh->lock);
508 		}
509 
510 		target = (struct in6_addr *)&rt->rt6i_gateway;
511 		addrconf_addr_solict_mult(target, &mcaddr);
512 		ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
513 	} else {
514 out:
515 		write_unlock(&neigh->lock);
516 	}
517 	rcu_read_unlock_bh();
518 }
519 #else
520 static inline void rt6_probe(struct rt6_info *rt)
521 {
522 }
523 #endif
524 
525 /*
526  * Default Router Selection (RFC 2461 6.3.6)
527  */
528 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
529 {
530 	struct net_device *dev = rt->dst.dev;
531 	if (!oif || dev->ifindex == oif)
532 		return 2;
533 	if ((dev->flags & IFF_LOOPBACK) &&
534 	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
535 		return 1;
536 	return 0;
537 }
538 
539 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
540 {
541 	struct neighbour *neigh;
542 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
543 
544 	if (rt->rt6i_flags & RTF_NONEXTHOP ||
545 	    !(rt->rt6i_flags & RTF_GATEWAY))
546 		return RT6_NUD_SUCCEED;
547 
548 	rcu_read_lock_bh();
549 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
550 	if (neigh) {
551 		read_lock(&neigh->lock);
552 		if (neigh->nud_state & NUD_VALID)
553 			ret = RT6_NUD_SUCCEED;
554 #ifdef CONFIG_IPV6_ROUTER_PREF
555 		else if (!(neigh->nud_state & NUD_FAILED))
556 			ret = RT6_NUD_SUCCEED;
557 #endif
558 		read_unlock(&neigh->lock);
559 	} else {
560 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
561 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
562 	}
563 	rcu_read_unlock_bh();
564 
565 	return ret;
566 }
567 
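/*
 * Route scoring for default router selection: the interface match from
 * rt6_check_dev() (0, 1 or 2) goes in the low bits and, with
 * CONFIG_IPV6_ROUTER_PREF, the RFC 4191 route preference is added
 * shifted left by two.  Under RT6_LOOKUP_F_REACHABLE a route whose
 * nexthop fails rt6_check_neigh() is rejected outright (negative
 * return value).
 */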
568 static int rt6_score_route(struct rt6_info *rt, int oif,
569 			   int strict)
570 {
571 	int m;
572 
573 	m = rt6_check_dev(rt, oif);
574 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
575 		return RT6_NUD_FAIL_HARD;
576 #ifdef CONFIG_IPV6_ROUTER_PREF
577 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
578 #endif
579 	if (strict & RT6_LOOKUP_F_REACHABLE) {
580 		int n = rt6_check_neigh(rt);
581 		if (n < 0)
582 			return n;
583 	}
584 	return m;
585 }
586 
587 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
588 				   int *mpri, struct rt6_info *match,
589 				   bool *do_rr)
590 {
591 	int m;
592 	bool match_do_rr = false;
593 
594 	if (rt6_check_expired(rt))
595 		goto out;
596 
597 	m = rt6_score_route(rt, oif, strict);
598 	if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
599 		match_do_rr = true;
600 		m = 0; /* lowest valid score */
601 	} else if (m < 0) {
602 		goto out;
603 	}
604 
605 	if (strict & RT6_LOOKUP_F_REACHABLE)
606 		rt6_probe(rt);
607 
608 	if (m > *mpri) {
609 		*do_rr = match_do_rr;
610 		*mpri = m;
611 		match = rt;
612 	}
613 out:
614 	return match;
615 }
616 
617 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
618 				     struct rt6_info *rr_head,
619 				     u32 metric, int oif, int strict,
620 				     bool *do_rr)
621 {
622 	struct rt6_info *rt, *match;
623 	int mpri = -1;
624 
625 	match = NULL;
626 	for (rt = rr_head; rt && rt->rt6i_metric == metric;
627 	     rt = rt->dst.rt6_next)
628 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
629 	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
630 	     rt = rt->dst.rt6_next)
631 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
632 
633 	return match;
634 }
635 
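/*
 * rt6_select() implements the default router selection described in
 * the changelog at the top of this file: find_rr_leaf() scans all
 * routes of the same metric, starting at fn->rr_ptr, and returns the
 * best-scoring one.  When find_match() asks for round-robin (do_rr),
 * fn->rr_ptr is advanced so that equally preferred routers are rotated
 * through instead of always picking the first entry.
 */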
636 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
637 {
638 	struct rt6_info *match, *rt0;
639 	struct net *net;
640 	bool do_rr = false;
641 
642 	rt0 = fn->rr_ptr;
643 	if (!rt0)
644 		fn->rr_ptr = rt0 = fn->leaf;
645 
646 	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
647 			     &do_rr);
648 
649 	if (do_rr) {
650 		struct rt6_info *next = rt0->dst.rt6_next;
651 
652 		/* no entries matched; do round-robin */
653 		if (!next || next->rt6i_metric != rt0->rt6i_metric)
654 			next = fn->leaf;
655 
656 		if (next != rt0)
657 			fn->rr_ptr = next;
658 	}
659 
660 	net = dev_net(rt0->dst.dev);
661 	return match ? match : net->ipv6.ip6_null_entry;
662 }
663 
664 #ifdef CONFIG_IPV6_ROUTE_INFO
665 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
666 		  const struct in6_addr *gwaddr)
667 {
668 	struct net *net = dev_net(dev);
669 	struct route_info *rinfo = (struct route_info *) opt;
670 	struct in6_addr prefix_buf, *prefix;
671 	unsigned int pref;
672 	unsigned long lifetime;
673 	struct rt6_info *rt;
674 
675 	if (len < sizeof(struct route_info)) {
676 		return -EINVAL;
677 	}
678 
679 	/* Sanity check for prefix_len and length */
680 	if (rinfo->length > 3) {
681 		return -EINVAL;
682 	} else if (rinfo->prefix_len > 128) {
683 		return -EINVAL;
684 	} else if (rinfo->prefix_len > 64) {
685 		if (rinfo->length < 2) {
686 			return -EINVAL;
687 		}
688 	} else if (rinfo->prefix_len > 0) {
689 		if (rinfo->length < 1) {
690 			return -EINVAL;
691 		}
692 	}
693 
694 	pref = rinfo->route_pref;
695 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
696 		return -EINVAL;
697 
698 	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
699 
700 	if (rinfo->length == 3)
701 		prefix = (struct in6_addr *)rinfo->prefix;
702 	else {
703 		/* this function is safe */
704 		ipv6_addr_prefix(&prefix_buf,
705 				 (struct in6_addr *)rinfo->prefix,
706 				 rinfo->prefix_len);
707 		prefix = &prefix_buf;
708 	}
709 
710 	rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
711 				dev->ifindex);
712 
713 	if (rt && !lifetime) {
714 		ip6_del_rt(rt);
715 		rt = NULL;
716 	}
717 
718 	if (!rt && lifetime)
719 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
720 					pref);
721 	else if (rt)
722 		rt->rt6i_flags = RTF_ROUTEINFO |
723 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
724 
725 	if (rt) {
726 		if (!addrconf_finite_timeout(lifetime))
727 			rt6_clean_expires(rt);
728 		else
729 			rt6_set_expires(rt, jiffies + HZ * lifetime);
730 
731 		ip6_rt_put(rt);
732 	}
733 	return 0;
734 }
735 #endif
736 
737 #define BACKTRACK(__net, saddr)			\
738 do { \
739 	if (rt == __net->ipv6.ip6_null_entry) {	\
740 		struct fib6_node *pn; \
741 		while (1) { \
742 			if (fn->fn_flags & RTN_TL_ROOT) \
743 				goto out; \
744 			pn = fn->parent; \
745 			if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
746 				fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
747 			else \
748 				fn = pn; \
749 			if (fn->fn_flags & RTN_RTINFO) \
750 				goto restart; \
751 		} \
752 	} \
753 } while (0)
754 
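/*
 * BACKTRACK() is used right after a fib6_lookup(): if the route chosen
 * at the current node turned out to be the null entry, walk back up
 * towards the tree root, descending into source-address subtrees
 * (FIB6_SUBTREE()) on the way, and jump back to 'restart' at the first
 * node that actually carries routes (RTN_RTINFO).  The 'out' and
 * 'restart' labels must be provided by the caller.
 */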
755 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
756 					     struct fib6_table *table,
757 					     struct flowi6 *fl6, int flags)
758 {
759 	struct fib6_node *fn;
760 	struct rt6_info *rt;
761 
762 	read_lock_bh(&table->tb6_lock);
763 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
764 restart:
765 	rt = fn->leaf;
766 	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
767 	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
768 		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
769 	BACKTRACK(net, &fl6->saddr);
770 out:
771 	dst_use(&rt->dst, jiffies);
772 	read_unlock_bh(&table->tb6_lock);
773 	return rt;
774 
775 }
776 
777 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
778 				    int flags)
779 {
780 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
781 }
782 EXPORT_SYMBOL_GPL(ip6_route_lookup);
783 
784 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
785 			    const struct in6_addr *saddr, int oif, int strict)
786 {
787 	struct flowi6 fl6 = {
788 		.flowi6_oif = oif,
789 		.daddr = *daddr,
790 	};
791 	struct dst_entry *dst;
792 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
793 
794 	if (saddr) {
795 		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
796 		flags |= RT6_LOOKUP_F_HAS_SADDR;
797 	}
798 
799 	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
800 	if (dst->error == 0)
801 		return (struct rt6_info *) dst;
802 
803 	dst_release(dst);
804 
805 	return NULL;
806 }
807 
808 EXPORT_SYMBOL(rt6_lookup);
809 
810 /* ip6_ins_rt is called with table->tb6_lock NOT held (FREE).
811    It takes a new route entry; if the addition fails for any reason,
812    the route is freed. In any case, if the caller does not hold a
813    reference to it, it may be destroyed.
814  */
815 
816 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
817 {
818 	int err;
819 	struct fib6_table *table;
820 
821 	table = rt->rt6i_table;
822 	write_lock_bh(&table->tb6_lock);
823 	err = fib6_add(&table->tb6_root, rt, info);
824 	write_unlock_bh(&table->tb6_lock);
825 
826 	return err;
827 }
828 
829 int ip6_ins_rt(struct rt6_info *rt)
830 {
831 	struct nl_info info = {
832 		.nl_net = dev_net(rt->dst.dev),
833 	};
834 	return __ip6_ins_rt(rt, &info);
835 }
836 
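/*
 * rt6_alloc_cow() and rt6_alloc_clone() create per-destination
 * RTF_CACHE copies of a FIB entry: _cow is used for routes without a
 * gateway (the destination itself becomes rt6i_gateway and, with
 * CONFIG_IPV6_SUBTREES, the source key is narrowed to a /128), while
 * _clone is used for non-host RTF_GATEWAY/RTF_NONEXTHOP routes and
 * only marks the copy as cached.
 */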
837 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
838 				      const struct in6_addr *daddr,
839 				      const struct in6_addr *saddr)
840 {
841 	struct rt6_info *rt;
842 
843 	/*
844 	 *	Clone the route.
845 	 */
846 
847 	rt = ip6_rt_copy(ort, daddr);
848 
849 	if (rt) {
850 		if (!(rt->rt6i_flags & RTF_GATEWAY)) {
851 			if (ort->rt6i_dst.plen != 128 &&
852 			    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
853 				rt->rt6i_flags |= RTF_ANYCAST;
854 			rt->rt6i_gateway = *daddr;
855 		}
856 
857 		rt->rt6i_flags |= RTF_CACHE;
858 
859 #ifdef CONFIG_IPV6_SUBTREES
860 		if (rt->rt6i_src.plen && saddr) {
861 			rt->rt6i_src.addr = *saddr;
862 			rt->rt6i_src.plen = 128;
863 		}
864 #endif
865 	}
866 
867 	return rt;
868 }
869 
870 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
871 					const struct in6_addr *daddr)
872 {
873 	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
874 
875 	if (rt)
876 		rt->rt6i_flags |= RTF_CACHE;
877 	return rt;
878 }
879 
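/*
 * ip6_pol_route() is the slow path shared by input and output lookups:
 * select a route under tb6_lock and, unless it is already an RTF_CACHE
 * entry (or the null entry), drop the lock, create a per-destination
 * clone and insert it.  If the insert fails, the lookup is retried a
 * few times since another CPU may have inserted an equivalent clone in
 * the meantime.  Unless forwarding is enabled, the first pass requires
 * a reachable nexthop (RT6_LOOKUP_F_REACHABLE) and is repeated without
 * that restriction when nothing reachable was found.
 */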
880 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
881 				      struct flowi6 *fl6, int flags)
882 {
883 	struct fib6_node *fn;
884 	struct rt6_info *rt, *nrt;
885 	int strict = 0;
886 	int attempts = 3;
887 	int err;
888 	int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
889 
890 	strict |= flags & RT6_LOOKUP_F_IFACE;
891 
892 relookup:
893 	read_lock_bh(&table->tb6_lock);
894 
895 restart_2:
896 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
897 
898 restart:
899 	rt = rt6_select(fn, oif, strict | reachable);
900 	if (rt->rt6i_nsiblings)
901 		rt = rt6_multipath_select(rt, fl6, oif, strict | reachable);
902 	BACKTRACK(net, &fl6->saddr);
903 	if (rt == net->ipv6.ip6_null_entry ||
904 	    rt->rt6i_flags & RTF_CACHE)
905 		goto out;
906 
907 	dst_hold(&rt->dst);
908 	read_unlock_bh(&table->tb6_lock);
909 
910 	if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
911 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
912 	else if (!(rt->dst.flags & DST_HOST))
913 		nrt = rt6_alloc_clone(rt, &fl6->daddr);
914 	else
915 		goto out2;
916 
917 	ip6_rt_put(rt);
918 	rt = nrt ? : net->ipv6.ip6_null_entry;
919 
920 	dst_hold(&rt->dst);
921 	if (nrt) {
922 		err = ip6_ins_rt(nrt);
923 		if (!err)
924 			goto out2;
925 	}
926 
927 	if (--attempts <= 0)
928 		goto out2;
929 
930 	/*
931 	 * Race condition! In the gap while table->tb6_lock was
932 	 * released, someone else could have inserted this route.  Relookup.
933 	 */
934 	ip6_rt_put(rt);
935 	goto relookup;
936 
937 out:
938 	if (reachable) {
939 		reachable = 0;
940 		goto restart_2;
941 	}
942 	dst_hold(&rt->dst);
943 	read_unlock_bh(&table->tb6_lock);
944 out2:
945 	rt->dst.lastuse = jiffies;
946 	rt->dst.__use++;
947 
948 	return rt;
949 }
950 
951 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
952 					    struct flowi6 *fl6, int flags)
953 {
954 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
955 }
956 
957 static struct dst_entry *ip6_route_input_lookup(struct net *net,
958 						struct net_device *dev,
959 						struct flowi6 *fl6, int flags)
960 {
961 	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
962 		flags |= RT6_LOOKUP_F_IFACE;
963 
964 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
965 }
966 
967 void ip6_route_input(struct sk_buff *skb)
968 {
969 	const struct ipv6hdr *iph = ipv6_hdr(skb);
970 	struct net *net = dev_net(skb->dev);
971 	int flags = RT6_LOOKUP_F_HAS_SADDR;
972 	struct flowi6 fl6 = {
973 		.flowi6_iif = skb->dev->ifindex,
974 		.daddr = iph->daddr,
975 		.saddr = iph->saddr,
976 		.flowlabel = ip6_flowinfo(iph),
977 		.flowi6_mark = skb->mark,
978 		.flowi6_proto = iph->nexthdr,
979 	};
980 
981 	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
982 }
983 
984 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
985 					     struct flowi6 *fl6, int flags)
986 {
987 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
988 }
989 
990 struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
991 				    struct flowi6 *fl6)
992 {
993 	int flags = 0;
994 
995 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
996 
997 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
998 		flags |= RT6_LOOKUP_F_IFACE;
999 
1000 	if (!ipv6_addr_any(&fl6->saddr))
1001 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1002 	else if (sk)
1003 		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1004 
1005 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1006 }
1007 
1008 EXPORT_SYMBOL(ip6_route_output);
1009 
1010 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1011 {
1012 	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1013 	struct dst_entry *new = NULL;
1014 
1015 	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1016 	if (rt) {
1017 		new = &rt->dst;
1018 
1019 		memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1020 		rt6_init_peer(rt, net->ipv6.peers);
1021 
1022 		new->__use = 1;
1023 		new->input = dst_discard;
1024 		new->output = dst_discard;
1025 
1026 		if (dst_metrics_read_only(&ort->dst))
1027 			new->_metrics = ort->dst._metrics;
1028 		else
1029 			dst_copy_metrics(new, &ort->dst);
1030 		rt->rt6i_idev = ort->rt6i_idev;
1031 		if (rt->rt6i_idev)
1032 			in6_dev_hold(rt->rt6i_idev);
1033 
1034 		rt->rt6i_gateway = ort->rt6i_gateway;
1035 		rt->rt6i_flags = ort->rt6i_flags;
1036 		rt->rt6i_metric = 0;
1037 
1038 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1039 #ifdef CONFIG_IPV6_SUBTREES
1040 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1041 #endif
1042 
1043 		dst_free(new);
1044 	}
1045 
1046 	dst_release(dst_orig);
1047 	return new ? new : ERR_PTR(-ENOMEM);
1048 }
1049 
1050 /*
1051  *	Destination cache support functions
1052  */
1053 
1054 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1055 {
1056 	struct rt6_info *rt;
1057 
1058 	rt = (struct rt6_info *) dst;
1059 
1060 	/* All IPV6 dsts are created with ->obsolete set to the value
1061 	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1062 	 * into this function always.
1063 	 */
1064 	if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
1065 		return NULL;
1066 
1067 	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
1068 		return dst;
1069 
1070 	return NULL;
1071 }
1072 
1073 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1074 {
1075 	struct rt6_info *rt = (struct rt6_info *) dst;
1076 
1077 	if (rt) {
1078 		if (rt->rt6i_flags & RTF_CACHE) {
1079 			if (rt6_check_expired(rt)) {
1080 				ip6_del_rt(rt);
1081 				dst = NULL;
1082 			}
1083 		} else {
1084 			dst_release(dst);
1085 			dst = NULL;
1086 		}
1087 	}
1088 	return dst;
1089 }
1090 
1091 static void ip6_link_failure(struct sk_buff *skb)
1092 {
1093 	struct rt6_info *rt;
1094 
1095 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1096 
1097 	rt = (struct rt6_info *) skb_dst(skb);
1098 	if (rt) {
1099 		if (rt->rt6i_flags & RTF_CACHE) {
1100 			dst_hold(&rt->dst);
1101 			if (ip6_del_rt(rt))
1102 				dst_free(&rt->dst);
1103 		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1104 			rt->rt6i_node->fn_sernum = -1;
1105 		}
1106 	}
1107 }
1108 
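/*
 * PMTU updates are only applied to host (/128) cached routes and only
 * ever lower the MTU.  A value below IPV6_MIN_MTU (1280) is clamped to
 * the minimum and RTAX_FEATURE_ALLFRAG is set instead, so fragment
 * headers are always added on this path.  The learned MTU expires
 * after ip6_rt_mtu_expires.
 */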
1109 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1110 			       struct sk_buff *skb, u32 mtu)
1111 {
1112 	struct rt6_info *rt6 = (struct rt6_info*)dst;
1113 
1114 	dst_confirm(dst);
1115 	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1116 		struct net *net = dev_net(dst->dev);
1117 
1118 		rt6->rt6i_flags |= RTF_MODIFIED;
1119 		if (mtu < IPV6_MIN_MTU) {
1120 			u32 features = dst_metric(dst, RTAX_FEATURES);
1121 			mtu = IPV6_MIN_MTU;
1122 			features |= RTAX_FEATURE_ALLFRAG;
1123 			dst_metric_set(dst, RTAX_FEATURES, features);
1124 		}
1125 		dst_metric_set(dst, RTAX_MTU, mtu);
1126 		rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1127 	}
1128 }
1129 
1130 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1131 		     int oif, u32 mark)
1132 {
1133 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1134 	struct dst_entry *dst;
1135 	struct flowi6 fl6;
1136 
1137 	memset(&fl6, 0, sizeof(fl6));
1138 	fl6.flowi6_oif = oif;
1139 	fl6.flowi6_mark = mark;
1140 	fl6.flowi6_flags = 0;
1141 	fl6.daddr = iph->daddr;
1142 	fl6.saddr = iph->saddr;
1143 	fl6.flowlabel = ip6_flowinfo(iph);
1144 
1145 	dst = ip6_route_output(net, NULL, &fl6);
1146 	if (!dst->error)
1147 		ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1148 	dst_release(dst);
1149 }
1150 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1151 
1152 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1153 {
1154 	ip6_update_pmtu(skb, sock_net(sk), mtu,
1155 			sk->sk_bound_dev_if, sk->sk_mark);
1156 }
1157 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1158 
1159 /* Handle redirects */
1160 struct ip6rd_flowi {
1161 	struct flowi6 fl6;
1162 	struct in6_addr gateway;
1163 };
1164 
1165 static struct rt6_info *__ip6_route_redirect(struct net *net,
1166 					     struct fib6_table *table,
1167 					     struct flowi6 *fl6,
1168 					     int flags)
1169 {
1170 	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1171 	struct rt6_info *rt;
1172 	struct fib6_node *fn;
1173 
1174 	/* Get the "current" route for this destination and
1175 	 * check if the redirect has come from an appropriate router.
1176 	 *
1177 	 * RFC 4861 specifies that redirects should only be
1178 	 * accepted if they come from the nexthop to the target.
1179 	 * Due to the way the routes are chosen, this notion
1180 	 * is a bit fuzzy and one might need to check all possible
1181 	 * routes.
1182 	 */
1183 
1184 	read_lock_bh(&table->tb6_lock);
1185 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1186 restart:
1187 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1188 		if (rt6_check_expired(rt))
1189 			continue;
1190 		if (rt->dst.error)
1191 			break;
1192 		if (!(rt->rt6i_flags & RTF_GATEWAY))
1193 			continue;
1194 		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1195 			continue;
1196 		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1197 			continue;
1198 		break;
1199 	}
1200 
1201 	if (!rt)
1202 		rt = net->ipv6.ip6_null_entry;
1203 	else if (rt->dst.error) {
1204 		rt = net->ipv6.ip6_null_entry;
1205 		goto out;
1206 	}
1207 	BACKTRACK(net, &fl6->saddr);
1208 out:
1209 	dst_hold(&rt->dst);
1210 
1211 	read_unlock_bh(&table->tb6_lock);
1212 
1213 	return rt;
1214 }
1215 
1216 static struct dst_entry *ip6_route_redirect(struct net *net,
1217 					const struct flowi6 *fl6,
1218 					const struct in6_addr *gateway)
1219 {
1220 	int flags = RT6_LOOKUP_F_HAS_SADDR;
1221 	struct ip6rd_flowi rdfl;
1222 
1223 	rdfl.fl6 = *fl6;
1224 	rdfl.gateway = *gateway;
1225 
1226 	return fib6_rule_lookup(net, &rdfl.fl6,
1227 				flags, __ip6_route_redirect);
1228 }
1229 
1230 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1231 {
1232 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1233 	struct dst_entry *dst;
1234 	struct flowi6 fl6;
1235 
1236 	memset(&fl6, 0, sizeof(fl6));
1237 	fl6.flowi6_oif = oif;
1238 	fl6.flowi6_mark = mark;
1239 	fl6.flowi6_flags = 0;
1240 	fl6.daddr = iph->daddr;
1241 	fl6.saddr = iph->saddr;
1242 	fl6.flowlabel = ip6_flowinfo(iph);
1243 
1244 	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1245 	rt6_do_redirect(dst, NULL, skb);
1246 	dst_release(dst);
1247 }
1248 EXPORT_SYMBOL_GPL(ip6_redirect);
1249 
1250 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1251 			    u32 mark)
1252 {
1253 	const struct ipv6hdr *iph = ipv6_hdr(skb);
1254 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1255 	struct dst_entry *dst;
1256 	struct flowi6 fl6;
1257 
1258 	memset(&fl6, 0, sizeof(fl6));
1259 	fl6.flowi6_oif = oif;
1260 	fl6.flowi6_mark = mark;
1261 	fl6.flowi6_flags = 0;
1262 	fl6.daddr = msg->dest;
1263 	fl6.saddr = iph->daddr;
1264 
1265 	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1266 	rt6_do_redirect(dst, NULL, skb);
1267 	dst_release(dst);
1268 }
1269 
1270 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1271 {
1272 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1273 }
1274 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1275 
1276 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1277 {
1278 	struct net_device *dev = dst->dev;
1279 	unsigned int mtu = dst_mtu(dst);
1280 	struct net *net = dev_net(dev);
1281 
1282 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1283 
1284 	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1285 		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1286 
1287 	/*
1288 	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1289 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1290 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
1291 	 * rely only on pmtu discovery"
1292 	 */
1293 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1294 		mtu = IPV6_MAXPLEN;
1295 	return mtu;
1296 }
1297 
1298 static unsigned int ip6_mtu(const struct dst_entry *dst)
1299 {
1300 	struct inet6_dev *idev;
1301 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1302 
1303 	if (mtu)
1304 		return mtu;
1305 
1306 	mtu = IPV6_MIN_MTU;
1307 
1308 	rcu_read_lock();
1309 	idev = __in6_dev_get(dst->dev);
1310 	if (idev)
1311 		mtu = idev->cnf.mtu6;
1312 	rcu_read_unlock();
1313 
1314 	return mtu;
1315 }
1316 
1317 static struct dst_entry *icmp6_dst_gc_list;
1318 static DEFINE_SPINLOCK(icmp6_dst_lock);
1319 
1320 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1321 				  struct flowi6 *fl6)
1322 {
1323 	struct dst_entry *dst;
1324 	struct rt6_info *rt;
1325 	struct inet6_dev *idev = in6_dev_get(dev);
1326 	struct net *net = dev_net(dev);
1327 
1328 	if (unlikely(!idev))
1329 		return ERR_PTR(-ENODEV);
1330 
1331 	rt = ip6_dst_alloc(net, dev, 0, NULL);
1332 	if (unlikely(!rt)) {
1333 		in6_dev_put(idev);
1334 		dst = ERR_PTR(-ENOMEM);
1335 		goto out;
1336 	}
1337 
1338 	rt->dst.flags |= DST_HOST;
1339 	rt->dst.output  = ip6_output;
1340 	atomic_set(&rt->dst.__refcnt, 1);
1341 	rt->rt6i_dst.addr = fl6->daddr;
1342 	rt->rt6i_dst.plen = 128;
1343 	rt->rt6i_idev     = idev;
1344 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1345 
1346 	spin_lock_bh(&icmp6_dst_lock);
1347 	rt->dst.next = icmp6_dst_gc_list;
1348 	icmp6_dst_gc_list = &rt->dst;
1349 	spin_unlock_bh(&icmp6_dst_lock);
1350 
1351 	fib6_force_start_gc(net);
1352 
1353 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1354 
1355 out:
1356 	return dst;
1357 }
1358 
1359 int icmp6_dst_gc(void)
1360 {
1361 	struct dst_entry *dst, **pprev;
1362 	int more = 0;
1363 
1364 	spin_lock_bh(&icmp6_dst_lock);
1365 	pprev = &icmp6_dst_gc_list;
1366 
1367 	while ((dst = *pprev) != NULL) {
1368 		if (!atomic_read(&dst->__refcnt)) {
1369 			*pprev = dst->next;
1370 			dst_free(dst);
1371 		} else {
1372 			pprev = &dst->next;
1373 			++more;
1374 		}
1375 	}
1376 
1377 	spin_unlock_bh(&icmp6_dst_lock);
1378 
1379 	return more;
1380 }
1381 
1382 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1383 			    void *arg)
1384 {
1385 	struct dst_entry *dst, **pprev;
1386 
1387 	spin_lock_bh(&icmp6_dst_lock);
1388 	pprev = &icmp6_dst_gc_list;
1389 	while ((dst = *pprev) != NULL) {
1390 		struct rt6_info *rt = (struct rt6_info *) dst;
1391 		if (func(rt, arg)) {
1392 			*pprev = dst->next;
1393 			dst_free(dst);
1394 		} else {
1395 			pprev = &dst->next;
1396 		}
1397 	}
1398 	spin_unlock_bh(&icmp6_dst_lock);
1399 }
1400 
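/*
 * dst garbage collection, called via dst_ops->gc when the dst cache
 * grows past gc_thresh.  The actual fib6_run_gc() pass is skipped
 * while the entry count stays within ip6_rt_max_size and the minimum
 * interval since the last run has not elapsed.  The expiry passed to
 * fib6_run_gc() grows on every invocation and decays by
 * 1/2^elasticity afterwards; a non-zero return value (still over
 * rt_max_size) makes the pending allocation fail.
 */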
1401 static int ip6_dst_gc(struct dst_ops *ops)
1402 {
1403 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1404 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1405 	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1406 	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1407 	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1408 	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1409 	int entries;
1410 
1411 	entries = dst_entries_get_fast(ops);
1412 	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1413 	    entries <= rt_max_size)
1414 		goto out;
1415 
1416 	net->ipv6.ip6_rt_gc_expire++;
1417 	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
1418 	entries = dst_entries_get_slow(ops);
1419 	if (entries < ops->gc_thresh)
1420 		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1421 out:
1422 	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1423 	return entries > rt_max_size;
1424 }
1425 
1426 /*
1427  *	Add a route described by a struct fib6_config to the FIB.
1428  */
1429 
1430 int ip6_route_add(struct fib6_config *cfg)
1431 {
1432 	int err;
1433 	struct net *net = cfg->fc_nlinfo.nl_net;
1434 	struct rt6_info *rt = NULL;
1435 	struct net_device *dev = NULL;
1436 	struct inet6_dev *idev = NULL;
1437 	struct fib6_table *table;
1438 	int addr_type;
1439 
1440 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1441 		return -EINVAL;
1442 #ifndef CONFIG_IPV6_SUBTREES
1443 	if (cfg->fc_src_len)
1444 		return -EINVAL;
1445 #endif
1446 	if (cfg->fc_ifindex) {
1447 		err = -ENODEV;
1448 		dev = dev_get_by_index(net, cfg->fc_ifindex);
1449 		if (!dev)
1450 			goto out;
1451 		idev = in6_dev_get(dev);
1452 		if (!idev)
1453 			goto out;
1454 	}
1455 
1456 	if (cfg->fc_metric == 0)
1457 		cfg->fc_metric = IP6_RT_PRIO_USER;
1458 
1459 	err = -ENOBUFS;
1460 	if (cfg->fc_nlinfo.nlh &&
1461 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1462 		table = fib6_get_table(net, cfg->fc_table);
1463 		if (!table) {
1464 			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1465 			table = fib6_new_table(net, cfg->fc_table);
1466 		}
1467 	} else {
1468 		table = fib6_new_table(net, cfg->fc_table);
1469 	}
1470 
1471 	if (!table)
1472 		goto out;
1473 
1474 	rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
1475 
1476 	if (!rt) {
1477 		err = -ENOMEM;
1478 		goto out;
1479 	}
1480 
1481 	if (cfg->fc_flags & RTF_EXPIRES)
1482 		rt6_set_expires(rt, jiffies +
1483 				clock_t_to_jiffies(cfg->fc_expires));
1484 	else
1485 		rt6_clean_expires(rt);
1486 
1487 	if (cfg->fc_protocol == RTPROT_UNSPEC)
1488 		cfg->fc_protocol = RTPROT_BOOT;
1489 	rt->rt6i_protocol = cfg->fc_protocol;
1490 
1491 	addr_type = ipv6_addr_type(&cfg->fc_dst);
1492 
1493 	if (addr_type & IPV6_ADDR_MULTICAST)
1494 		rt->dst.input = ip6_mc_input;
1495 	else if (cfg->fc_flags & RTF_LOCAL)
1496 		rt->dst.input = ip6_input;
1497 	else
1498 		rt->dst.input = ip6_forward;
1499 
1500 	rt->dst.output = ip6_output;
1501 
1502 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1503 	rt->rt6i_dst.plen = cfg->fc_dst_len;
1504 	if (rt->rt6i_dst.plen == 128)
1505 		rt->dst.flags |= DST_HOST;
1506 
1507 	if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1508 		u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1509 		if (!metrics) {
1510 			err = -ENOMEM;
1511 			goto out;
1512 		}
1513 		dst_init_metrics(&rt->dst, metrics, 0);
1514 	}
1515 #ifdef CONFIG_IPV6_SUBTREES
1516 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1517 	rt->rt6i_src.plen = cfg->fc_src_len;
1518 #endif
1519 
1520 	rt->rt6i_metric = cfg->fc_metric;
1521 
1522 	/* We cannot add true routes via loopback here;
1523 	   they would result in kernel routing loops, so promote them to reject routes.
1524 	 */
1525 	if ((cfg->fc_flags & RTF_REJECT) ||
1526 	    (dev && (dev->flags & IFF_LOOPBACK) &&
1527 	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
1528 	     !(cfg->fc_flags & RTF_LOCAL))) {
1529 		/* hold loopback dev/idev if we haven't done so. */
1530 		if (dev != net->loopback_dev) {
1531 			if (dev) {
1532 				dev_put(dev);
1533 				in6_dev_put(idev);
1534 			}
1535 			dev = net->loopback_dev;
1536 			dev_hold(dev);
1537 			idev = in6_dev_get(dev);
1538 			if (!idev) {
1539 				err = -ENODEV;
1540 				goto out;
1541 			}
1542 		}
1543 		rt->dst.output = ip6_pkt_discard_out;
1544 		rt->dst.input = ip6_pkt_discard;
1545 		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1546 		switch (cfg->fc_type) {
1547 		case RTN_BLACKHOLE:
1548 			rt->dst.error = -EINVAL;
1549 			break;
1550 		case RTN_PROHIBIT:
1551 			rt->dst.error = -EACCES;
1552 			break;
1553 		case RTN_THROW:
1554 			rt->dst.error = -EAGAIN;
1555 			break;
1556 		default:
1557 			rt->dst.error = -ENETUNREACH;
1558 			break;
1559 		}
1560 		goto install_route;
1561 	}
1562 
1563 	if (cfg->fc_flags & RTF_GATEWAY) {
1564 		const struct in6_addr *gw_addr;
1565 		int gwa_type;
1566 
1567 		gw_addr = &cfg->fc_gateway;
1568 		rt->rt6i_gateway = *gw_addr;
1569 		gwa_type = ipv6_addr_type(gw_addr);
1570 
1571 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1572 			struct rt6_info *grt;
1573 
1574 			/* IPv6 strictly inhibits using non-link-local
1575 			   addresses as the nexthop address.
1576 			   Otherwise, the router will not be able to send redirects.
1577 			   That is very good, but in some (rare!) circumstances
1578 			   (SIT, PtP, NBMA NOARP links) it is handy to allow
1579 			   some exceptions. --ANK
1580 			 */
1581 			err = -EINVAL;
1582 			if (!(gwa_type & IPV6_ADDR_UNICAST))
1583 				goto out;
1584 
1585 			grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1586 
1587 			err = -EHOSTUNREACH;
1588 			if (!grt)
1589 				goto out;
1590 			if (dev) {
1591 				if (dev != grt->dst.dev) {
1592 					ip6_rt_put(grt);
1593 					goto out;
1594 				}
1595 			} else {
1596 				dev = grt->dst.dev;
1597 				idev = grt->rt6i_idev;
1598 				dev_hold(dev);
1599 				in6_dev_hold(grt->rt6i_idev);
1600 			}
1601 			if (!(grt->rt6i_flags & RTF_GATEWAY))
1602 				err = 0;
1603 			ip6_rt_put(grt);
1604 
1605 			if (err)
1606 				goto out;
1607 		}
1608 		err = -EINVAL;
1609 		if (!dev || (dev->flags & IFF_LOOPBACK))
1610 			goto out;
1611 	}
1612 
1613 	err = -ENODEV;
1614 	if (!dev)
1615 		goto out;
1616 
1617 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1618 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1619 			err = -EINVAL;
1620 			goto out;
1621 		}
1622 		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1623 		rt->rt6i_prefsrc.plen = 128;
1624 	} else
1625 		rt->rt6i_prefsrc.plen = 0;
1626 
1627 	rt->rt6i_flags = cfg->fc_flags;
1628 
1629 install_route:
1630 	if (cfg->fc_mx) {
1631 		struct nlattr *nla;
1632 		int remaining;
1633 
1634 		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1635 			int type = nla_type(nla);
1636 
1637 			if (type) {
1638 				if (type > RTAX_MAX) {
1639 					err = -EINVAL;
1640 					goto out;
1641 				}
1642 
1643 				dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1644 			}
1645 		}
1646 	}
1647 
1648 	rt->dst.dev = dev;
1649 	rt->rt6i_idev = idev;
1650 	rt->rt6i_table = table;
1651 
1652 	cfg->fc_nlinfo.nl_net = dev_net(dev);
1653 
1654 	return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1655 
1656 out:
1657 	if (dev)
1658 		dev_put(dev);
1659 	if (idev)
1660 		in6_dev_put(idev);
1661 	if (rt)
1662 		dst_free(&rt->dst);
1663 	return err;
1664 }
1665 
1666 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1667 {
1668 	int err;
1669 	struct fib6_table *table;
1670 	struct net *net = dev_net(rt->dst.dev);
1671 
1672 	if (rt == net->ipv6.ip6_null_entry) {
1673 		err = -ENOENT;
1674 		goto out;
1675 	}
1676 
1677 	table = rt->rt6i_table;
1678 	write_lock_bh(&table->tb6_lock);
1679 	err = fib6_del(rt, info);
1680 	write_unlock_bh(&table->tb6_lock);
1681 
1682 out:
1683 	ip6_rt_put(rt);
1684 	return err;
1685 }
1686 
1687 int ip6_del_rt(struct rt6_info *rt)
1688 {
1689 	struct nl_info info = {
1690 		.nl_net = dev_net(rt->dst.dev),
1691 	};
1692 	return __ip6_del_rt(rt, &info);
1693 }
1694 
1695 static int ip6_route_del(struct fib6_config *cfg)
1696 {
1697 	struct fib6_table *table;
1698 	struct fib6_node *fn;
1699 	struct rt6_info *rt;
1700 	int err = -ESRCH;
1701 
1702 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1703 	if (!table)
1704 		return err;
1705 
1706 	read_lock_bh(&table->tb6_lock);
1707 
1708 	fn = fib6_locate(&table->tb6_root,
1709 			 &cfg->fc_dst, cfg->fc_dst_len,
1710 			 &cfg->fc_src, cfg->fc_src_len);
1711 
1712 	if (fn) {
1713 		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1714 			if (cfg->fc_ifindex &&
1715 			    (!rt->dst.dev ||
1716 			     rt->dst.dev->ifindex != cfg->fc_ifindex))
1717 				continue;
1718 			if (cfg->fc_flags & RTF_GATEWAY &&
1719 			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1720 				continue;
1721 			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1722 				continue;
1723 			dst_hold(&rt->dst);
1724 			read_unlock_bh(&table->tb6_lock);
1725 
1726 			return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1727 		}
1728 	}
1729 	read_unlock_bh(&table->tb6_lock);
1730 
1731 	return err;
1732 }
1733 
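/*
 * Handler for ICMPv6 Redirect messages (RFC 4861, section 8): the
 * redirect is only accepted if the destination is not multicast, the
 * target is link-local unicast (or equals the destination, i.e. an
 * on-link redirect), the receiving interface accepts redirects and is
 * not forwarding, and the ND options parse cleanly.  The neighbour
 * entry for the target is then updated, an RTF_CACHE clone pointing at
 * the new gateway is inserted and, if the previous route was itself an
 * RTF_CACHE entry, the old clone is removed.
 */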
1734 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1735 {
1736 	struct net *net = dev_net(skb->dev);
1737 	struct netevent_redirect netevent;
1738 	struct rt6_info *rt, *nrt = NULL;
1739 	struct ndisc_options ndopts;
1740 	struct inet6_dev *in6_dev;
1741 	struct neighbour *neigh;
1742 	struct rd_msg *msg;
1743 	int optlen, on_link;
1744 	u8 *lladdr;
1745 
1746 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1747 	optlen -= sizeof(*msg);
1748 
1749 	if (optlen < 0) {
1750 		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1751 		return;
1752 	}
1753 
1754 	msg = (struct rd_msg *)icmp6_hdr(skb);
1755 
1756 	if (ipv6_addr_is_multicast(&msg->dest)) {
1757 		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1758 		return;
1759 	}
1760 
1761 	on_link = 0;
1762 	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1763 		on_link = 1;
1764 	} else if (ipv6_addr_type(&msg->target) !=
1765 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1766 		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1767 		return;
1768 	}
1769 
1770 	in6_dev = __in6_dev_get(skb->dev);
1771 	if (!in6_dev)
1772 		return;
1773 	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1774 		return;
1775 
1776 	/* RFC2461 8.1:
1777 	 *	The IP source address of the Redirect MUST be the same as the current
1778 	 *	first-hop router for the specified ICMP Destination Address.
1779 	 */
1780 
1781 	if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1782 		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1783 		return;
1784 	}
1785 
1786 	lladdr = NULL;
1787 	if (ndopts.nd_opts_tgt_lladdr) {
1788 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1789 					     skb->dev);
1790 		if (!lladdr) {
1791 			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1792 			return;
1793 		}
1794 	}
1795 
1796 	rt = (struct rt6_info *) dst;
1797 	if (rt == net->ipv6.ip6_null_entry) {
1798 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1799 		return;
1800 	}
1801 
1802 	/* Redirect received -> path was valid.
1803 	 * Look, redirects are sent only in response to data packets,
1804 	 * so that this nexthop apparently is reachable. --ANK
1805 	 */
1806 	dst_confirm(&rt->dst);
1807 
1808 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1809 	if (!neigh)
1810 		return;
1811 
1812 	/*
1813 	 *	We have finally decided to accept it.
1814 	 */
1815 
1816 	neigh_update(neigh, lladdr, NUD_STALE,
1817 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
1818 		     NEIGH_UPDATE_F_OVERRIDE|
1819 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1820 				     NEIGH_UPDATE_F_ISROUTER))
1821 		     );
1822 
1823 	nrt = ip6_rt_copy(rt, &msg->dest);
1824 	if (!nrt)
1825 		goto out;
1826 
1827 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1828 	if (on_link)
1829 		nrt->rt6i_flags &= ~RTF_GATEWAY;
1830 
1831 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1832 
1833 	if (ip6_ins_rt(nrt))
1834 		goto out;
1835 
1836 	netevent.old = &rt->dst;
1837 	netevent.new = &nrt->dst;
1838 	netevent.daddr = &msg->dest;
1839 	netevent.neigh = neigh;
1840 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1841 
1842 	if (rt->rt6i_flags & RTF_CACHE) {
1843 		rt = (struct rt6_info *) dst_clone(&rt->dst);
1844 		ip6_del_rt(rt);
1845 	}
1846 
1847 out:
1848 	neigh_release(neigh);
1849 }
1850 
1851 /*
1852  *	Misc support functions
1853  */
1854 
1855 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1856 				    const struct in6_addr *dest)
1857 {
1858 	struct net *net = dev_net(ort->dst.dev);
1859 	struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1860 					    ort->rt6i_table);
1861 
1862 	if (rt) {
1863 		rt->dst.input = ort->dst.input;
1864 		rt->dst.output = ort->dst.output;
1865 		rt->dst.flags |= DST_HOST;
1866 
1867 		rt->rt6i_dst.addr = *dest;
1868 		rt->rt6i_dst.plen = 128;
1869 		dst_copy_metrics(&rt->dst, &ort->dst);
1870 		rt->dst.error = ort->dst.error;
1871 		rt->rt6i_idev = ort->rt6i_idev;
1872 		if (rt->rt6i_idev)
1873 			in6_dev_hold(rt->rt6i_idev);
1874 		rt->dst.lastuse = jiffies;
1875 
1876 		rt->rt6i_gateway = ort->rt6i_gateway;
1877 		rt->rt6i_flags = ort->rt6i_flags;
1878 		if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1879 		    (RTF_DEFAULT | RTF_ADDRCONF))
1880 			rt6_set_from(rt, ort);
1881 		rt->rt6i_metric = 0;
1882 
1883 #ifdef CONFIG_IPV6_SUBTREES
1884 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1885 #endif
1886 		memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1887 		rt->rt6i_table = ort->rt6i_table;
1888 	}
1889 	return rt;
1890 }
1891 
1892 #ifdef CONFIG_IPV6_ROUTE_INFO
1893 static struct rt6_info *rt6_get_route_info(struct net *net,
1894 					   const struct in6_addr *prefix, int prefixlen,
1895 					   const struct in6_addr *gwaddr, int ifindex)
1896 {
1897 	struct fib6_node *fn;
1898 	struct rt6_info *rt = NULL;
1899 	struct fib6_table *table;
1900 
1901 	table = fib6_get_table(net, RT6_TABLE_INFO);
1902 	if (!table)
1903 		return NULL;
1904 
1905 	read_lock_bh(&table->tb6_lock);
1906 	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
1907 	if (!fn)
1908 		goto out;
1909 
1910 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1911 		if (rt->dst.dev->ifindex != ifindex)
1912 			continue;
1913 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1914 			continue;
1915 		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1916 			continue;
1917 		dst_hold(&rt->dst);
1918 		break;
1919 	}
1920 out:
1921 	read_unlock_bh(&table->tb6_lock);
1922 	return rt;
1923 }
1924 
1925 static struct rt6_info *rt6_add_route_info(struct net *net,
1926 					   const struct in6_addr *prefix, int prefixlen,
1927 					   const struct in6_addr *gwaddr, int ifindex,
1928 					   unsigned int pref)
1929 {
1930 	struct fib6_config cfg = {
1931 		.fc_table	= RT6_TABLE_INFO,
1932 		.fc_metric	= IP6_RT_PRIO_USER,
1933 		.fc_ifindex	= ifindex,
1934 		.fc_dst_len	= prefixlen,
1935 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1936 				  RTF_UP | RTF_PREF(pref),
1937 		.fc_nlinfo.portid = 0,
1938 		.fc_nlinfo.nlh = NULL,
1939 		.fc_nlinfo.nl_net = net,
1940 	};
1941 
1942 	cfg.fc_dst = *prefix;
1943 	cfg.fc_gateway = *gwaddr;
1944 
1945 	/* We should treat it as a default route if prefix length is 0. */
1946 	if (!prefixlen)
1947 		cfg.fc_flags |= RTF_DEFAULT;
1948 
1949 	ip6_route_add(&cfg);
1950 
1951 	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1952 }
1953 #endif
1954 
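/*
 * Default routers learned from Router Advertisements live in
 * RT6_TABLE_DFLT as RTF_ADDRCONF|RTF_DEFAULT gateway routes.
 * rt6_get_dflt_router()/rt6_add_dflt_router() look them up and create
 * them when processing Router Advertisements, and
 * rt6_purge_dflt_routers() drops them all except on interfaces
 * configured with accept_ra == 2.
 */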
1955 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1956 {
1957 	struct rt6_info *rt;
1958 	struct fib6_table *table;
1959 
1960 	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1961 	if (!table)
1962 		return NULL;
1963 
1964 	read_lock_bh(&table->tb6_lock);
1965 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1966 		if (dev == rt->dst.dev &&
1967 		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1968 		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
1969 			break;
1970 	}
1971 	if (rt)
1972 		dst_hold(&rt->dst);
1973 	read_unlock_bh(&table->tb6_lock);
1974 	return rt;
1975 }
1976 
1977 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1978 				     struct net_device *dev,
1979 				     unsigned int pref)
1980 {
1981 	struct fib6_config cfg = {
1982 		.fc_table	= RT6_TABLE_DFLT,
1983 		.fc_metric	= IP6_RT_PRIO_USER,
1984 		.fc_ifindex	= dev->ifindex,
1985 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1986 				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1987 		.fc_nlinfo.portid = 0,
1988 		.fc_nlinfo.nlh = NULL,
1989 		.fc_nlinfo.nl_net = dev_net(dev),
1990 	};
1991 
1992 	cfg.fc_gateway = *gwaddr;
1993 
1994 	ip6_route_add(&cfg);
1995 
1996 	return rt6_get_dflt_router(gwaddr, dev);
1997 }
1998 
1999 void rt6_purge_dflt_routers(struct net *net)
2000 {
2001 	struct rt6_info *rt;
2002 	struct fib6_table *table;
2003 
2004 	/* NOTE: Keep consistent with rt6_get_dflt_router */
2005 	table = fib6_get_table(net, RT6_TABLE_DFLT);
2006 	if (!table)
2007 		return;
2008 
2009 restart:
2010 	read_lock_bh(&table->tb6_lock);
2011 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2012 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2013 		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2014 			dst_hold(&rt->dst);
2015 			read_unlock_bh(&table->tb6_lock);
2016 			ip6_del_rt(rt);
2017 			goto restart;
2018 		}
2019 	}
2020 	read_unlock_bh(&table->tb6_lock);
2021 }
2022 
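/*
 *	Convert the legacy struct in6_rtmsg used by the SIOCADDRT/SIOCDELRT
 *	ioctls into a struct fib6_config targeting the main table.
 */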
2023 static void rtmsg_to_fib6_config(struct net *net,
2024 				 struct in6_rtmsg *rtmsg,
2025 				 struct fib6_config *cfg)
2026 {
2027 	memset(cfg, 0, sizeof(*cfg));
2028 
2029 	cfg->fc_table = RT6_TABLE_MAIN;
2030 	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2031 	cfg->fc_metric = rtmsg->rtmsg_metric;
2032 	cfg->fc_expires = rtmsg->rtmsg_info;
2033 	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2034 	cfg->fc_src_len = rtmsg->rtmsg_src_len;
2035 	cfg->fc_flags = rtmsg->rtmsg_flags;
2036 
2037 	cfg->fc_nlinfo.nl_net = net;
2038 
2039 	cfg->fc_dst = rtmsg->rtmsg_dst;
2040 	cfg->fc_src = rtmsg->rtmsg_src;
2041 	cfg->fc_gateway = rtmsg->rtmsg_gateway;
2042 }
2043 
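/*
 *	Handle the legacy SIOCADDRT/SIOCDELRT route ioctls: check for
 *	CAP_NET_ADMIN in the owning user namespace, copy the request in,
 *	convert it and add or delete the route under the RTNL lock.
 *
 *	Rough userspace sketch (illustrative only, error handling omitted):
 *
 *		struct in6_rtmsg rt = { 0 };
 *		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *		inet_pton(AF_INET6, "2001:db8::", &rt.rtmsg_dst);
 *		rt.rtmsg_dst_len = 64;
 *		rt.rtmsg_metric  = 1;
 *		rt.rtmsg_flags   = RTF_UP;
 *		rt.rtmsg_ifindex = if_nametoindex("eth0");
 *		ioctl(fd, SIOCADDRT, &rt);
 */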
2044 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2045 {
2046 	struct fib6_config cfg;
2047 	struct in6_rtmsg rtmsg;
2048 	int err;
2049 
2050 	switch (cmd) {
2051 	case SIOCADDRT:		/* Add a route */
2052 	case SIOCDELRT:		/* Delete a route */
2053 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2054 			return -EPERM;
2055 		err = copy_from_user(&rtmsg, arg,
2056 				     sizeof(struct in6_rtmsg));
2057 		if (err)
2058 			return -EFAULT;
2059 
2060 		rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2061 
2062 		rtnl_lock();
2063 		switch (cmd) {
2064 		case SIOCADDRT:
2065 			err = ip6_route_add(&cfg);
2066 			break;
2067 		case SIOCDELRT:
2068 			err = ip6_route_del(&cfg);
2069 			break;
2070 		default:
2071 			err = -EINVAL;
2072 		}
2073 		rtnl_unlock();
2074 
2075 		return err;
2076 	}
2077 
2078 	return -EINVAL;
2079 }
2080 
2081 /*
2082  *	Drop the packet on the floor
2083  */
2084 
2085 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2086 {
2087 	int type;
2088 	struct dst_entry *dst = skb_dst(skb);
2089 	switch (ipstats_mib_noroutes) {
2090 	case IPSTATS_MIB_INNOROUTES:
2091 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2092 		if (type == IPV6_ADDR_ANY) {
2093 			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2094 				      IPSTATS_MIB_INADDRERRORS);
2095 			break;
2096 		}
2097 		/* FALLTHROUGH */
2098 	case IPSTATS_MIB_OUTNOROUTES:
2099 		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2100 			      ipstats_mib_noroutes);
2101 		break;
2102 	}
2103 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2104 	kfree_skb(skb);
2105 	return 0;
2106 }
2107 
2108 static int ip6_pkt_discard(struct sk_buff *skb)
2109 {
2110 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2111 }
2112 
2113 static int ip6_pkt_discard_out(struct sk_buff *skb)
2114 {
2115 	skb->dev = skb_dst(skb)->dev;
2116 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2117 }
2118 
2119 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2120 
2121 static int ip6_pkt_prohibit(struct sk_buff *skb)
2122 {
2123 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2124 }
2125 
2126 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2127 {
2128 	skb->dev = skb_dst(skb)->dev;
2129 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2130 }
2131 
2132 #endif
2133 
2134 /*
2135  *	Allocate a dst for local (unicast / anycast) address.
2136  */
2137 
2138 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2139 				    const struct in6_addr *addr,
2140 				    bool anycast)
2141 {
2142 	struct net *net = dev_net(idev->dev);
2143 	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
2144 
2145 	if (!rt) {
2146 		net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
2147 		return ERR_PTR(-ENOMEM);
2148 	}
2149 
2150 	in6_dev_hold(idev);
2151 
2152 	rt->dst.flags |= DST_HOST;
2153 	rt->dst.input = ip6_input;
2154 	rt->dst.output = ip6_output;
2155 	rt->rt6i_idev = idev;
2156 
2157 	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2158 	if (anycast)
2159 		rt->rt6i_flags |= RTF_ANYCAST;
2160 	else
2161 		rt->rt6i_flags |= RTF_LOCAL;
2162 
2163 	rt->rt6i_dst.addr = *addr;
2164 	rt->rt6i_dst.plen = 128;
2165 	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2166 
2167 	atomic_set(&rt->dst.__refcnt, 1);
2168 
2169 	return rt;
2170 }
2171 
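/*
 *	Choose a source address for @daddr: use the route's preferred source
 *	address (rt6i_prefsrc) when one is set, otherwise fall back to normal
 *	source address selection on the route's device.
 */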
2172 int ip6_route_get_saddr(struct net *net,
2173 			struct rt6_info *rt,
2174 			const struct in6_addr *daddr,
2175 			unsigned int prefs,
2176 			struct in6_addr *saddr)
2177 {
2178 	struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2179 	int err = 0;
2180 	if (rt->rt6i_prefsrc.plen)
2181 		*saddr = rt->rt6i_prefsrc.addr;
2182 	else
2183 		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2184 					 daddr, prefs, saddr);
2185 	return err;
2186 }
2187 
2188 /* remove deleted ip from prefsrc entries */
2189 struct arg_dev_net_ip {
2190 	struct net_device *dev;
2191 	struct net *net;
2192 	struct in6_addr *addr;
2193 };
2194 
2195 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2196 {
2197 	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2198 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2199 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2200 
2201 	if (((void *)rt->dst.dev == dev || !dev) &&
2202 	    rt != net->ipv6.ip6_null_entry &&
2203 	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2204 		/* remove prefsrc entry */
2205 		rt->rt6i_prefsrc.plen = 0;
2206 	}
2207 	return 0;
2208 }
2209 
2210 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2211 {
2212 	struct net *net = dev_net(ifp->idev->dev);
2213 	struct arg_dev_net_ip adni = {
2214 		.dev = ifp->idev->dev,
2215 		.net = net,
2216 		.addr = &ifp->addr,
2217 	};
2218 	fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2219 }
2220 
2221 struct arg_dev_net {
2222 	struct net_device *dev;
2223 	struct net *net;
2224 };
2225 
2226 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2227 {
2228 	const struct arg_dev_net *adn = arg;
2229 	const struct net_device *dev = adn->dev;
2230 
2231 	if ((rt->dst.dev == dev || !dev) &&
2232 	    rt != adn->net->ipv6.ip6_null_entry)
2233 		return -1;
2234 
2235 	return 0;
2236 }
2237 
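/*
 *	Flush every route that goes through @dev (all devices if @dev is
 *	NULL) except the null entry, and apply the same cleanup to ICMPv6's
 *	cached routes.
 */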
2238 void rt6_ifdown(struct net *net, struct net_device *dev)
2239 {
2240 	struct arg_dev_net adn = {
2241 		.dev = dev,
2242 		.net = net,
2243 	};
2244 
2245 	fib6_clean_all(net, fib6_ifdown, 0, &adn);
2246 	icmp6_clean_all(fib6_ifdown, &adn);
2247 }
2248 
2249 struct rt6_mtu_change_arg {
2250 	struct net_device *dev;
2251 	unsigned int mtu;
2252 };
2253 
2254 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2255 {
2256 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2257 	struct inet6_dev *idev;
2258 
2259 	/* In IPv6, PMTU discovery is not optional,
2260 	   so the RTAX_MTU lock cannot disable it.
2261 	   We still use this lock to block changes
2262 	   caused by addrconf/ndisc.
2263 	*/
2264 
2265 	idev = __in6_dev_get(arg->dev);
2266 	if (!idev)
2267 		return 0;
2268 
2269 	/* For an administrative MTU increase, there is no way to discover
2270 	   an IPv6 PMTU increase, so the PMTU increase must be applied here.
2271 	   RFC 1981 does not cover administrative MTU increases (e.g. when
2272 	   enabling jumbo frames), hence updating the PMTU on increase is a MUST.
2273 	 */
2274 	/*
2275 	   If the new MTU is less than the route PMTU, the new MTU will be
2276 	   the lowest MTU in the path; update the route PMTU to reflect the
2277 	   decrease.  If the new MTU is greater than the route PMTU, and the
2278 	   old MTU was the lowest MTU in the path, update the route PMTU to
2279 	   reflect the increase.  In that case, if another node's MTU is now
2280 	   the lowest in the path, a Packet Too Big message will trigger
2281 	   PMTU discovery again.
2282 	 */
2283 	if (rt->dst.dev == arg->dev &&
2284 	    !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2285 	    (dst_mtu(&rt->dst) >= arg->mtu ||
2286 	     (dst_mtu(&rt->dst) < arg->mtu &&
2287 	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2288 		dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2289 	}
2290 	return 0;
2291 }
2292 
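/*
 *	Propagate a device MTU change to the routes using that device; see
 *	the comments in rt6_mtu_change_route() for when the cached PMTU is
 *	updated.
 */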
2293 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2294 {
2295 	struct rt6_mtu_change_arg arg = {
2296 		.dev = dev,
2297 		.mtu = mtu,
2298 	};
2299 
2300 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2301 }
2302 
2303 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2304 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
2305 	[RTA_OIF]               = { .type = NLA_U32 },
2306 	[RTA_IIF]		= { .type = NLA_U32 },
2307 	[RTA_PRIORITY]          = { .type = NLA_U32 },
2308 	[RTA_METRICS]           = { .type = NLA_NESTED },
2309 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
2310 };
2311 
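/*
 *	Parse an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 *	struct fib6_config.
 */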
2312 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2313 			      struct fib6_config *cfg)
2314 {
2315 	struct rtmsg *rtm;
2316 	struct nlattr *tb[RTA_MAX+1];
2317 	int err;
2318 
2319 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2320 	if (err < 0)
2321 		goto errout;
2322 
2323 	err = -EINVAL;
2324 	rtm = nlmsg_data(nlh);
2325 	memset(cfg, 0, sizeof(*cfg));
2326 
2327 	cfg->fc_table = rtm->rtm_table;
2328 	cfg->fc_dst_len = rtm->rtm_dst_len;
2329 	cfg->fc_src_len = rtm->rtm_src_len;
2330 	cfg->fc_flags = RTF_UP;
2331 	cfg->fc_protocol = rtm->rtm_protocol;
2332 	cfg->fc_type = rtm->rtm_type;
2333 
2334 	if (rtm->rtm_type == RTN_UNREACHABLE ||
2335 	    rtm->rtm_type == RTN_BLACKHOLE ||
2336 	    rtm->rtm_type == RTN_PROHIBIT ||
2337 	    rtm->rtm_type == RTN_THROW)
2338 		cfg->fc_flags |= RTF_REJECT;
2339 
2340 	if (rtm->rtm_type == RTN_LOCAL)
2341 		cfg->fc_flags |= RTF_LOCAL;
2342 
2343 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2344 	cfg->fc_nlinfo.nlh = nlh;
2345 	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2346 
2347 	if (tb[RTA_GATEWAY]) {
2348 		nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2349 		cfg->fc_flags |= RTF_GATEWAY;
2350 	}
2351 
2352 	if (tb[RTA_DST]) {
2353 		int plen = (rtm->rtm_dst_len + 7) >> 3;
2354 
2355 		if (nla_len(tb[RTA_DST]) < plen)
2356 			goto errout;
2357 
2358 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2359 	}
2360 
2361 	if (tb[RTA_SRC]) {
2362 		int plen = (rtm->rtm_src_len + 7) >> 3;
2363 
2364 		if (nla_len(tb[RTA_SRC]) < plen)
2365 			goto errout;
2366 
2367 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2368 	}
2369 
2370 	if (tb[RTA_PREFSRC])
2371 		nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2372 
2373 	if (tb[RTA_OIF])
2374 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2375 
2376 	if (tb[RTA_PRIORITY])
2377 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2378 
2379 	if (tb[RTA_METRICS]) {
2380 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2381 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2382 	}
2383 
2384 	if (tb[RTA_TABLE])
2385 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2386 
2387 	if (tb[RTA_MULTIPATH]) {
2388 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2389 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2390 	}
2391 
2392 	err = 0;
2393 errout:
2394 	return err;
2395 }
2396 
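/*
 *	Walk the nexthops of an RTA_MULTIPATH request and add (@add != 0) or
 *	delete each one as an individual route.  A failed add rolls back the
 *	nexthops added so far; a failed delete keeps going so that all
 *	remaining nexthops are still removed.
 */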
2397 static int ip6_route_multipath(struct fib6_config *cfg, int add)
2398 {
2399 	struct fib6_config r_cfg;
2400 	struct rtnexthop *rtnh;
2401 	int remaining;
2402 	int attrlen;
2403 	int err = 0, last_err = 0;
2404 
2405 beginning:
2406 	rtnh = (struct rtnexthop *)cfg->fc_mp;
2407 	remaining = cfg->fc_mp_len;
2408 
2409 	/* Parse a Multipath Entry */
2410 	while (rtnh_ok(rtnh, remaining)) {
2411 		memcpy(&r_cfg, cfg, sizeof(*cfg));
2412 		if (rtnh->rtnh_ifindex)
2413 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2414 
2415 		attrlen = rtnh_attrlen(rtnh);
2416 		if (attrlen > 0) {
2417 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2418 
2419 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2420 			if (nla) {
2421 				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2422 				r_cfg.fc_flags |= RTF_GATEWAY;
2423 			}
2424 		}
2425 		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2426 		if (err) {
2427 			last_err = err;
2428 			/* If we are trying to remove a route, do not stop the
2429 			 * loop when ip6_route_del() fails (because the nexthop
2430 			 * is already gone); we should try to remove all nexthops.
2431 			 */
2432 			if (add) {
2433 				/* If add fails, we should try to delete all
2434 				 * next hops that have been already added.
2435 				 */
2436 				add = 0;
2437 				goto beginning;
2438 			}
2439 		}
2440 		/* Because each route is added like a single route, we remove
2441 		 * this flag after the first nexthop (if there is a collision,
2442 		 * we have already failed to add the first nexthop:
2443 		 * fib6_add_rt2node() has rejected it).
2444 		 */
2445 		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2446 		rtnh = rtnh_next(rtnh, &remaining);
2447 	}
2448 
2449 	return last_err;
2450 }
2451 
2452 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2453 {
2454 	struct fib6_config cfg;
2455 	int err;
2456 
2457 	err = rtm_to_fib6_config(skb, nlh, &cfg);
2458 	if (err < 0)
2459 		return err;
2460 
2461 	if (cfg.fc_mp)
2462 		return ip6_route_multipath(&cfg, 0);
2463 	else
2464 		return ip6_route_del(&cfg);
2465 }
2466 
2467 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2468 {
2469 	struct fib6_config cfg;
2470 	int err;
2471 
2472 	err = rtm_to_fib6_config(skb, nlh, &cfg);
2473 	if (err < 0)
2474 		return err;
2475 
2476 	if (cfg.fc_mp)
2477 		return ip6_route_multipath(&cfg, 1);
2478 	else
2479 		return ip6_route_add(&cfg);
2480 }
2481 
2482 static inline size_t rt6_nlmsg_size(void)
2483 {
2484 	return NLMSG_ALIGN(sizeof(struct rtmsg))
2485 	       + nla_total_size(16) /* RTA_SRC */
2486 	       + nla_total_size(16) /* RTA_DST */
2487 	       + nla_total_size(16) /* RTA_GATEWAY */
2488 	       + nla_total_size(16) /* RTA_PREFSRC */
2489 	       + nla_total_size(4) /* RTA_TABLE */
2490 	       + nla_total_size(4) /* RTA_IIF */
2491 	       + nla_total_size(4) /* RTA_OIF */
2492 	       + nla_total_size(4) /* RTA_PRIORITY */
2493 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2494 	       + nla_total_size(sizeof(struct rta_cacheinfo));
2495 }
2496 
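/*
 *	Fill one netlink route message of @type describing @rt into @skb.
 *	With @prefix set, routes without RTF_PREFIX_RT are skipped (returning
 *	1); -EMSGSIZE is returned when the message does not fit.
 */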
2497 static int rt6_fill_node(struct net *net,
2498 			 struct sk_buff *skb, struct rt6_info *rt,
2499 			 struct in6_addr *dst, struct in6_addr *src,
2500 			 int iif, int type, u32 portid, u32 seq,
2501 			 int prefix, int nowait, unsigned int flags)
2502 {
2503 	struct rtmsg *rtm;
2504 	struct nlmsghdr *nlh;
2505 	long expires;
2506 	u32 table;
2507 
2508 	if (prefix) {	/* user wants prefix routes only */
2509 		if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2510 			/* success since this is not a prefix route */
2511 			return 1;
2512 		}
2513 	}
2514 
2515 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2516 	if (!nlh)
2517 		return -EMSGSIZE;
2518 
2519 	rtm = nlmsg_data(nlh);
2520 	rtm->rtm_family = AF_INET6;
2521 	rtm->rtm_dst_len = rt->rt6i_dst.plen;
2522 	rtm->rtm_src_len = rt->rt6i_src.plen;
2523 	rtm->rtm_tos = 0;
2524 	if (rt->rt6i_table)
2525 		table = rt->rt6i_table->tb6_id;
2526 	else
2527 		table = RT6_TABLE_UNSPEC;
2528 	rtm->rtm_table = table;
2529 	if (nla_put_u32(skb, RTA_TABLE, table))
2530 		goto nla_put_failure;
2531 	if (rt->rt6i_flags & RTF_REJECT) {
2532 		switch (rt->dst.error) {
2533 		case -EINVAL:
2534 			rtm->rtm_type = RTN_BLACKHOLE;
2535 			break;
2536 		case -EACCES:
2537 			rtm->rtm_type = RTN_PROHIBIT;
2538 			break;
2539 		case -EAGAIN:
2540 			rtm->rtm_type = RTN_THROW;
2541 			break;
2542 		default:
2543 			rtm->rtm_type = RTN_UNREACHABLE;
2544 			break;
2545 		}
2546 	}
2547 	else if (rt->rt6i_flags & RTF_LOCAL)
2548 		rtm->rtm_type = RTN_LOCAL;
2549 	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2550 		rtm->rtm_type = RTN_LOCAL;
2551 	else
2552 		rtm->rtm_type = RTN_UNICAST;
2553 	rtm->rtm_flags = 0;
2554 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2555 	rtm->rtm_protocol = rt->rt6i_protocol;
2556 	if (rt->rt6i_flags & RTF_DYNAMIC)
2557 		rtm->rtm_protocol = RTPROT_REDIRECT;
2558 	else if (rt->rt6i_flags & RTF_ADDRCONF) {
2559 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2560 			rtm->rtm_protocol = RTPROT_RA;
2561 		else
2562 			rtm->rtm_protocol = RTPROT_KERNEL;
2563 	}
2564 
2565 	if (rt->rt6i_flags & RTF_CACHE)
2566 		rtm->rtm_flags |= RTM_F_CLONED;
2567 
2568 	if (dst) {
2569 		if (nla_put(skb, RTA_DST, 16, dst))
2570 			goto nla_put_failure;
2571 		rtm->rtm_dst_len = 128;
2572 	} else if (rtm->rtm_dst_len)
2573 		if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2574 			goto nla_put_failure;
2575 #ifdef CONFIG_IPV6_SUBTREES
2576 	if (src) {
2577 		if (nla_put(skb, RTA_SRC, 16, src))
2578 			goto nla_put_failure;
2579 		rtm->rtm_src_len = 128;
2580 	} else if (rtm->rtm_src_len &&
2581 		   nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2582 		goto nla_put_failure;
2583 #endif
2584 	if (iif) {
2585 #ifdef CONFIG_IPV6_MROUTE
2586 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2587 			int err = ip6mr_get_route(net, skb, rtm, nowait);
2588 			if (err <= 0) {
2589 				if (!nowait) {
2590 					if (err == 0)
2591 						return 0;
2592 					goto nla_put_failure;
2593 				} else {
2594 					if (err == -EMSGSIZE)
2595 						goto nla_put_failure;
2596 				}
2597 			}
2598 		} else
2599 #endif
2600 			if (nla_put_u32(skb, RTA_IIF, iif))
2601 				goto nla_put_failure;
2602 	} else if (dst) {
2603 		struct in6_addr saddr_buf;
2604 		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2605 		    nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2606 			goto nla_put_failure;
2607 	}
2608 
2609 	if (rt->rt6i_prefsrc.plen) {
2610 		struct in6_addr saddr_buf;
2611 		saddr_buf = rt->rt6i_prefsrc.addr;
2612 		if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2613 			goto nla_put_failure;
2614 	}
2615 
2616 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2617 		goto nla_put_failure;
2618 
2619 	if (rt->rt6i_flags & RTF_GATEWAY) {
2620 		if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
2621 			goto nla_put_failure;
2622 	}
2623 
2624 	if (rt->dst.dev &&
2625 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2626 		goto nla_put_failure;
2627 	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2628 		goto nla_put_failure;
2629 
2630 	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2631 
2632 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2633 		goto nla_put_failure;
2634 
2635 	return nlmsg_end(skb, nlh);
2636 
2637 nla_put_failure:
2638 	nlmsg_cancel(skb, nlh);
2639 	return -EMSGSIZE;
2640 }
2641 
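/*
 *	fib6 dump callback: emit one RTM_NEWROUTE message per route,
 *	honouring the RTM_F_PREFIX filter from the dump request.
 */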
2642 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2643 {
2644 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2645 	int prefix;
2646 
2647 	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2648 		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2649 		prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2650 	} else
2651 		prefix = 0;
2652 
2653 	return rt6_fill_node(arg->net,
2654 		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2655 		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2656 		     prefix, 0, NLM_F_MULTI);
2657 }
2658 
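/*
 *	RTM_GETROUTE handler: resolve the requested destination (as an input
 *	route when RTA_IIF is given, otherwise as an output route) and
 *	unicast the result back to the requester.
 */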
2659 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
2660 {
2661 	struct net *net = sock_net(in_skb->sk);
2662 	struct nlattr *tb[RTA_MAX+1];
2663 	struct rt6_info *rt;
2664 	struct sk_buff *skb;
2665 	struct rtmsg *rtm;
2666 	struct flowi6 fl6;
2667 	int err, iif = 0, oif = 0;
2668 
2669 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2670 	if (err < 0)
2671 		goto errout;
2672 
2673 	err = -EINVAL;
2674 	memset(&fl6, 0, sizeof(fl6));
2675 
2676 	if (tb[RTA_SRC]) {
2677 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2678 			goto errout;
2679 
2680 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2681 	}
2682 
2683 	if (tb[RTA_DST]) {
2684 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2685 			goto errout;
2686 
2687 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2688 	}
2689 
2690 	if (tb[RTA_IIF])
2691 		iif = nla_get_u32(tb[RTA_IIF]);
2692 
2693 	if (tb[RTA_OIF])
2694 		oif = nla_get_u32(tb[RTA_OIF]);
2695 
2696 	if (iif) {
2697 		struct net_device *dev;
2698 		int flags = 0;
2699 
2700 		dev = __dev_get_by_index(net, iif);
2701 		if (!dev) {
2702 			err = -ENODEV;
2703 			goto errout;
2704 		}
2705 
2706 		fl6.flowi6_iif = iif;
2707 
2708 		if (!ipv6_addr_any(&fl6.saddr))
2709 			flags |= RT6_LOOKUP_F_HAS_SADDR;
2710 
2711 		rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2712 							       flags);
2713 	} else {
2714 		fl6.flowi6_oif = oif;
2715 
2716 		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2717 	}
2718 
2719 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2720 	if (!skb) {
2721 		ip6_rt_put(rt);
2722 		err = -ENOBUFS;
2723 		goto errout;
2724 	}
2725 
2726 	/* Reserve room for dummy headers; this skb can pass
2727 	   through a good chunk of the routing engine.
2728 	 */
2729 	skb_reset_mac_header(skb);
2730 	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2731 
2732 	skb_dst_set(skb, &rt->dst);
2733 
2734 	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2735 			    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2736 			    nlh->nlmsg_seq, 0, 0, 0);
2737 	if (err < 0) {
2738 		kfree_skb(skb);
2739 		goto errout;
2740 	}
2741 
2742 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2743 errout:
2744 	return err;
2745 }
2746 
2747 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2748 {
2749 	struct sk_buff *skb;
2750 	struct net *net = info->nl_net;
2751 	u32 seq;
2752 	int err;
2753 
2754 	err = -ENOBUFS;
2755 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2756 
2757 	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2758 	if (!skb)
2759 		goto errout;
2760 
2761 	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2762 				event, info->portid, seq, 0, 0, 0);
2763 	if (err < 0) {
2764 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2765 		WARN_ON(err == -EMSGSIZE);
2766 		kfree_skb(skb);
2767 		goto errout;
2768 	}
2769 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2770 		    info->nlh, gfp_any());
2771 	return;
2772 errout:
2773 	if (err < 0)
2774 		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2775 }
2776 
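/*
 *	When the loopback device is registered in a namespace, attach the
 *	special null (and, with multiple tables, prohibit/blackhole) routes
 *	to it.
 */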
2777 static int ip6_route_dev_notify(struct notifier_block *this,
2778 				unsigned long event, void *ptr)
2779 {
2780 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2781 	struct net *net = dev_net(dev);
2782 
2783 	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2784 		net->ipv6.ip6_null_entry->dst.dev = dev;
2785 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2786 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2787 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2788 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2789 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2790 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2791 #endif
2792 	}
2793 
2794 	return NOTIFY_OK;
2795 }
2796 
2797 /*
2798  *	/proc
2799  */
2800 
2801 #ifdef CONFIG_PROC_FS
2802 
2803 struct rt6_proc_arg
2804 {
2805 	char *buffer;
2806 	int offset;
2807 	int length;
2808 	int skip;
2809 	int len;
2810 };
2811 
2812 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2813 {
2814 	struct seq_file *m = p_arg;
2815 
2816 	seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2817 
2818 #ifdef CONFIG_IPV6_SUBTREES
2819 	seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2820 #else
2821 	seq_puts(m, "00000000000000000000000000000000 00 ");
2822 #endif
2823 	if (rt->rt6i_flags & RTF_GATEWAY) {
2824 		seq_printf(m, "%pi6", &rt->rt6i_gateway);
2825 	} else {
2826 		seq_puts(m, "00000000000000000000000000000000");
2827 	}
2828 	seq_printf(m, " %08x %08x %08x %08x %8s\n",
2829 		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2830 		   rt->dst.__use, rt->rt6i_flags,
2831 		   rt->dst.dev ? rt->dst.dev->name : "");
2832 	return 0;
2833 }
2834 
2835 static int ipv6_route_show(struct seq_file *m, void *v)
2836 {
2837 	struct net *net = (struct net *)m->private;
2838 	fib6_clean_all_ro(net, rt6_info_route, 0, m);
2839 	return 0;
2840 }
2841 
2842 static int ipv6_route_open(struct inode *inode, struct file *file)
2843 {
2844 	return single_open_net(inode, file, ipv6_route_show);
2845 }
2846 
2847 static const struct file_operations ipv6_route_proc_fops = {
2848 	.owner		= THIS_MODULE,
2849 	.open		= ipv6_route_open,
2850 	.read		= seq_read,
2851 	.llseek		= seq_lseek,
2852 	.release	= single_release_net,
2853 };
2854 
2855 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2856 {
2857 	struct net *net = (struct net *)seq->private;
2858 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2859 		   net->ipv6.rt6_stats->fib_nodes,
2860 		   net->ipv6.rt6_stats->fib_route_nodes,
2861 		   net->ipv6.rt6_stats->fib_rt_alloc,
2862 		   net->ipv6.rt6_stats->fib_rt_entries,
2863 		   net->ipv6.rt6_stats->fib_rt_cache,
2864 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2865 		   net->ipv6.rt6_stats->fib_discarded_routes);
2866 
2867 	return 0;
2868 }
2869 
2870 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2871 {
2872 	return single_open_net(inode, file, rt6_stats_seq_show);
2873 }
2874 
2875 static const struct file_operations rt6_stats_seq_fops = {
2876 	.owner	 = THIS_MODULE,
2877 	.open	 = rt6_stats_seq_open,
2878 	.read	 = seq_read,
2879 	.llseek	 = seq_lseek,
2880 	.release = single_release_net,
2881 };
2882 #endif	/* CONFIG_PROC_FS */
2883 
2884 #ifdef CONFIG_SYSCTL
2885 
2886 static
2887 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
2888 			      void __user *buffer, size_t *lenp, loff_t *ppos)
2889 {
2890 	struct net *net;
2891 	int delay;
2892 	if (!write)
2893 		return -EINVAL;
2894 
2895 	net = (struct net *)ctl->extra1;
2896 	delay = net->ipv6.sysctl.flush_delay;
2897 	proc_dointvec(ctl, write, buffer, lenp, ppos);
2898 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
2899 	return 0;
2900 }
2901 
2902 struct ctl_table ipv6_route_table_template[] = {
2903 	{
2904 		.procname	=	"flush",
2905 		.data		=	&init_net.ipv6.sysctl.flush_delay,
2906 		.maxlen		=	sizeof(int),
2907 		.mode		=	0200,
2908 		.proc_handler	=	ipv6_sysctl_rtcache_flush
2909 	},
2910 	{
2911 		.procname	=	"gc_thresh",
2912 		.data		=	&ip6_dst_ops_template.gc_thresh,
2913 		.maxlen		=	sizeof(int),
2914 		.mode		=	0644,
2915 		.proc_handler	=	proc_dointvec,
2916 	},
2917 	{
2918 		.procname	=	"max_size",
2919 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
2920 		.maxlen		=	sizeof(int),
2921 		.mode		=	0644,
2922 		.proc_handler	=	proc_dointvec,
2923 	},
2924 	{
2925 		.procname	=	"gc_min_interval",
2926 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2927 		.maxlen		=	sizeof(int),
2928 		.mode		=	0644,
2929 		.proc_handler	=	proc_dointvec_jiffies,
2930 	},
2931 	{
2932 		.procname	=	"gc_timeout",
2933 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2934 		.maxlen		=	sizeof(int),
2935 		.mode		=	0644,
2936 		.proc_handler	=	proc_dointvec_jiffies,
2937 	},
2938 	{
2939 		.procname	=	"gc_interval",
2940 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
2941 		.maxlen		=	sizeof(int),
2942 		.mode		=	0644,
2943 		.proc_handler	=	proc_dointvec_jiffies,
2944 	},
2945 	{
2946 		.procname	=	"gc_elasticity",
2947 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2948 		.maxlen		=	sizeof(int),
2949 		.mode		=	0644,
2950 		.proc_handler	=	proc_dointvec,
2951 	},
2952 	{
2953 		.procname	=	"mtu_expires",
2954 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2955 		.maxlen		=	sizeof(int),
2956 		.mode		=	0644,
2957 		.proc_handler	=	proc_dointvec_jiffies,
2958 	},
2959 	{
2960 		.procname	=	"min_adv_mss",
2961 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
2962 		.maxlen		=	sizeof(int),
2963 		.mode		=	0644,
2964 		.proc_handler	=	proc_dointvec,
2965 	},
2966 	{
2967 		.procname	=	"gc_min_interval_ms",
2968 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2969 		.maxlen		=	sizeof(int),
2970 		.mode		=	0644,
2971 		.proc_handler	=	proc_dointvec_ms_jiffies,
2972 	},
2973 	{ }
2974 };
2975 
2976 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2977 {
2978 	struct ctl_table *table;
2979 
2980 	table = kmemdup(ipv6_route_table_template,
2981 			sizeof(ipv6_route_table_template),
2982 			GFP_KERNEL);
2983 
2984 	if (table) {
2985 		table[0].data = &net->ipv6.sysctl.flush_delay;
2986 		table[0].extra1 = net;
2987 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2988 		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2989 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2990 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2991 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2992 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2993 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2994 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2995 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2996 
2997 		/* Don't export sysctls to unprivileged users */
2998 		if (net->user_ns != &init_user_ns)
2999 			table[0].procname = NULL;
3000 	}
3001 
3002 	return table;
3003 }
3004 #endif
3005 
3006 static int __net_init ip6_route_net_init(struct net *net)
3007 {
3008 	int ret = -ENOMEM;
3009 
3010 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3011 	       sizeof(net->ipv6.ip6_dst_ops));
3012 
3013 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3014 		goto out_ip6_dst_ops;
3015 
3016 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3017 					   sizeof(*net->ipv6.ip6_null_entry),
3018 					   GFP_KERNEL);
3019 	if (!net->ipv6.ip6_null_entry)
3020 		goto out_ip6_dst_entries;
3021 	net->ipv6.ip6_null_entry->dst.path =
3022 		(struct dst_entry *)net->ipv6.ip6_null_entry;
3023 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3024 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3025 			 ip6_template_metrics, true);
3026 
3027 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3028 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3029 					       sizeof(*net->ipv6.ip6_prohibit_entry),
3030 					       GFP_KERNEL);
3031 	if (!net->ipv6.ip6_prohibit_entry)
3032 		goto out_ip6_null_entry;
3033 	net->ipv6.ip6_prohibit_entry->dst.path =
3034 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3035 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3036 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3037 			 ip6_template_metrics, true);
3038 
3039 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3040 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
3041 					       GFP_KERNEL);
3042 	if (!net->ipv6.ip6_blk_hole_entry)
3043 		goto out_ip6_prohibit_entry;
3044 	net->ipv6.ip6_blk_hole_entry->dst.path =
3045 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3046 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3047 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3048 			 ip6_template_metrics, true);
3049 #endif
3050 
3051 	net->ipv6.sysctl.flush_delay = 0;
3052 	net->ipv6.sysctl.ip6_rt_max_size = 4096;
3053 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3054 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3055 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3056 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3057 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3058 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3059 
3060 	net->ipv6.ip6_rt_gc_expire = 30*HZ;
3061 
3062 	ret = 0;
3063 out:
3064 	return ret;
3065 
3066 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3067 out_ip6_prohibit_entry:
3068 	kfree(net->ipv6.ip6_prohibit_entry);
3069 out_ip6_null_entry:
3070 	kfree(net->ipv6.ip6_null_entry);
3071 #endif
3072 out_ip6_dst_entries:
3073 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3074 out_ip6_dst_ops:
3075 	goto out;
3076 }
3077 
3078 static void __net_exit ip6_route_net_exit(struct net *net)
3079 {
3080 	kfree(net->ipv6.ip6_null_entry);
3081 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3082 	kfree(net->ipv6.ip6_prohibit_entry);
3083 	kfree(net->ipv6.ip6_blk_hole_entry);
3084 #endif
3085 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3086 }
3087 
3088 static int __net_init ip6_route_net_init_late(struct net *net)
3089 {
3090 #ifdef CONFIG_PROC_FS
3091 	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3092 	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3093 #endif
3094 	return 0;
3095 }
3096 
3097 static void __net_exit ip6_route_net_exit_late(struct net *net)
3098 {
3099 #ifdef CONFIG_PROC_FS
3100 	remove_proc_entry("ipv6_route", net->proc_net);
3101 	remove_proc_entry("rt6_stats", net->proc_net);
3102 #endif
3103 }
3104 
3105 static struct pernet_operations ip6_route_net_ops = {
3106 	.init = ip6_route_net_init,
3107 	.exit = ip6_route_net_exit,
3108 };
3109 
3110 static int __net_init ipv6_inetpeer_init(struct net *net)
3111 {
3112 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3113 
3114 	if (!bp)
3115 		return -ENOMEM;
3116 	inet_peer_base_init(bp);
3117 	net->ipv6.peers = bp;
3118 	return 0;
3119 }
3120 
3121 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3122 {
3123 	struct inet_peer_base *bp = net->ipv6.peers;
3124 
3125 	net->ipv6.peers = NULL;
3126 	inetpeer_invalidate_tree(bp);
3127 	kfree(bp);
3128 }
3129 
3130 static struct pernet_operations ipv6_inetpeer_ops = {
3131 	.init	=	ipv6_inetpeer_init,
3132 	.exit	=	ipv6_inetpeer_exit,
3133 };
3134 
3135 static struct pernet_operations ip6_route_net_late_ops = {
3136 	.init = ip6_route_net_init_late,
3137 	.exit = ip6_route_net_exit_late,
3138 };
3139 
3140 static struct notifier_block ip6_route_dev_notifier = {
3141 	.notifier_call = ip6_route_dev_notify,
3142 	.priority = 0,
3143 };
3144 
3145 int __init ip6_route_init(void)
3146 {
3147 	int ret;
3148 
3149 	ret = -ENOMEM;
3150 	ip6_dst_ops_template.kmem_cachep =
3151 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3152 				  SLAB_HWCACHE_ALIGN, NULL);
3153 	if (!ip6_dst_ops_template.kmem_cachep)
3154 		goto out;
3155 
3156 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
3157 	if (ret)
3158 		goto out_kmem_cache;
3159 
3160 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3161 	if (ret)
3162 		goto out_dst_entries;
3163 
3164 	ret = register_pernet_subsys(&ip6_route_net_ops);
3165 	if (ret)
3166 		goto out_register_inetpeer;
3167 
3168 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3169 
3170 	/* The loopback device is registered before this code runs, so the
3171 	 * loopback reference in rt6_info is not taken automatically; do it
3172 	 * manually for init_net. */
3173 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3174 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3175 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3176 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3177 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3178 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3179 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3180 #endif
3181 	ret = fib6_init();
3182 	if (ret)
3183 		goto out_register_subsys;
3184 
3185 	ret = xfrm6_init();
3186 	if (ret)
3187 		goto out_fib6_init;
3188 
3189 	ret = fib6_rules_init();
3190 	if (ret)
3191 		goto xfrm6_init;
3192 
3193 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
3194 	if (ret)
3195 		goto fib6_rules_init;
3196 
3197 	ret = -ENOBUFS;
3198 	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3199 	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3200 	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3201 		goto out_register_late_subsys;
3202 
3203 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3204 	if (ret)
3205 		goto out_register_late_subsys;
3206 
3207 out:
3208 	return ret;
3209 
3210 out_register_late_subsys:
3211 	unregister_pernet_subsys(&ip6_route_net_late_ops);
3212 fib6_rules_init:
3213 	fib6_rules_cleanup();
3214 xfrm6_init:
3215 	xfrm6_fini();
3216 out_fib6_init:
3217 	fib6_gc_cleanup();
3218 out_register_subsys:
3219 	unregister_pernet_subsys(&ip6_route_net_ops);
3220 out_register_inetpeer:
3221 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
3222 out_dst_entries:
3223 	dst_entries_destroy(&ip6_dst_blackhole_ops);
3224 out_kmem_cache:
3225 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3226 	goto out;
3227 }
3228 
3229 void ip6_route_cleanup(void)
3230 {
3231 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
3232 	unregister_pernet_subsys(&ip6_route_net_late_ops);
3233 	fib6_rules_cleanup();
3234 	xfrm6_fini();
3235 	fib6_gc_cleanup();
3236 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
3237 	unregister_pernet_subsys(&ip6_route_net_ops);
3238 	dst_entries_destroy(&ip6_dst_blackhole_ops);
3239 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3240 }
3241