xref: /openbmc/linux/net/ipv6/route.c (revision 550bab42f83308c9d6ab04a980cc4333cef1c8fa)
1 /*
2  *	Linux INET6 implementation
3  *	FIB front-end.
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13 
14 /*	Changes:
15  *
16  *	YOSHIFUJI Hideaki @USAGI
17  *		reworked default router selection.
18  *		- respect outgoing interface
19  *		- select from (probably) reachable routers (i.e.
20  *		routers in REACHABLE, STALE, DELAY or PROBE states).
21  *		- always select the same router if it is (probably)
22  *		reachable; otherwise, round-robin the list.
23  *	Ville Nuorvala
24  *		Fixed routing subtrees.
25  */
26 
27 #define pr_fmt(fmt) "IPv6: " fmt
28 
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/xfrm.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
61 
62 #include <asm/uaccess.h>
63 
64 #ifdef CONFIG_SYSCTL
65 #include <linux/sysctl.h>
66 #endif
67 
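/*
 * Scores returned by rt6_check_neigh(): a "soft" failure means the
 * neighbour state is merely unknown and the route may still be used
 * (it only triggers round-robin), while a "hard" failure disqualifies
 * the route entirely.
 */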
68 enum rt6_nud_state {
69 	RT6_NUD_FAIL_HARD = -2,
70 	RT6_NUD_FAIL_SOFT = -1,
71 	RT6_NUD_SUCCEED = 1
72 };
73 
74 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
75 				    const struct in6_addr *dest);
76 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
77 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
78 static unsigned int	 ip6_mtu(const struct dst_entry *dst);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void		ip6_dst_destroy(struct dst_entry *);
81 static void		ip6_dst_ifdown(struct dst_entry *,
82 				       struct net_device *dev, int how);
83 static int		 ip6_dst_gc(struct dst_ops *ops);
84 
85 static int		ip6_pkt_discard(struct sk_buff *skb);
86 static int		ip6_pkt_discard_out(struct sk_buff *skb);
87 static void		ip6_link_failure(struct sk_buff *skb);
88 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
89 					   struct sk_buff *skb, u32 mtu);
90 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
91 					struct sk_buff *skb);
92 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
93 
94 #ifdef CONFIG_IPV6_ROUTE_INFO
95 static struct rt6_info *rt6_add_route_info(struct net *net,
96 					   const struct in6_addr *prefix, int prefixlen,
97 					   const struct in6_addr *gwaddr, int ifindex,
98 					   unsigned int pref);
99 static struct rt6_info *rt6_get_route_info(struct net *net,
100 					   const struct in6_addr *prefix, int prefixlen,
101 					   const struct in6_addr *gwaddr, int ifindex);
102 #endif
103 
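/*
 * Copy-on-write metrics: host routes start out pointing at read-only
 * (shared) metrics.  On the first write the metrics are copied into the
 * route's inet_peer and the dst metrics pointer is switched over with
 * cmpxchg(); a lost race simply falls back to whatever the winner installed.
 */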
104 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
105 {
106 	struct rt6_info *rt = (struct rt6_info *) dst;
107 	struct inet_peer *peer;
108 	u32 *p = NULL;
109 
110 	if (!(rt->dst.flags & DST_HOST))
111 		return NULL;
112 
113 	peer = rt6_get_peer_create(rt);
114 	if (peer) {
115 		u32 *old_p = __DST_METRICS_PTR(old);
116 		unsigned long prev, new;
117 
118 		p = peer->metrics;
119 		if (inet_metrics_new(peer))
120 			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
121 
122 		new = (unsigned long) p;
123 		prev = cmpxchg(&dst->_metrics, old, new);
124 
125 		if (prev != old) {
126 			p = __DST_METRICS_PTR(prev);
127 			if (prev & DST_METRICS_READ_ONLY)
128 				p = NULL;
129 		}
130 	}
131 	return p;
132 }
133 
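/*
 * Pick the neighbour-cache key for this route: the configured gateway if
 * there is one, otherwise the destination of the packet in hand, and
 * finally the caller-supplied address.
 */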
134 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
135 					     struct sk_buff *skb,
136 					     const void *daddr)
137 {
138 	struct in6_addr *p = &rt->rt6i_gateway;
139 
140 	if (!ipv6_addr_any(p))
141 		return (const void *) p;
142 	else if (skb)
143 		return &ipv6_hdr(skb)->daddr;
144 	return daddr;
145 }
146 
147 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
148 					  struct sk_buff *skb,
149 					  const void *daddr)
150 {
151 	struct rt6_info *rt = (struct rt6_info *) dst;
152 	struct neighbour *n;
153 
154 	daddr = choose_neigh_daddr(rt, skb, daddr);
155 	n = __ipv6_neigh_lookup(dst->dev, daddr);
156 	if (n)
157 		return n;
158 	return neigh_create(&nd_tbl, daddr, dst->dev);
159 }
160 
161 static struct dst_ops ip6_dst_ops_template = {
162 	.family			=	AF_INET6,
163 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
164 	.gc			=	ip6_dst_gc,
165 	.gc_thresh		=	1024,
166 	.check			=	ip6_dst_check,
167 	.default_advmss		=	ip6_default_advmss,
168 	.mtu			=	ip6_mtu,
169 	.cow_metrics		=	ipv6_cow_metrics,
170 	.destroy		=	ip6_dst_destroy,
171 	.ifdown			=	ip6_dst_ifdown,
172 	.negative_advice	=	ip6_negative_advice,
173 	.link_failure		=	ip6_link_failure,
174 	.update_pmtu		=	ip6_rt_update_pmtu,
175 	.redirect		=	rt6_do_redirect,
176 	.local_out		=	__ip6_local_out,
177 	.neigh_lookup		=	ip6_neigh_lookup,
178 };
179 
180 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
181 {
182 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
183 
184 	return mtu ? : dst->dev->mtu;
185 }
186 
187 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
188 					 struct sk_buff *skb, u32 mtu)
189 {
190 }
191 
192 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
193 				      struct sk_buff *skb)
194 {
195 }
196 
197 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
198 					 unsigned long old)
199 {
200 	return NULL;
201 }
202 
203 static struct dst_ops ip6_dst_blackhole_ops = {
204 	.family			=	AF_INET6,
205 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
206 	.destroy		=	ip6_dst_destroy,
207 	.check			=	ip6_dst_check,
208 	.mtu			=	ip6_blackhole_mtu,
209 	.default_advmss		=	ip6_default_advmss,
210 	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
211 	.redirect		=	ip6_rt_blackhole_redirect,
212 	.cow_metrics		=	ip6_rt_blackhole_cow_metrics,
213 	.neigh_lookup		=	ip6_neigh_lookup,
214 };
215 
216 static const u32 ip6_template_metrics[RTAX_MAX] = {
217 	[RTAX_HOPLIMIT - 1] = 0,
218 };
219 
220 static const struct rt6_info ip6_null_entry_template = {
221 	.dst = {
222 		.__refcnt	= ATOMIC_INIT(1),
223 		.__use		= 1,
224 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
225 		.error		= -ENETUNREACH,
226 		.input		= ip6_pkt_discard,
227 		.output		= ip6_pkt_discard_out,
228 	},
229 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
230 	.rt6i_protocol  = RTPROT_KERNEL,
231 	.rt6i_metric	= ~(u32) 0,
232 	.rt6i_ref	= ATOMIC_INIT(1),
233 };
234 
235 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
236 
237 static int ip6_pkt_prohibit(struct sk_buff *skb);
238 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
239 
240 static const struct rt6_info ip6_prohibit_entry_template = {
241 	.dst = {
242 		.__refcnt	= ATOMIC_INIT(1),
243 		.__use		= 1,
244 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
245 		.error		= -EACCES,
246 		.input		= ip6_pkt_prohibit,
247 		.output		= ip6_pkt_prohibit_out,
248 	},
249 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
250 	.rt6i_protocol  = RTPROT_KERNEL,
251 	.rt6i_metric	= ~(u32) 0,
252 	.rt6i_ref	= ATOMIC_INIT(1),
253 };
254 
255 static const struct rt6_info ip6_blk_hole_entry_template = {
256 	.dst = {
257 		.__refcnt	= ATOMIC_INIT(1),
258 		.__use		= 1,
259 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
260 		.error		= -EINVAL,
261 		.input		= dst_discard,
262 		.output		= dst_discard,
263 	},
264 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
265 	.rt6i_protocol  = RTPROT_KERNEL,
266 	.rt6i_metric	= ~(u32) 0,
267 	.rt6i_ref	= ATOMIC_INIT(1),
268 };
269 
270 #endif
271 
272 /* allocate dst with ip6_dst_ops */
273 static inline struct rt6_info *ip6_dst_alloc(struct net *net,
274 					     struct net_device *dev,
275 					     int flags,
276 					     struct fib6_table *table)
277 {
278 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
279 					0, DST_OBSOLETE_FORCE_CHK, flags);
280 
281 	if (rt) {
282 		struct dst_entry *dst = &rt->dst;
283 
284 		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
285 		rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
286 		rt->rt6i_genid = rt_genid_ipv6(net);
287 		INIT_LIST_HEAD(&rt->rt6i_siblings);
288 	}
289 	return rt;
290 }
291 
292 static void ip6_dst_destroy(struct dst_entry *dst)
293 {
294 	struct rt6_info *rt = (struct rt6_info *)dst;
295 	struct inet6_dev *idev = rt->rt6i_idev;
296 	struct dst_entry *from = dst->from;
297 
298 	if (!(rt->dst.flags & DST_HOST))
299 		dst_destroy_metrics_generic(dst);
300 
301 	if (idev) {
302 		rt->rt6i_idev = NULL;
303 		in6_dev_put(idev);
304 	}
305 
306 	dst->from = NULL;
307 	dst_release(from);
308 
309 	if (rt6_has_peer(rt)) {
310 		struct inet_peer *peer = rt6_peer_ptr(rt);
311 		inet_putpeer(peer);
312 	}
313 }
314 
315 void rt6_bind_peer(struct rt6_info *rt, int create)
316 {
317 	struct inet_peer_base *base;
318 	struct inet_peer *peer;
319 
320 	base = inetpeer_base_ptr(rt->_rt6i_peer);
321 	if (!base)
322 		return;
323 
324 	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
325 	if (peer) {
326 		if (!rt6_set_peer(rt, peer))
327 			inet_putpeer(peer);
328 	}
329 }
330 
331 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
332 			   int how)
333 {
334 	struct rt6_info *rt = (struct rt6_info *)dst;
335 	struct inet6_dev *idev = rt->rt6i_idev;
336 	struct net_device *loopback_dev =
337 		dev_net(dev)->loopback_dev;
338 
339 	if (dev != loopback_dev) {
340 		if (idev && idev->dev == dev) {
341 			struct inet6_dev *loopback_idev =
342 				in6_dev_get(loopback_dev);
343 			if (loopback_idev) {
344 				rt->rt6i_idev = loopback_idev;
345 				in6_dev_put(idev);
346 			}
347 		}
348 	}
349 }
350 
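/*
 * A route counts as expired either because its own RTF_EXPIRES timer has
 * run out or because the route it was cloned from (dst->from) has expired.
 */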
351 static bool rt6_check_expired(const struct rt6_info *rt)
352 {
353 	if (rt->rt6i_flags & RTF_EXPIRES) {
354 		if (time_after(jiffies, rt->dst.expires))
355 			return true;
356 	} else if (rt->dst.from) {
357 		return rt6_check_expired((struct rt6_info *) rt->dst.from);
358 	}
359 	return false;
360 }
361 
362 static bool rt6_need_strict(const struct in6_addr *daddr)
363 {
364 	return ipv6_addr_type(daddr) &
365 		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
366 }
367 
368 /* Multipath route selection:
369  *   Hash-based function using the packet header and flow label.
370  * Adapted from fib_info_hashfn()
371  */
372 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
373 			       const struct flowi6 *fl6)
374 {
375 	unsigned int val = fl6->flowi6_proto;
376 
377 	val ^= ipv6_addr_hash(&fl6->daddr);
378 	val ^= ipv6_addr_hash(&fl6->saddr);
379 
380 	/* This works only if the flow is not encapsulated */
381 	switch (fl6->flowi6_proto) {
382 	case IPPROTO_UDP:
383 	case IPPROTO_TCP:
384 	case IPPROTO_SCTP:
385 		val ^= (__force u16)fl6->fl6_sport;
386 		val ^= (__force u16)fl6->fl6_dport;
387 		break;
388 
389 	case IPPROTO_ICMPV6:
390 		val ^= (__force u16)fl6->fl6_icmp_type;
391 		val ^= (__force u16)fl6->fl6_icmp_code;
392 		break;
393 	}
394 	/* RFC 6438 recommends using the flow label */
395 	val ^= (__force u32)fl6->flowlabel;
396 
397 	/* Perhaps this function needs further tuning? */
398 	val = val ^ (val >> 7) ^ (val >> 12);
399 	return val % candidate_count;
400 }
401 
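/*
 * Illustrative example (values made up): for a route with three siblings,
 * candidate_count is 4 and rt6_info_hash_nhsfn() returns a value in 0..3.
 * A result of 0 keeps the route that matched the lookup; 1..3 walk that
 * many entries into the sibling list, as rt6_multipath_select() does below.
 */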
402 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
403 					     struct flowi6 *fl6, int oif,
404 					     int strict)
405 {
406 	struct rt6_info *sibling, *next_sibling;
407 	int route_chosen;
408 
409 	route_chosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
410 	/* Don't change the route if route_chosen == 0
411 	 * (the sibling list does not include the matched route itself)
412 	 */
413 	if (route_chosen)
414 		list_for_each_entry_safe(sibling, next_sibling,
415 				&match->rt6i_siblings, rt6i_siblings) {
416 			route_chosen--;
417 			if (route_chosen == 0) {
418 				if (rt6_score_route(sibling, oif, strict) < 0)
419 					break;
420 				match = sibling;
421 				break;
422 			}
423 		}
424 	return match;
425 }
426 
427 /*
428  *	Route lookup. Any table->tb6_lock is implied.
429  */
430 
431 static inline struct rt6_info *rt6_device_match(struct net *net,
432 						    struct rt6_info *rt,
433 						    const struct in6_addr *saddr,
434 						    int oif,
435 						    int flags)
436 {
437 	struct rt6_info *local = NULL;
438 	struct rt6_info *sprt;
439 
440 	if (!oif && ipv6_addr_any(saddr))
441 		goto out;
442 
443 	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
444 		struct net_device *dev = sprt->dst.dev;
445 
446 		if (oif) {
447 			if (dev->ifindex == oif)
448 				return sprt;
449 			if (dev->flags & IFF_LOOPBACK) {
450 				if (!sprt->rt6i_idev ||
451 				    sprt->rt6i_idev->dev->ifindex != oif) {
452 					if (flags & RT6_LOOKUP_F_IFACE && oif)
453 						continue;
454 					if (local && (!oif ||
455 						      local->rt6i_idev->dev->ifindex == oif))
456 						continue;
457 				}
458 				local = sprt;
459 			}
460 		} else {
461 			if (ipv6_chk_addr(net, saddr, dev,
462 					  flags & RT6_LOOKUP_F_IFACE))
463 				return sprt;
464 		}
465 	}
466 
467 	if (oif) {
468 		if (local)
469 			return local;
470 
471 		if (flags & RT6_LOOKUP_F_IFACE)
472 			return net->ipv6.ip6_null_entry;
473 	}
474 out:
475 	return rt;
476 }
477 
478 #ifdef CONFIG_IPV6_ROUTER_PREF
479 static void rt6_probe(struct rt6_info *rt)
480 {
481 	struct neighbour *neigh;
482 	/*
483 	 * Okay, this does not seem to be appropriate for now;
484 	 * however, we need to check whether it really is,
485 	 * aka Router Reachability Probing.
486 	 *
487 	 * Router Reachability Probe MUST be rate-limited
488 	 * to no more than one per minute.
489 	 */
490 	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
491 		return;
492 	rcu_read_lock_bh();
493 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
494 	if (neigh) {
495 		write_lock(&neigh->lock);
496 		if (neigh->nud_state & NUD_VALID)
497 			goto out;
498 	}
499 
500 	if (!neigh ||
501 	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
502 		struct in6_addr mcaddr;
503 		struct in6_addr *target;
504 
505 		if (neigh) {
506 			neigh->updated = jiffies;
507 			write_unlock(&neigh->lock);
508 		}
509 
510 		target = (struct in6_addr *)&rt->rt6i_gateway;
511 		addrconf_addr_solict_mult(target, &mcaddr);
512 		ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
513 	} else {
514 out:
515 		write_unlock(&neigh->lock);
516 	}
517 	rcu_read_unlock_bh();
518 }
519 #else
520 static inline void rt6_probe(struct rt6_info *rt)
521 {
522 }
523 #endif
524 
525 /*
526  * Default Router Selection (RFC 2461 6.3.6)
527  */
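/*
 * rt6_score_route() combines these checks: rt6_check_dev() contributes 2
 * for an exact interface match (or when no interface is given), 1 for a
 * loopback route whose idev matches the requested interface, and 0
 * otherwise; with CONFIG_IPV6_ROUTER_PREF the decoded router preference is
 * OR'd in above bit 1; and with RT6_LOOKUP_F_REACHABLE an unreachable or
 * failed neighbour can veto the route via rt6_check_neigh().
 */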
528 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
529 {
530 	struct net_device *dev = rt->dst.dev;
531 	if (!oif || dev->ifindex == oif)
532 		return 2;
533 	if ((dev->flags & IFF_LOOPBACK) &&
534 	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
535 		return 1;
536 	return 0;
537 }
538 
539 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
540 {
541 	struct neighbour *neigh;
542 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
543 
544 	if (rt->rt6i_flags & RTF_NONEXTHOP ||
545 	    !(rt->rt6i_flags & RTF_GATEWAY))
546 		return RT6_NUD_SUCCEED;
547 
548 	rcu_read_lock_bh();
549 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
550 	if (neigh) {
551 		read_lock(&neigh->lock);
552 		if (neigh->nud_state & NUD_VALID)
553 			ret = RT6_NUD_SUCCEED;
554 #ifdef CONFIG_IPV6_ROUTER_PREF
555 		else if (!(neigh->nud_state & NUD_FAILED))
556 			ret = RT6_NUD_SUCCEED;
557 #endif
558 		read_unlock(&neigh->lock);
559 	} else {
560 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
561 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
562 	}
563 	rcu_read_unlock_bh();
564 
565 	return ret;
566 }
567 
568 static int rt6_score_route(struct rt6_info *rt, int oif,
569 			   int strict)
570 {
571 	int m;
572 
573 	m = rt6_check_dev(rt, oif);
574 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
575 		return RT6_NUD_FAIL_HARD;
576 #ifdef CONFIG_IPV6_ROUTER_PREF
577 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
578 #endif
579 	if (strict & RT6_LOOKUP_F_REACHABLE) {
580 		int n = rt6_check_neigh(rt);
581 		if (n < 0)
582 			return n;
583 	}
584 	return m;
585 }
586 
587 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
588 				   int *mpri, struct rt6_info *match,
589 				   bool *do_rr)
590 {
591 	int m;
592 	bool match_do_rr = false;
593 
594 	if (rt6_check_expired(rt))
595 		goto out;
596 
597 	m = rt6_score_route(rt, oif, strict);
598 	if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
599 		match_do_rr = true;
600 		m = 0; /* lowest valid score */
601 	} else if (m < 0) {
602 		goto out;
603 	}
604 
605 	if (strict & RT6_LOOKUP_F_REACHABLE)
606 		rt6_probe(rt);
607 
608 	if (m > *mpri) {
609 		*do_rr = match_do_rr;
610 		*mpri = m;
611 		match = rt;
612 	}
613 out:
614 	return match;
615 }
616 
617 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
618 				     struct rt6_info *rr_head,
619 				     u32 metric, int oif, int strict,
620 				     bool *do_rr)
621 {
622 	struct rt6_info *rt, *match;
623 	int mpri = -1;
624 
625 	match = NULL;
626 	for (rt = rr_head; rt && rt->rt6i_metric == metric;
627 	     rt = rt->dst.rt6_next)
628 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
629 	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
630 	     rt = rt->dst.rt6_next)
631 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
632 
633 	return match;
634 }
635 
636 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
637 {
638 	struct rt6_info *match, *rt0;
639 	struct net *net;
640 	bool do_rr = false;
641 
642 	rt0 = fn->rr_ptr;
643 	if (!rt0)
644 		fn->rr_ptr = rt0 = fn->leaf;
645 
646 	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
647 			     &do_rr);
648 
649 	if (do_rr) {
650 		struct rt6_info *next = rt0->dst.rt6_next;
651 
652 		/* no entries matched; do round-robin */
653 		if (!next || next->rt6i_metric != rt0->rt6i_metric)
654 			next = fn->leaf;
655 
656 		if (next != rt0)
657 			fn->rr_ptr = next;
658 	}
659 
660 	net = dev_net(rt0->dst.dev);
661 	return match ? match : net->ipv6.ip6_null_entry;
662 }
663 
664 #ifdef CONFIG_IPV6_ROUTE_INFO
665 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
666 		  const struct in6_addr *gwaddr)
667 {
668 	struct net *net = dev_net(dev);
669 	struct route_info *rinfo = (struct route_info *) opt;
670 	struct in6_addr prefix_buf, *prefix;
671 	unsigned int pref;
672 	unsigned long lifetime;
673 	struct rt6_info *rt;
674 
675 	if (len < sizeof(struct route_info)) {
676 		return -EINVAL;
677 	}
678 
679 	/* Sanity check for prefix_len and length */
680 	if (rinfo->length > 3) {
681 		return -EINVAL;
682 	} else if (rinfo->prefix_len > 128) {
683 		return -EINVAL;
684 	} else if (rinfo->prefix_len > 64) {
685 		if (rinfo->length < 2) {
686 			return -EINVAL;
687 		}
688 	} else if (rinfo->prefix_len > 0) {
689 		if (rinfo->length < 1) {
690 			return -EINVAL;
691 		}
692 	}
693 
694 	pref = rinfo->route_pref;
695 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
696 		return -EINVAL;
697 
698 	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
699 
700 	if (rinfo->length == 3)
701 		prefix = (struct in6_addr *)rinfo->prefix;
702 	else {
703 		/* ipv6_addr_prefix() reads only prefix_len bits, so this is safe */
704 		ipv6_addr_prefix(&prefix_buf,
705 				 (struct in6_addr *)rinfo->prefix,
706 				 rinfo->prefix_len);
707 		prefix = &prefix_buf;
708 	}
709 
710 	rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
711 				dev->ifindex);
712 
713 	if (rt && !lifetime) {
714 		ip6_del_rt(rt);
715 		rt = NULL;
716 	}
717 
718 	if (!rt && lifetime)
719 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
720 					pref);
721 	else if (rt)
722 		rt->rt6i_flags = RTF_ROUTEINFO |
723 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
724 
725 	if (rt) {
726 		if (!addrconf_finite_timeout(lifetime))
727 			rt6_clean_expires(rt);
728 		else
729 			rt6_set_expires(rt, jiffies + HZ * lifetime);
730 
731 		ip6_rt_put(rt);
732 	}
733 	return 0;
734 }
735 #endif
736 
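/*
 * BACKTRACK() is expanded inside the lookup functions below.  If the lookup
 * resolved to the null entry, it walks back up the fib6 tree (descending
 * into source-routing subtrees where present) and jumps to the caller's
 * "restart" label on the first node that carries routes (RTN_RTINFO), or to
 * "out" once the tree root is reached.  It therefore relies on the local
 * variables fn/rt and on both labels existing in the calling function.
 */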
737 #define BACKTRACK(__net, saddr)			\
738 do { \
739 	if (rt == __net->ipv6.ip6_null_entry) {	\
740 		struct fib6_node *pn; \
741 		while (1) { \
742 			if (fn->fn_flags & RTN_TL_ROOT) \
743 				goto out; \
744 			pn = fn->parent; \
745 			if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
746 				fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
747 			else \
748 				fn = pn; \
749 			if (fn->fn_flags & RTN_RTINFO) \
750 				goto restart; \
751 		} \
752 	} \
753 } while (0)
754 
755 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
756 					     struct fib6_table *table,
757 					     struct flowi6 *fl6, int flags)
758 {
759 	struct fib6_node *fn;
760 	struct rt6_info *rt;
761 
762 	read_lock_bh(&table->tb6_lock);
763 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
764 restart:
765 	rt = fn->leaf;
766 	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
767 	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
768 		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
769 	BACKTRACK(net, &fl6->saddr);
770 out:
771 	dst_use(&rt->dst, jiffies);
772 	read_unlock_bh(&table->tb6_lock);
773 	return rt;
774 
775 }
776 
777 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
778 				    int flags)
779 {
780 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
781 }
782 EXPORT_SYMBOL_GPL(ip6_route_lookup);
783 
784 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
785 			    const struct in6_addr *saddr, int oif, int strict)
786 {
787 	struct flowi6 fl6 = {
788 		.flowi6_oif = oif,
789 		.daddr = *daddr,
790 	};
791 	struct dst_entry *dst;
792 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
793 
794 	if (saddr) {
795 		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
796 		flags |= RT6_LOOKUP_F_HAS_SADDR;
797 	}
798 
799 	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
800 	if (dst->error == 0)
801 		return (struct rt6_info *) dst;
802 
803 	dst_release(dst);
804 
805 	return NULL;
806 }
807 
808 EXPORT_SYMBOL(rt6_lookup);
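/*
 * Hedged usage sketch (illustrative only, not code from this file): a
 * caller that just needs the egress device and gateway for a destination
 * might do, assuming net/daddr/oif are already known:
 *
 *	struct rt6_info *rt = rt6_lookup(net, daddr, NULL, oif, 0);
 *	if (rt) {
 *		... inspect rt->dst.dev, rt->rt6i_gateway ...
 *		ip6_rt_put(rt);
 *	}
 *
 * The route is returned with a reference held, so it must be dropped with
 * ip6_rt_put() once the caller is done with it.
 */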
809 
810 /* ip6_ins_rt is called with table->tb6_lock NOT held.
811    It takes a new route entry; if the addition fails for any reason,
812    the route is freed. In any case, if the caller does not hold a
813    reference, the route may be destroyed.
814  */
815 
816 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
817 {
818 	int err;
819 	struct fib6_table *table;
820 
821 	table = rt->rt6i_table;
822 	write_lock_bh(&table->tb6_lock);
823 	err = fib6_add(&table->tb6_root, rt, info);
824 	write_unlock_bh(&table->tb6_lock);
825 
826 	return err;
827 }
828 
829 int ip6_ins_rt(struct rt6_info *rt)
830 {
831 	struct nl_info info = {
832 		.nl_net = dev_net(rt->dst.dev),
833 	};
834 	return __ip6_ins_rt(rt, &info);
835 }
836 
837 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
838 				      const struct in6_addr *daddr,
839 				      const struct in6_addr *saddr)
840 {
841 	struct rt6_info *rt;
842 
843 	/*
844 	 *	Clone the route.
845 	 */
846 
847 	rt = ip6_rt_copy(ort, daddr);
848 
849 	if (rt) {
850 		if (!(rt->rt6i_flags & RTF_GATEWAY)) {
851 			if (ort->rt6i_dst.plen != 128 &&
852 			    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
853 				rt->rt6i_flags |= RTF_ANYCAST;
854 		}
855 
856 		rt->rt6i_flags |= RTF_CACHE;
857 
858 #ifdef CONFIG_IPV6_SUBTREES
859 		if (rt->rt6i_src.plen && saddr) {
860 			rt->rt6i_src.addr = *saddr;
861 			rt->rt6i_src.plen = 128;
862 		}
863 #endif
864 	}
865 
866 	return rt;
867 }
868 
869 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
870 					const struct in6_addr *daddr)
871 {
872 	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
873 
874 	if (rt)
875 		rt->rt6i_flags |= RTF_CACHE;
876 	return rt;
877 }
878 
879 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
880 				      struct flowi6 *fl6, int flags)
881 {
882 	struct fib6_node *fn;
883 	struct rt6_info *rt, *nrt;
884 	int strict = 0;
885 	int attempts = 3;
886 	int err;
887 	int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
888 
889 	strict |= flags & RT6_LOOKUP_F_IFACE;
890 
891 relookup:
892 	read_lock_bh(&table->tb6_lock);
893 
894 restart_2:
895 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
896 
897 restart:
898 	rt = rt6_select(fn, oif, strict | reachable);
899 	if (rt->rt6i_nsiblings)
900 		rt = rt6_multipath_select(rt, fl6, oif, strict | reachable);
901 	BACKTRACK(net, &fl6->saddr);
902 	if (rt == net->ipv6.ip6_null_entry ||
903 	    rt->rt6i_flags & RTF_CACHE)
904 		goto out;
905 
906 	dst_hold(&rt->dst);
907 	read_unlock_bh(&table->tb6_lock);
908 
909 	if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
910 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
911 	else if (!(rt->dst.flags & DST_HOST))
912 		nrt = rt6_alloc_clone(rt, &fl6->daddr);
913 	else
914 		goto out2;
915 
916 	ip6_rt_put(rt);
917 	rt = nrt ? : net->ipv6.ip6_null_entry;
918 
919 	dst_hold(&rt->dst);
920 	if (nrt) {
921 		err = ip6_ins_rt(nrt);
922 		if (!err)
923 			goto out2;
924 	}
925 
926 	if (--attempts <= 0)
927 		goto out2;
928 
929 	/*
930 	 * Race condition! In the gap, when table->tb6_lock was
931 	 * released, someone could insert this route.  Relookup.
932 	 */
933 	ip6_rt_put(rt);
934 	goto relookup;
935 
936 out:
937 	if (reachable) {
938 		reachable = 0;
939 		goto restart_2;
940 	}
941 	dst_hold(&rt->dst);
942 	read_unlock_bh(&table->tb6_lock);
943 out2:
944 	rt->dst.lastuse = jiffies;
945 	rt->dst.__use++;
946 
947 	return rt;
948 }
949 
950 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
951 					    struct flowi6 *fl6, int flags)
952 {
953 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
954 }
955 
956 static struct dst_entry *ip6_route_input_lookup(struct net *net,
957 						struct net_device *dev,
958 						struct flowi6 *fl6, int flags)
959 {
960 	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
961 		flags |= RT6_LOOKUP_F_IFACE;
962 
963 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
964 }
965 
966 void ip6_route_input(struct sk_buff *skb)
967 {
968 	const struct ipv6hdr *iph = ipv6_hdr(skb);
969 	struct net *net = dev_net(skb->dev);
970 	int flags = RT6_LOOKUP_F_HAS_SADDR;
971 	struct flowi6 fl6 = {
972 		.flowi6_iif = skb->dev->ifindex,
973 		.daddr = iph->daddr,
974 		.saddr = iph->saddr,
975 		.flowlabel = ip6_flowinfo(iph),
976 		.flowi6_mark = skb->mark,
977 		.flowi6_proto = iph->nexthdr,
978 	};
979 
980 	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
981 }
982 
983 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
984 					     struct flowi6 *fl6, int flags)
985 {
986 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
987 }
988 
989 struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
990 				    struct flowi6 *fl6)
991 {
992 	int flags = 0;
993 
994 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
995 
996 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
997 		flags |= RT6_LOOKUP_F_IFACE;
998 
999 	if (!ipv6_addr_any(&fl6->saddr))
1000 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1001 	else if (sk)
1002 		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1003 
1004 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1005 }
1006 
1007 EXPORT_SYMBOL(ip6_route_output);
1008 
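/*
 * Used by xfrm (via its per-family blackhole_route hook): the original
 * route is cloned into a dst whose input/output discard packets and whose
 * PMTU/redirect handlers are no-ops, so a caller can keep holding a dst
 * while packets are (temporarily) dropped.
 */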
1009 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1010 {
1011 	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1012 	struct dst_entry *new = NULL;
1013 
1014 	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1015 	if (rt) {
1016 		new = &rt->dst;
1017 
1018 		memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1019 		rt6_init_peer(rt, net->ipv6.peers);
1020 
1021 		new->__use = 1;
1022 		new->input = dst_discard;
1023 		new->output = dst_discard;
1024 
1025 		if (dst_metrics_read_only(&ort->dst))
1026 			new->_metrics = ort->dst._metrics;
1027 		else
1028 			dst_copy_metrics(new, &ort->dst);
1029 		rt->rt6i_idev = ort->rt6i_idev;
1030 		if (rt->rt6i_idev)
1031 			in6_dev_hold(rt->rt6i_idev);
1032 
1033 		rt->rt6i_gateway = ort->rt6i_gateway;
1034 		rt->rt6i_flags = ort->rt6i_flags;
1035 		rt->rt6i_metric = 0;
1036 
1037 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1038 #ifdef CONFIG_IPV6_SUBTREES
1039 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1040 #endif
1041 
1042 		dst_free(new);
1043 	}
1044 
1045 	dst_release(dst_orig);
1046 	return new ? new : ERR_PTR(-ENOMEM);
1047 }
1048 
1049 /*
1050  *	Destination cache support functions
1051  */
1052 
1053 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1054 {
1055 	struct rt6_info *rt;
1056 
1057 	rt = (struct rt6_info *) dst;
1058 
1059 	/* All IPv6 dsts are created with ->obsolete set to the value
1060 	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1061 	 * into this function always.
1062 	 */
1063 	if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
1064 		return NULL;
1065 
1066 	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
1067 		return dst;
1068 
1069 	return NULL;
1070 }
1071 
1072 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1073 {
1074 	struct rt6_info *rt = (struct rt6_info *) dst;
1075 
1076 	if (rt) {
1077 		if (rt->rt6i_flags & RTF_CACHE) {
1078 			if (rt6_check_expired(rt)) {
1079 				ip6_del_rt(rt);
1080 				dst = NULL;
1081 			}
1082 		} else {
1083 			dst_release(dst);
1084 			dst = NULL;
1085 		}
1086 	}
1087 	return dst;
1088 }
1089 
1090 static void ip6_link_failure(struct sk_buff *skb)
1091 {
1092 	struct rt6_info *rt;
1093 
1094 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1095 
1096 	rt = (struct rt6_info *) skb_dst(skb);
1097 	if (rt) {
1098 		if (rt->rt6i_flags & RTF_CACHE) {
1099 			dst_hold(&rt->dst);
1100 			if (ip6_del_rt(rt))
1101 				dst_free(&rt->dst);
1102 		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1103 			rt->rt6i_node->fn_sernum = -1;
1104 		}
1105 	}
1106 }
1107 
1108 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1109 			       struct sk_buff *skb, u32 mtu)
1110 {
1111 	struct rt6_info *rt6 = (struct rt6_info *)dst;
1112 
1113 	dst_confirm(dst);
1114 	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1115 		struct net *net = dev_net(dst->dev);
1116 
1117 		rt6->rt6i_flags |= RTF_MODIFIED;
1118 		if (mtu < IPV6_MIN_MTU) {
1119 			u32 features = dst_metric(dst, RTAX_FEATURES);
1120 			mtu = IPV6_MIN_MTU;
1121 			features |= RTAX_FEATURE_ALLFRAG;
1122 			dst_metric_set(dst, RTAX_FEATURES, features);
1123 		}
1124 		dst_metric_set(dst, RTAX_MTU, mtu);
1125 		rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1126 	}
1127 }
1128 
1129 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1130 		     int oif, u32 mark)
1131 {
1132 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1133 	struct dst_entry *dst;
1134 	struct flowi6 fl6;
1135 
1136 	memset(&fl6, 0, sizeof(fl6));
1137 	fl6.flowi6_oif = oif;
1138 	fl6.flowi6_mark = mark;
1139 	fl6.flowi6_flags = 0;
1140 	fl6.daddr = iph->daddr;
1141 	fl6.saddr = iph->saddr;
1142 	fl6.flowlabel = ip6_flowinfo(iph);
1143 
1144 	dst = ip6_route_output(net, NULL, &fl6);
1145 	if (!dst->error)
1146 		ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1147 	dst_release(dst);
1148 }
1149 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1150 
1151 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1152 {
1153 	ip6_update_pmtu(skb, sock_net(sk), mtu,
1154 			sk->sk_bound_dev_if, sk->sk_mark);
1155 }
1156 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
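/*
 * Hedged example of how these helpers are typically used (sketch, not code
 * from this file): a transport protocol's ICMPv6 error handler reacts to a
 * Packet Too Big message roughly like
 *
 *	if (type == ICMPV6_PKT_TOOBIG)
 *		ip6_sk_update_pmtu(skb, sk, info);
 *
 * where "info" carries the MTU reported by the router in network byte
 * order, matching the __be32 argument above.
 */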
1157 
1158 /* Handle redirects */
1159 struct ip6rd_flowi {
1160 	struct flowi6 fl6;
1161 	struct in6_addr gateway;
1162 };
1163 
1164 static struct rt6_info *__ip6_route_redirect(struct net *net,
1165 					     struct fib6_table *table,
1166 					     struct flowi6 *fl6,
1167 					     int flags)
1168 {
1169 	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1170 	struct rt6_info *rt;
1171 	struct fib6_node *fn;
1172 
1173 	/* Get the "current" route for this destination and
1174 	 * check if the redirect has come from an appropriate router.
1175 	 *
1176 	 * RFC 4861 specifies that redirects should only be
1177 	 * accepted if they come from the nexthop to the target.
1178 	 * Due to the way the routes are chosen, this notion
1179 	 * is a bit fuzzy and one might need to check all possible
1180 	 * routes.
1181 	 */
1182 
1183 	read_lock_bh(&table->tb6_lock);
1184 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1185 restart:
1186 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1187 		if (rt6_check_expired(rt))
1188 			continue;
1189 		if (rt->dst.error)
1190 			break;
1191 		if (!(rt->rt6i_flags & RTF_GATEWAY))
1192 			continue;
1193 		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1194 			continue;
1195 		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1196 			continue;
1197 		break;
1198 	}
1199 
1200 	if (!rt)
1201 		rt = net->ipv6.ip6_null_entry;
1202 	else if (rt->dst.error) {
1203 		rt = net->ipv6.ip6_null_entry;
1204 		goto out;
1205 	}
1206 	BACKTRACK(net, &fl6->saddr);
1207 out:
1208 	dst_hold(&rt->dst);
1209 
1210 	read_unlock_bh(&table->tb6_lock);
1211 
1212 	return rt;
1213 }
1214 
1215 static struct dst_entry *ip6_route_redirect(struct net *net,
1216 					const struct flowi6 *fl6,
1217 					const struct in6_addr *gateway)
1218 {
1219 	int flags = RT6_LOOKUP_F_HAS_SADDR;
1220 	struct ip6rd_flowi rdfl;
1221 
1222 	rdfl.fl6 = *fl6;
1223 	rdfl.gateway = *gateway;
1224 
1225 	return fib6_rule_lookup(net, &rdfl.fl6,
1226 				flags, __ip6_route_redirect);
1227 }
1228 
1229 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1230 {
1231 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1232 	struct dst_entry *dst;
1233 	struct flowi6 fl6;
1234 
1235 	memset(&fl6, 0, sizeof(fl6));
1236 	fl6.flowi6_oif = oif;
1237 	fl6.flowi6_mark = mark;
1238 	fl6.flowi6_flags = 0;
1239 	fl6.daddr = iph->daddr;
1240 	fl6.saddr = iph->saddr;
1241 	fl6.flowlabel = ip6_flowinfo(iph);
1242 
1243 	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1244 	rt6_do_redirect(dst, NULL, skb);
1245 	dst_release(dst);
1246 }
1247 EXPORT_SYMBOL_GPL(ip6_redirect);
1248 
1249 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1250 			    u32 mark)
1251 {
1252 	const struct ipv6hdr *iph = ipv6_hdr(skb);
1253 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1254 	struct dst_entry *dst;
1255 	struct flowi6 fl6;
1256 
1257 	memset(&fl6, 0, sizeof(fl6));
1258 	fl6.flowi6_oif = oif;
1259 	fl6.flowi6_mark = mark;
1260 	fl6.flowi6_flags = 0;
1261 	fl6.daddr = msg->dest;
1262 	fl6.saddr = iph->daddr;
1263 
1264 	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1265 	rt6_do_redirect(dst, NULL, skb);
1266 	dst_release(dst);
1267 }
1268 
1269 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1270 {
1271 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1272 }
1273 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1274 
1275 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1276 {
1277 	struct net_device *dev = dst->dev;
1278 	unsigned int mtu = dst_mtu(dst);
1279 	struct net *net = dev_net(dev);
1280 
1281 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1282 
1283 	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1284 		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1285 
1286 	/*
1287 	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1288 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1289 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
1290 	 * rely only on pmtu discovery"
1291 	 */
1292 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1293 		mtu = IPV6_MAXPLEN;
1294 	return mtu;
1295 }
1296 
1297 static unsigned int ip6_mtu(const struct dst_entry *dst)
1298 {
1299 	struct inet6_dev *idev;
1300 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1301 
1302 	if (mtu)
1303 		return mtu;
1304 
1305 	mtu = IPV6_MIN_MTU;
1306 
1307 	rcu_read_lock();
1308 	idev = __in6_dev_get(dst->dev);
1309 	if (idev)
1310 		mtu = idev->cnf.mtu6;
1311 	rcu_read_unlock();
1312 
1313 	return mtu;
1314 }
1315 
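/*
 * dsts created for locally generated ICMPv6/NDISC packets are not inserted
 * into the FIB; they are chained on this list and reclaimed by
 * icmp6_dst_gc() once their refcount drops to zero.
 */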
1316 static struct dst_entry *icmp6_dst_gc_list;
1317 static DEFINE_SPINLOCK(icmp6_dst_lock);
1318 
1319 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1320 				  struct flowi6 *fl6)
1321 {
1322 	struct dst_entry *dst;
1323 	struct rt6_info *rt;
1324 	struct inet6_dev *idev = in6_dev_get(dev);
1325 	struct net *net = dev_net(dev);
1326 
1327 	if (unlikely(!idev))
1328 		return ERR_PTR(-ENODEV);
1329 
1330 	rt = ip6_dst_alloc(net, dev, 0, NULL);
1331 	if (unlikely(!rt)) {
1332 		in6_dev_put(idev);
1333 		dst = ERR_PTR(-ENOMEM);
1334 		goto out;
1335 	}
1336 
1337 	rt->dst.flags |= DST_HOST;
1338 	rt->dst.output  = ip6_output;
1339 	atomic_set(&rt->dst.__refcnt, 1);
1340 	rt->rt6i_gateway  = fl6->daddr;
1341 	rt->rt6i_dst.addr = fl6->daddr;
1342 	rt->rt6i_dst.plen = 128;
1343 	rt->rt6i_idev     = idev;
1344 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1345 
1346 	spin_lock_bh(&icmp6_dst_lock);
1347 	rt->dst.next = icmp6_dst_gc_list;
1348 	icmp6_dst_gc_list = &rt->dst;
1349 	spin_unlock_bh(&icmp6_dst_lock);
1350 
1351 	fib6_force_start_gc(net);
1352 
1353 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1354 
1355 out:
1356 	return dst;
1357 }
1358 
1359 int icmp6_dst_gc(void)
1360 {
1361 	struct dst_entry *dst, **pprev;
1362 	int more = 0;
1363 
1364 	spin_lock_bh(&icmp6_dst_lock);
1365 	pprev = &icmp6_dst_gc_list;
1366 
1367 	while ((dst = *pprev) != NULL) {
1368 		if (!atomic_read(&dst->__refcnt)) {
1369 			*pprev = dst->next;
1370 			dst_free(dst);
1371 		} else {
1372 			pprev = &dst->next;
1373 			++more;
1374 		}
1375 	}
1376 
1377 	spin_unlock_bh(&icmp6_dst_lock);
1378 
1379 	return more;
1380 }
1381 
1382 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1383 			    void *arg)
1384 {
1385 	struct dst_entry *dst, **pprev;
1386 
1387 	spin_lock_bh(&icmp6_dst_lock);
1388 	pprev = &icmp6_dst_gc_list;
1389 	while ((dst = *pprev) != NULL) {
1390 		struct rt6_info *rt = (struct rt6_info *) dst;
1391 		if (func(rt, arg)) {
1392 			*pprev = dst->next;
1393 			dst_free(dst);
1394 		} else {
1395 			pprev = &dst->next;
1396 		}
1397 	}
1398 	spin_unlock_bh(&icmp6_dst_lock);
1399 }
1400 
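/*
 * Invoked by dst_alloc() once the number of cached entries exceeds
 * gc_thresh: it kicks fib6_run_gc() (forcing a full run when rt_max_size
 * is exceeded) and, while the table is still over rt_max_size, returns
 * nonzero so that the triggering allocation fails.
 */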
1401 static int ip6_dst_gc(struct dst_ops *ops)
1402 {
1403 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1404 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1405 	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1406 	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1407 	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1408 	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1409 	int entries;
1410 
1411 	entries = dst_entries_get_fast(ops);
1412 	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1413 	    entries <= rt_max_size)
1414 		goto out;
1415 
1416 	net->ipv6.ip6_rt_gc_expire++;
1417 	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
1418 	entries = dst_entries_get_slow(ops);
1419 	if (entries < ops->gc_thresh)
1420 		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1421 out:
1422 	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1423 	return entries > rt_max_size;
1424 }
1425 
1426 /*
1427  *
1428  */
1429 
1430 int ip6_route_add(struct fib6_config *cfg)
1431 {
1432 	int err;
1433 	struct net *net = cfg->fc_nlinfo.nl_net;
1434 	struct rt6_info *rt = NULL;
1435 	struct net_device *dev = NULL;
1436 	struct inet6_dev *idev = NULL;
1437 	struct fib6_table *table;
1438 	int addr_type;
1439 
1440 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1441 		return -EINVAL;
1442 #ifndef CONFIG_IPV6_SUBTREES
1443 	if (cfg->fc_src_len)
1444 		return -EINVAL;
1445 #endif
1446 	if (cfg->fc_ifindex) {
1447 		err = -ENODEV;
1448 		dev = dev_get_by_index(net, cfg->fc_ifindex);
1449 		if (!dev)
1450 			goto out;
1451 		idev = in6_dev_get(dev);
1452 		if (!idev)
1453 			goto out;
1454 	}
1455 
1456 	if (cfg->fc_metric == 0)
1457 		cfg->fc_metric = IP6_RT_PRIO_USER;
1458 
1459 	err = -ENOBUFS;
1460 	if (cfg->fc_nlinfo.nlh &&
1461 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1462 		table = fib6_get_table(net, cfg->fc_table);
1463 		if (!table) {
1464 			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1465 			table = fib6_new_table(net, cfg->fc_table);
1466 		}
1467 	} else {
1468 		table = fib6_new_table(net, cfg->fc_table);
1469 	}
1470 
1471 	if (!table)
1472 		goto out;
1473 
1474 	rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
1475 
1476 	if (!rt) {
1477 		err = -ENOMEM;
1478 		goto out;
1479 	}
1480 
1481 	if (cfg->fc_flags & RTF_EXPIRES)
1482 		rt6_set_expires(rt, jiffies +
1483 				clock_t_to_jiffies(cfg->fc_expires));
1484 	else
1485 		rt6_clean_expires(rt);
1486 
1487 	if (cfg->fc_protocol == RTPROT_UNSPEC)
1488 		cfg->fc_protocol = RTPROT_BOOT;
1489 	rt->rt6i_protocol = cfg->fc_protocol;
1490 
1491 	addr_type = ipv6_addr_type(&cfg->fc_dst);
1492 
1493 	if (addr_type & IPV6_ADDR_MULTICAST)
1494 		rt->dst.input = ip6_mc_input;
1495 	else if (cfg->fc_flags & RTF_LOCAL)
1496 		rt->dst.input = ip6_input;
1497 	else
1498 		rt->dst.input = ip6_forward;
1499 
1500 	rt->dst.output = ip6_output;
1501 
1502 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1503 	rt->rt6i_dst.plen = cfg->fc_dst_len;
1504 	if (rt->rt6i_dst.plen == 128)
1505 		rt->dst.flags |= DST_HOST;
1506 
1507 	if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1508 		u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1509 		if (!metrics) {
1510 			err = -ENOMEM;
1511 			goto out;
1512 		}
1513 		dst_init_metrics(&rt->dst, metrics, 0);
1514 	}
1515 #ifdef CONFIG_IPV6_SUBTREES
1516 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1517 	rt->rt6i_src.plen = cfg->fc_src_len;
1518 #endif
1519 
1520 	rt->rt6i_metric = cfg->fc_metric;
1521 
1522 	/* We cannot add true routes via loopback here,
1523 	   they would result in kernel looping; promote them to reject routes
1524 	 */
1525 	if ((cfg->fc_flags & RTF_REJECT) ||
1526 	    (dev && (dev->flags & IFF_LOOPBACK) &&
1527 	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
1528 	     !(cfg->fc_flags & RTF_LOCAL))) {
1529 		/* hold loopback dev/idev if we haven't done so. */
1530 		if (dev != net->loopback_dev) {
1531 			if (dev) {
1532 				dev_put(dev);
1533 				in6_dev_put(idev);
1534 			}
1535 			dev = net->loopback_dev;
1536 			dev_hold(dev);
1537 			idev = in6_dev_get(dev);
1538 			if (!idev) {
1539 				err = -ENODEV;
1540 				goto out;
1541 			}
1542 		}
1543 		rt->dst.output = ip6_pkt_discard_out;
1544 		rt->dst.input = ip6_pkt_discard;
1545 		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1546 		switch (cfg->fc_type) {
1547 		case RTN_BLACKHOLE:
1548 			rt->dst.error = -EINVAL;
1549 			break;
1550 		case RTN_PROHIBIT:
1551 			rt->dst.error = -EACCES;
1552 			break;
1553 		case RTN_THROW:
1554 			rt->dst.error = -EAGAIN;
1555 			break;
1556 		default:
1557 			rt->dst.error = -ENETUNREACH;
1558 			break;
1559 		}
1560 		goto install_route;
1561 	}
1562 
1563 	if (cfg->fc_flags & RTF_GATEWAY) {
1564 		const struct in6_addr *gw_addr;
1565 		int gwa_type;
1566 
1567 		gw_addr = &cfg->fc_gateway;
1568 		rt->rt6i_gateway = *gw_addr;
1569 		gwa_type = ipv6_addr_type(gw_addr);
1570 
1571 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1572 			struct rt6_info *grt;
1573 
1574 			/* IPv6 strictly forbids using non-link-local
1575 			   addresses as nexthop addresses.
1576 			   Otherwise, a router would not be able to send redirects.
1577 			   That is generally good, but in some (rare!) circumstances
1578 			   (SIT, PtP, NBMA NOARP links) it is handy to allow
1579 			   some exceptions. --ANK
1580 			 */
1581 			err = -EINVAL;
1582 			if (!(gwa_type & IPV6_ADDR_UNICAST))
1583 				goto out;
1584 
1585 			grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1586 
1587 			err = -EHOSTUNREACH;
1588 			if (!grt)
1589 				goto out;
1590 			if (dev) {
1591 				if (dev != grt->dst.dev) {
1592 					ip6_rt_put(grt);
1593 					goto out;
1594 				}
1595 			} else {
1596 				dev = grt->dst.dev;
1597 				idev = grt->rt6i_idev;
1598 				dev_hold(dev);
1599 				in6_dev_hold(grt->rt6i_idev);
1600 			}
1601 			if (!(grt->rt6i_flags & RTF_GATEWAY))
1602 				err = 0;
1603 			ip6_rt_put(grt);
1604 
1605 			if (err)
1606 				goto out;
1607 		}
1608 		err = -EINVAL;
1609 		if (!dev || (dev->flags & IFF_LOOPBACK))
1610 			goto out;
1611 	}
1612 
1613 	err = -ENODEV;
1614 	if (!dev)
1615 		goto out;
1616 
1617 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1618 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1619 			err = -EINVAL;
1620 			goto out;
1621 		}
1622 		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1623 		rt->rt6i_prefsrc.plen = 128;
1624 	} else
1625 		rt->rt6i_prefsrc.plen = 0;
1626 
1627 	rt->rt6i_flags = cfg->fc_flags;
1628 
1629 install_route:
1630 	if (cfg->fc_mx) {
1631 		struct nlattr *nla;
1632 		int remaining;
1633 
1634 		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1635 			int type = nla_type(nla);
1636 
1637 			if (type) {
1638 				if (type > RTAX_MAX) {
1639 					err = -EINVAL;
1640 					goto out;
1641 				}
1642 
1643 				dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1644 			}
1645 		}
1646 	}
1647 
1648 	rt->dst.dev = dev;
1649 	rt->rt6i_idev = idev;
1650 	rt->rt6i_table = table;
1651 
1652 	cfg->fc_nlinfo.nl_net = dev_net(dev);
1653 
1654 	return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1655 
1656 out:
1657 	if (dev)
1658 		dev_put(dev);
1659 	if (idev)
1660 		in6_dev_put(idev);
1661 	if (rt)
1662 		dst_free(&rt->dst);
1663 	return err;
1664 }
1665 
1666 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1667 {
1668 	int err;
1669 	struct fib6_table *table;
1670 	struct net *net = dev_net(rt->dst.dev);
1671 
1672 	if (rt == net->ipv6.ip6_null_entry) {
1673 		err = -ENOENT;
1674 		goto out;
1675 	}
1676 
1677 	table = rt->rt6i_table;
1678 	write_lock_bh(&table->tb6_lock);
1679 	err = fib6_del(rt, info);
1680 	write_unlock_bh(&table->tb6_lock);
1681 
1682 out:
1683 	ip6_rt_put(rt);
1684 	return err;
1685 }
1686 
1687 int ip6_del_rt(struct rt6_info *rt)
1688 {
1689 	struct nl_info info = {
1690 		.nl_net = dev_net(rt->dst.dev),
1691 	};
1692 	return __ip6_del_rt(rt, &info);
1693 }
1694 
1695 static int ip6_route_del(struct fib6_config *cfg)
1696 {
1697 	struct fib6_table *table;
1698 	struct fib6_node *fn;
1699 	struct rt6_info *rt;
1700 	int err = -ESRCH;
1701 
1702 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1703 	if (!table)
1704 		return err;
1705 
1706 	read_lock_bh(&table->tb6_lock);
1707 
1708 	fn = fib6_locate(&table->tb6_root,
1709 			 &cfg->fc_dst, cfg->fc_dst_len,
1710 			 &cfg->fc_src, cfg->fc_src_len);
1711 
1712 	if (fn) {
1713 		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1714 			if (cfg->fc_ifindex &&
1715 			    (!rt->dst.dev ||
1716 			     rt->dst.dev->ifindex != cfg->fc_ifindex))
1717 				continue;
1718 			if (cfg->fc_flags & RTF_GATEWAY &&
1719 			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1720 				continue;
1721 			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1722 				continue;
1723 			dst_hold(&rt->dst);
1724 			read_unlock_bh(&table->tb6_lock);
1725 
1726 			return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1727 		}
1728 	}
1729 	read_unlock_bh(&table->tb6_lock);
1730 
1731 	return err;
1732 }
1733 
1734 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1735 {
1736 	struct net *net = dev_net(skb->dev);
1737 	struct netevent_redirect netevent;
1738 	struct rt6_info *rt, *nrt = NULL;
1739 	struct ndisc_options ndopts;
1740 	struct inet6_dev *in6_dev;
1741 	struct neighbour *neigh;
1742 	struct rd_msg *msg;
1743 	int optlen, on_link;
1744 	u8 *lladdr;
1745 
1746 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1747 	optlen -= sizeof(*msg);
1748 
1749 	if (optlen < 0) {
1750 		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1751 		return;
1752 	}
1753 
1754 	msg = (struct rd_msg *)icmp6_hdr(skb);
1755 
1756 	if (ipv6_addr_is_multicast(&msg->dest)) {
1757 		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1758 		return;
1759 	}
1760 
1761 	on_link = 0;
1762 	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1763 		on_link = 1;
1764 	} else if (ipv6_addr_type(&msg->target) !=
1765 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1766 		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1767 		return;
1768 	}
1769 
1770 	in6_dev = __in6_dev_get(skb->dev);
1771 	if (!in6_dev)
1772 		return;
1773 	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1774 		return;
1775 
1776 	/* RFC2461 8.1:
1777 	 *	The IP source address of the Redirect MUST be the same as the current
1778 	 *	first-hop router for the specified ICMP Destination Address.
1779 	 */
1780 
1781 	if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1782 		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1783 		return;
1784 	}
1785 
1786 	lladdr = NULL;
1787 	if (ndopts.nd_opts_tgt_lladdr) {
1788 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1789 					     skb->dev);
1790 		if (!lladdr) {
1791 			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1792 			return;
1793 		}
1794 	}
1795 
1796 	rt = (struct rt6_info *) dst;
1797 	if (rt == net->ipv6.ip6_null_entry) {
1798 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1799 		return;
1800 	}
1801 
1802 	/* Redirect received -> path was valid.
1803 	 * Look, redirects are sent only in response to data packets,
1804 	 * so this nexthop is apparently reachable. --ANK
1805 	 */
1806 	dst_confirm(&rt->dst);
1807 
1808 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1809 	if (!neigh)
1810 		return;
1811 
1812 	/*
1813 	 *	We have finally decided to accept it.
1814 	 */
1815 
1816 	neigh_update(neigh, lladdr, NUD_STALE,
1817 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
1818 		     NEIGH_UPDATE_F_OVERRIDE|
1819 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1820 				     NEIGH_UPDATE_F_ISROUTER))
1821 		     );
1822 
1823 	nrt = ip6_rt_copy(rt, &msg->dest);
1824 	if (!nrt)
1825 		goto out;
1826 
1827 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1828 	if (on_link)
1829 		nrt->rt6i_flags &= ~RTF_GATEWAY;
1830 
1831 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1832 
1833 	if (ip6_ins_rt(nrt))
1834 		goto out;
1835 
1836 	netevent.old = &rt->dst;
1837 	netevent.new = &nrt->dst;
1838 	netevent.daddr = &msg->dest;
1839 	netevent.neigh = neigh;
1840 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1841 
1842 	if (rt->rt6i_flags & RTF_CACHE) {
1843 		rt = (struct rt6_info *) dst_clone(&rt->dst);
1844 		ip6_del_rt(rt);
1845 	}
1846 
1847 out:
1848 	neigh_release(neigh);
1849 }
1850 
1851 /*
1852  *	Misc support functions
1853  */
1854 
1855 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1856 				    const struct in6_addr *dest)
1857 {
1858 	struct net *net = dev_net(ort->dst.dev);
1859 	struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1860 					    ort->rt6i_table);
1861 
1862 	if (rt) {
1863 		rt->dst.input = ort->dst.input;
1864 		rt->dst.output = ort->dst.output;
1865 		rt->dst.flags |= DST_HOST;
1866 
1867 		rt->rt6i_dst.addr = *dest;
1868 		rt->rt6i_dst.plen = 128;
1869 		dst_copy_metrics(&rt->dst, &ort->dst);
1870 		rt->dst.error = ort->dst.error;
1871 		rt->rt6i_idev = ort->rt6i_idev;
1872 		if (rt->rt6i_idev)
1873 			in6_dev_hold(rt->rt6i_idev);
1874 		rt->dst.lastuse = jiffies;
1875 
1876 		if (ort->rt6i_flags & RTF_GATEWAY)
1877 			rt->rt6i_gateway = ort->rt6i_gateway;
1878 		else
1879 			rt->rt6i_gateway = *dest;
1880 		rt->rt6i_flags = ort->rt6i_flags;
1881 		if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1882 		    (RTF_DEFAULT | RTF_ADDRCONF))
1883 			rt6_set_from(rt, ort);
1884 		rt->rt6i_metric = 0;
1885 
1886 #ifdef CONFIG_IPV6_SUBTREES
1887 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1888 #endif
1889 		memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1890 		rt->rt6i_table = ort->rt6i_table;
1891 	}
1892 	return rt;
1893 }
1894 
1895 #ifdef CONFIG_IPV6_ROUTE_INFO
1896 static struct rt6_info *rt6_get_route_info(struct net *net,
1897 					   const struct in6_addr *prefix, int prefixlen,
1898 					   const struct in6_addr *gwaddr, int ifindex)
1899 {
1900 	struct fib6_node *fn;
1901 	struct rt6_info *rt = NULL;
1902 	struct fib6_table *table;
1903 
1904 	table = fib6_get_table(net, RT6_TABLE_INFO);
1905 	if (!table)
1906 		return NULL;
1907 
1908 	read_lock_bh(&table->tb6_lock);
1909 	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
1910 	if (!fn)
1911 		goto out;
1912 
1913 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1914 		if (rt->dst.dev->ifindex != ifindex)
1915 			continue;
1916 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1917 			continue;
1918 		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1919 			continue;
1920 		dst_hold(&rt->dst);
1921 		break;
1922 	}
1923 out:
1924 	read_unlock_bh(&table->tb6_lock);
1925 	return rt;
1926 }
1927 
1928 static struct rt6_info *rt6_add_route_info(struct net *net,
1929 					   const struct in6_addr *prefix, int prefixlen,
1930 					   const struct in6_addr *gwaddr, int ifindex,
1931 					   unsigned int pref)
1932 {
1933 	struct fib6_config cfg = {
1934 		.fc_table	= RT6_TABLE_INFO,
1935 		.fc_metric	= IP6_RT_PRIO_USER,
1936 		.fc_ifindex	= ifindex,
1937 		.fc_dst_len	= prefixlen,
1938 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1939 				  RTF_UP | RTF_PREF(pref),
1940 		.fc_nlinfo.portid = 0,
1941 		.fc_nlinfo.nlh = NULL,
1942 		.fc_nlinfo.nl_net = net,
1943 	};
1944 
1945 	cfg.fc_dst = *prefix;
1946 	cfg.fc_gateway = *gwaddr;
1947 
1948 	/* We should treat it as a default route if prefix length is 0. */
1949 	if (!prefixlen)
1950 		cfg.fc_flags |= RTF_DEFAULT;
1951 
1952 	ip6_route_add(&cfg);
1953 
1954 	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1955 }
1956 #endif
1957 
1958 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1959 {
1960 	struct rt6_info *rt;
1961 	struct fib6_table *table;
1962 
1963 	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1964 	if (!table)
1965 		return NULL;
1966 
1967 	read_lock_bh(&table->tb6_lock);
1968 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1969 		if (dev == rt->dst.dev &&
1970 		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1971 		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
1972 			break;
1973 	}
1974 	if (rt)
1975 		dst_hold(&rt->dst);
1976 	read_unlock_bh(&table->tb6_lock);
1977 	return rt;
1978 }
1979 
1980 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1981 				     struct net_device *dev,
1982 				     unsigned int pref)
1983 {
1984 	struct fib6_config cfg = {
1985 		.fc_table	= RT6_TABLE_DFLT,
1986 		.fc_metric	= IP6_RT_PRIO_USER,
1987 		.fc_ifindex	= dev->ifindex,
1988 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1989 				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1990 		.fc_nlinfo.portid = 0,
1991 		.fc_nlinfo.nlh = NULL,
1992 		.fc_nlinfo.nl_net = dev_net(dev),
1993 	};
1994 
1995 	cfg.fc_gateway = *gwaddr;
1996 
1997 	ip6_route_add(&cfg);
1998 
1999 	return rt6_get_dflt_router(gwaddr, dev);
2000 }
2001 
2002 void rt6_purge_dflt_routers(struct net *net)
2003 {
2004 	struct rt6_info *rt;
2005 	struct fib6_table *table;
2006 
2007 	/* NOTE: Keep consistent with rt6_get_dflt_router */
2008 	table = fib6_get_table(net, RT6_TABLE_DFLT);
2009 	if (!table)
2010 		return;
2011 
2012 restart:
2013 	read_lock_bh(&table->tb6_lock);
2014 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2015 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2016 		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2017 			dst_hold(&rt->dst);
2018 			read_unlock_bh(&table->tb6_lock);
2019 			ip6_del_rt(rt);
2020 			goto restart;
2021 		}
2022 	}
2023 	read_unlock_bh(&table->tb6_lock);
2024 }
2025 
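/* Translate a legacy ioctl in6_rtmsg into a fib6_config. */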
2026 static void rtmsg_to_fib6_config(struct net *net,
2027 				 struct in6_rtmsg *rtmsg,
2028 				 struct fib6_config *cfg)
2029 {
2030 	memset(cfg, 0, sizeof(*cfg));
2031 
2032 	cfg->fc_table = RT6_TABLE_MAIN;
2033 	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2034 	cfg->fc_metric = rtmsg->rtmsg_metric;
2035 	cfg->fc_expires = rtmsg->rtmsg_info;
2036 	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2037 	cfg->fc_src_len = rtmsg->rtmsg_src_len;
2038 	cfg->fc_flags = rtmsg->rtmsg_flags;
2039 
2040 	cfg->fc_nlinfo.nl_net = net;
2041 
2042 	cfg->fc_dst = rtmsg->rtmsg_dst;
2043 	cfg->fc_src = rtmsg->rtmsg_src;
2044 	cfg->fc_gateway = rtmsg->rtmsg_gateway;
2045 }
2046 
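/*
 * SIOCADDRT/SIOCDELRT handler: requires CAP_NET_ADMIN in the owning
 * user namespace and takes the RTNL lock around the add/delete.
 */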
2047 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2048 {
2049 	struct fib6_config cfg;
2050 	struct in6_rtmsg rtmsg;
2051 	int err;
2052 
2053 	switch (cmd) {
2054 	case SIOCADDRT:		/* Add a route */
2055 	case SIOCDELRT:		/* Delete a route */
2056 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2057 			return -EPERM;
2058 		err = copy_from_user(&rtmsg, arg,
2059 				     sizeof(struct in6_rtmsg));
2060 		if (err)
2061 			return -EFAULT;
2062 
2063 		rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2064 
2065 		rtnl_lock();
2066 		switch (cmd) {
2067 		case SIOCADDRT:
2068 			err = ip6_route_add(&cfg);
2069 			break;
2070 		case SIOCDELRT:
2071 			err = ip6_route_del(&cfg);
2072 			break;
2073 		default:
2074 			err = -EINVAL;
2075 		}
2076 		rtnl_unlock();
2077 
2078 		return err;
2079 	}
2080 
2081 	return -EINVAL;
2082 }
2083 
2084 /*
2085  *	Drop the packet on the floor
2086  */
2087 
2088 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2089 {
2090 	int type;
2091 	struct dst_entry *dst = skb_dst(skb);
2092 	switch (ipstats_mib_noroutes) {
2093 	case IPSTATS_MIB_INNOROUTES:
2094 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2095 		if (type == IPV6_ADDR_ANY) {
2096 			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2097 				      IPSTATS_MIB_INADDRERRORS);
2098 			break;
2099 		}
2100 		/* FALLTHROUGH */
2101 	case IPSTATS_MIB_OUTNOROUTES:
2102 		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2103 			      ipstats_mib_noroutes);
2104 		break;
2105 	}
2106 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2107 	kfree_skb(skb);
2108 	return 0;
2109 }
2110 
2111 static int ip6_pkt_discard(struct sk_buff *skb)
2112 {
2113 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2114 }
2115 
2116 static int ip6_pkt_discard_out(struct sk_buff *skb)
2117 {
2118 	skb->dev = skb_dst(skb)->dev;
2119 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2120 }
2121 
2122 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2123 
2124 static int ip6_pkt_prohibit(struct sk_buff *skb)
2125 {
2126 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2127 }
2128 
2129 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2130 {
2131 	skb->dev = skb_dst(skb)->dev;
2132 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2133 }
2134 
2135 #endif
2136 
2137 /*
2138  *	Allocate a dst for local (unicast / anycast) address.
2139  */
2140 
2141 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2142 				    const struct in6_addr *addr,
2143 				    bool anycast)
2144 {
2145 	struct net *net = dev_net(idev->dev);
2146 	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
2147 
2148 	if (!rt) {
2149 		net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
2150 		return ERR_PTR(-ENOMEM);
2151 	}
2152 
2153 	in6_dev_hold(idev);
2154 
2155 	rt->dst.flags |= DST_HOST;
2156 	rt->dst.input = ip6_input;
2157 	rt->dst.output = ip6_output;
2158 	rt->rt6i_idev = idev;
2159 
2160 	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2161 	if (anycast)
2162 		rt->rt6i_flags |= RTF_ANYCAST;
2163 	else
2164 		rt->rt6i_flags |= RTF_LOCAL;
2165 
2166 	rt->rt6i_gateway  = *addr;
2167 	rt->rt6i_dst.addr = *addr;
2168 	rt->rt6i_dst.plen = 128;
2169 	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2170 
2171 	atomic_set(&rt->dst.__refcnt, 1);
2172 
2173 	return rt;
2174 }
2175 
2176 int ip6_route_get_saddr(struct net *net,
2177 			struct rt6_info *rt,
2178 			const struct in6_addr *daddr,
2179 			unsigned int prefs,
2180 			struct in6_addr *saddr)
2181 {
2182 	struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
2183 	int err = 0;
2184 	if (rt->rt6i_prefsrc.plen)
2185 		*saddr = rt->rt6i_prefsrc.addr;
2186 	else
2187 		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2188 					 daddr, prefs, saddr);
2189 	return err;
2190 }
2191 
2192 /* Remove a deleted IP address from prefsrc entries. */
2193 struct arg_dev_net_ip {
2194 	struct net_device *dev;
2195 	struct net *net;
2196 	struct in6_addr *addr;
2197 };
2198 
2199 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2200 {
2201 	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2202 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2203 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2204 
2205 	if (((void *)rt->dst.dev == dev || !dev) &&
2206 	    rt != net->ipv6.ip6_null_entry &&
2207 	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2208 		/* remove prefsrc entry */
2209 		rt->rt6i_prefsrc.plen = 0;
2210 	}
2211 	return 0;
2212 }
2213 
2214 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2215 {
2216 	struct net *net = dev_net(ifp->idev->dev);
2217 	struct arg_dev_net_ip adni = {
2218 		.dev = ifp->idev->dev,
2219 		.net = net,
2220 		.addr = &ifp->addr,
2221 	};
2222 	fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2223 }
2224 
2225 struct arg_dev_net {
2226 	struct net_device *dev;
2227 	struct net *net;
2228 };
2229 
2230 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2231 {
2232 	const struct arg_dev_net *adn = arg;
2233 	const struct net_device *dev = adn->dev;
2234 
2235 	if ((rt->dst.dev == dev || !dev) &&
2236 	    rt != adn->net->ipv6.ip6_null_entry)
2237 		return -1;
2238 
2239 	return 0;
2240 }
2241 
2242 void rt6_ifdown(struct net *net, struct net_device *dev)
2243 {
2244 	struct arg_dev_net adn = {
2245 		.dev = dev,
2246 		.net = net,
2247 	};
2248 
2249 	fib6_clean_all(net, fib6_ifdown, 0, &adn);
2250 	icmp6_clean_all(fib6_ifdown, &adn);
2251 }
2252 
2253 struct rt6_mtu_change_arg {
2254 	struct net_device *dev;
2255 	unsigned int mtu;
2256 };
2257 
2258 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2259 {
2260 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2261 	struct inet6_dev *idev;
2262 
2263 	/* In IPv6, PMTU discovery is not optional, so the
2264 	   RTAX_MTU lock cannot disable it.
2265 	   We still use this lock to block changes
2266 	   caused by addrconf/ndisc.
2267 	*/
2268 
2269 	idev = __in6_dev_get(arg->dev);
2270 	if (!idev)
2271 		return 0;
2272 
2273 	/* For an administrative MTU increase, there is no way to discover
2274 	   the IPv6 PMTU increase, so the PMTU must be updated here.
2275 	   Since RFC 1981 doesn't cover administrative MTU increases,
2276 	   updating the PMTU on an increase is a MUST (e.g. jumbo frames).
2277 	 */
2278 	/*
2279 	   If the new MTU is less than the route PMTU, the new MTU will be
2280 	   the lowest MTU in the path; update the route PMTU to reflect the
2281 	   decrease.  If the new MTU is greater than the route PMTU, and the
2282 	   old MTU was the lowest MTU in the path, update the route PMTU to
2283 	   reflect the increase.  In that case, if another node's MTU is now
2284 	   the lowest in the path, a Packet Too Big message will trigger
2285 	   PMTU discovery again.
2286 	 */
2287 	if (rt->dst.dev == arg->dev &&
2288 	    !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2289 	    (dst_mtu(&rt->dst) >= arg->mtu ||
2290 	     (dst_mtu(&rt->dst) < arg->mtu &&
2291 	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2292 		dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2293 	}
2294 	return 0;
2295 }
2296 
2297 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2298 {
2299 	struct rt6_mtu_change_arg arg = {
2300 		.dev = dev,
2301 		.mtu = mtu,
2302 	};
2303 
2304 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2305 }
2306 
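/* Netlink attribute policy for IPv6 RTM_{NEW,DEL,GET}ROUTE requests. */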
2307 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2308 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
2309 	[RTA_OIF]               = { .type = NLA_U32 },
2310 	[RTA_IIF]		= { .type = NLA_U32 },
2311 	[RTA_PRIORITY]          = { .type = NLA_U32 },
2312 	[RTA_METRICS]           = { .type = NLA_NESTED },
2313 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
2314 };
2315 
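/* Parse an RTM_NEWROUTE/RTM_DELROUTE message into a fib6_config. */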
2316 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2317 			      struct fib6_config *cfg)
2318 {
2319 	struct rtmsg *rtm;
2320 	struct nlattr *tb[RTA_MAX+1];
2321 	int err;
2322 
2323 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2324 	if (err < 0)
2325 		goto errout;
2326 
2327 	err = -EINVAL;
2328 	rtm = nlmsg_data(nlh);
2329 	memset(cfg, 0, sizeof(*cfg));
2330 
2331 	cfg->fc_table = rtm->rtm_table;
2332 	cfg->fc_dst_len = rtm->rtm_dst_len;
2333 	cfg->fc_src_len = rtm->rtm_src_len;
2334 	cfg->fc_flags = RTF_UP;
2335 	cfg->fc_protocol = rtm->rtm_protocol;
2336 	cfg->fc_type = rtm->rtm_type;
2337 
2338 	if (rtm->rtm_type == RTN_UNREACHABLE ||
2339 	    rtm->rtm_type == RTN_BLACKHOLE ||
2340 	    rtm->rtm_type == RTN_PROHIBIT ||
2341 	    rtm->rtm_type == RTN_THROW)
2342 		cfg->fc_flags |= RTF_REJECT;
2343 
2344 	if (rtm->rtm_type == RTN_LOCAL)
2345 		cfg->fc_flags |= RTF_LOCAL;
2346 
2347 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2348 	cfg->fc_nlinfo.nlh = nlh;
2349 	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2350 
2351 	if (tb[RTA_GATEWAY]) {
2352 		nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2353 		cfg->fc_flags |= RTF_GATEWAY;
2354 	}
2355 
2356 	if (tb[RTA_DST]) {
2357 		int plen = (rtm->rtm_dst_len + 7) >> 3;
2358 
2359 		if (nla_len(tb[RTA_DST]) < plen)
2360 			goto errout;
2361 
2362 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2363 	}
2364 
2365 	if (tb[RTA_SRC]) {
2366 		int plen = (rtm->rtm_src_len + 7) >> 3;
2367 
2368 		if (nla_len(tb[RTA_SRC]) < plen)
2369 			goto errout;
2370 
2371 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2372 	}
2373 
2374 	if (tb[RTA_PREFSRC])
2375 		nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2376 
2377 	if (tb[RTA_OIF])
2378 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2379 
2380 	if (tb[RTA_PRIORITY])
2381 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2382 
2383 	if (tb[RTA_METRICS]) {
2384 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2385 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2386 	}
2387 
2388 	if (tb[RTA_TABLE])
2389 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2390 
2391 	if (tb[RTA_MULTIPATH]) {
2392 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2393 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2394 	}
2395 
2396 	err = 0;
2397 errout:
2398 	return err;
2399 }
2400 
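/*
 * Add (add != 0) or delete every nexthop of an RTA_MULTIPATH attribute
 * as an individual route.  If an add fails part-way through, the
 * nexthops added so far are deleted again; on delete, failures are
 * recorded but the remaining nexthops are still processed.
 */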
2401 static int ip6_route_multipath(struct fib6_config *cfg, int add)
2402 {
2403 	struct fib6_config r_cfg;
2404 	struct rtnexthop *rtnh;
2405 	int remaining;
2406 	int attrlen;
2407 	int err = 0, last_err = 0;
2408 
2409 beginning:
2410 	rtnh = (struct rtnexthop *)cfg->fc_mp;
2411 	remaining = cfg->fc_mp_len;
2412 
2413 	/* Parse a Multipath Entry */
2414 	while (rtnh_ok(rtnh, remaining)) {
2415 		memcpy(&r_cfg, cfg, sizeof(*cfg));
2416 		if (rtnh->rtnh_ifindex)
2417 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2418 
2419 		attrlen = rtnh_attrlen(rtnh);
2420 		if (attrlen > 0) {
2421 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2422 
2423 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2424 			if (nla) {
2425 				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2426 				r_cfg.fc_flags |= RTF_GATEWAY;
2427 			}
2428 		}
2429 		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2430 		if (err) {
2431 			last_err = err;
2432 			/* If we are trying to remove a route, do not stop the
2433 			 * loop when ip6_route_del() fails (because the nexthop
2434 			 * is already gone); we should try to remove all nexthops.
2435 			 */
2436 			if (add) {
2437 				/* If add fails, we should try to delete all
2438 				 * next hops that have been already added.
2439 				 */
2440 				add = 0;
2441 				goto beginning;
2442 			}
2443 		}
2444 		/* Because each route is added as a single route, we remove
2445 		 * this flag after the first nexthop (if there is a collision,
2446 		 * we have already failed to add the first nexthop:
2447 		 * fib6_add_rt2node() has rejected it).
2448 		 */
2449 		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2450 		rtnh = rtnh_next(rtnh, &remaining);
2451 	}
2452 
2453 	return last_err;
2454 }
2455 
2456 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2457 {
2458 	struct fib6_config cfg;
2459 	int err;
2460 
2461 	err = rtm_to_fib6_config(skb, nlh, &cfg);
2462 	if (err < 0)
2463 		return err;
2464 
2465 	if (cfg.fc_mp)
2466 		return ip6_route_multipath(&cfg, 0);
2467 	else
2468 		return ip6_route_del(&cfg);
2469 }
2470 
2471 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2472 {
2473 	struct fib6_config cfg;
2474 	int err;
2475 
2476 	err = rtm_to_fib6_config(skb, nlh, &cfg);
2477 	if (err < 0)
2478 		return err;
2479 
2480 	if (cfg.fc_mp)
2481 		return ip6_route_multipath(&cfg, 1);
2482 	else
2483 		return ip6_route_add(&cfg);
2484 }
2485 
2486 static inline size_t rt6_nlmsg_size(void)
2487 {
2488 	return NLMSG_ALIGN(sizeof(struct rtmsg))
2489 	       + nla_total_size(16) /* RTA_SRC */
2490 	       + nla_total_size(16) /* RTA_DST */
2491 	       + nla_total_size(16) /* RTA_GATEWAY */
2492 	       + nla_total_size(16) /* RTA_PREFSRC */
2493 	       + nla_total_size(4) /* RTA_TABLE */
2494 	       + nla_total_size(4) /* RTA_IIF */
2495 	       + nla_total_size(4) /* RTA_OIF */
2496 	       + nla_total_size(4) /* RTA_PRIORITY */
2497 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2498 	       + nla_total_size(sizeof(struct rta_cacheinfo));
2499 }
2500 
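/*
 * Fill one routing netlink message for @rt.  When @prefix is set, only
 * RTF_PREFIX_RT routes are reported; other routes return 1 (skipped).
 * Returns a negative error such as -EMSGSIZE when the skb runs out of
 * room.
 */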
2501 static int rt6_fill_node(struct net *net,
2502 			 struct sk_buff *skb, struct rt6_info *rt,
2503 			 struct in6_addr *dst, struct in6_addr *src,
2504 			 int iif, int type, u32 portid, u32 seq,
2505 			 int prefix, int nowait, unsigned int flags)
2506 {
2507 	struct rtmsg *rtm;
2508 	struct nlmsghdr *nlh;
2509 	long expires;
2510 	u32 table;
2511 
2512 	if (prefix) {	/* user wants prefix routes only */
2513 		if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2514 			/* success since this is not a prefix route */
2515 			return 1;
2516 		}
2517 	}
2518 
2519 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2520 	if (!nlh)
2521 		return -EMSGSIZE;
2522 
2523 	rtm = nlmsg_data(nlh);
2524 	rtm->rtm_family = AF_INET6;
2525 	rtm->rtm_dst_len = rt->rt6i_dst.plen;
2526 	rtm->rtm_src_len = rt->rt6i_src.plen;
2527 	rtm->rtm_tos = 0;
2528 	if (rt->rt6i_table)
2529 		table = rt->rt6i_table->tb6_id;
2530 	else
2531 		table = RT6_TABLE_UNSPEC;
2532 	rtm->rtm_table = table;
2533 	if (nla_put_u32(skb, RTA_TABLE, table))
2534 		goto nla_put_failure;
2535 	if (rt->rt6i_flags & RTF_REJECT) {
2536 		switch (rt->dst.error) {
2537 		case -EINVAL:
2538 			rtm->rtm_type = RTN_BLACKHOLE;
2539 			break;
2540 		case -EACCES:
2541 			rtm->rtm_type = RTN_PROHIBIT;
2542 			break;
2543 		case -EAGAIN:
2544 			rtm->rtm_type = RTN_THROW;
2545 			break;
2546 		default:
2547 			rtm->rtm_type = RTN_UNREACHABLE;
2548 			break;
2549 		}
2550 	}
2551 	else if (rt->rt6i_flags & RTF_LOCAL)
2552 		rtm->rtm_type = RTN_LOCAL;
2553 	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2554 		rtm->rtm_type = RTN_LOCAL;
2555 	else
2556 		rtm->rtm_type = RTN_UNICAST;
2557 	rtm->rtm_flags = 0;
2558 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2559 	rtm->rtm_protocol = rt->rt6i_protocol;
2560 	if (rt->rt6i_flags & RTF_DYNAMIC)
2561 		rtm->rtm_protocol = RTPROT_REDIRECT;
2562 	else if (rt->rt6i_flags & RTF_ADDRCONF) {
2563 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2564 			rtm->rtm_protocol = RTPROT_RA;
2565 		else
2566 			rtm->rtm_protocol = RTPROT_KERNEL;
2567 	}
2568 
2569 	if (rt->rt6i_flags & RTF_CACHE)
2570 		rtm->rtm_flags |= RTM_F_CLONED;
2571 
2572 	if (dst) {
2573 		if (nla_put(skb, RTA_DST, 16, dst))
2574 			goto nla_put_failure;
2575 		rtm->rtm_dst_len = 128;
2576 	} else if (rtm->rtm_dst_len)
2577 		if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2578 			goto nla_put_failure;
2579 #ifdef CONFIG_IPV6_SUBTREES
2580 	if (src) {
2581 		if (nla_put(skb, RTA_SRC, 16, src))
2582 			goto nla_put_failure;
2583 		rtm->rtm_src_len = 128;
2584 	} else if (rtm->rtm_src_len &&
2585 		   nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2586 		goto nla_put_failure;
2587 #endif
2588 	if (iif) {
2589 #ifdef CONFIG_IPV6_MROUTE
2590 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2591 			int err = ip6mr_get_route(net, skb, rtm, nowait);
2592 			if (err <= 0) {
2593 				if (!nowait) {
2594 					if (err == 0)
2595 						return 0;
2596 					goto nla_put_failure;
2597 				} else {
2598 					if (err == -EMSGSIZE)
2599 						goto nla_put_failure;
2600 				}
2601 			}
2602 		} else
2603 #endif
2604 			if (nla_put_u32(skb, RTA_IIF, iif))
2605 				goto nla_put_failure;
2606 	} else if (dst) {
2607 		struct in6_addr saddr_buf;
2608 		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2609 		    nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2610 			goto nla_put_failure;
2611 	}
2612 
2613 	if (rt->rt6i_prefsrc.plen) {
2614 		struct in6_addr saddr_buf;
2615 		saddr_buf = rt->rt6i_prefsrc.addr;
2616 		if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2617 			goto nla_put_failure;
2618 	}
2619 
2620 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2621 		goto nla_put_failure;
2622 
2623 	if (rt->rt6i_flags & RTF_GATEWAY) {
2624 		if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
2625 			goto nla_put_failure;
2626 	}
2627 
2628 	if (rt->dst.dev &&
2629 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2630 		goto nla_put_failure;
2631 	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2632 		goto nla_put_failure;
2633 
2634 	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2635 
2636 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2637 		goto nla_put_failure;
2638 
2639 	return nlmsg_end(skb, nlh);
2640 
2641 nla_put_failure:
2642 	nlmsg_cancel(skb, nlh);
2643 	return -EMSGSIZE;
2644 }
2645 
2646 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2647 {
2648 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2649 	int prefix;
2650 
2651 	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2652 		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2653 		prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2654 	} else
2655 		prefix = 0;
2656 
2657 	return rt6_fill_node(arg->net,
2658 		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2659 		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2660 		     prefix, 0, NLM_F_MULTI);
2661 }
2662 
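/*
 * RTM_GETROUTE handler: look up the route for the requested source,
 * destination and interface, and unicast a single RTM_NEWROUTE reply.
 */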
2663 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
2664 {
2665 	struct net *net = sock_net(in_skb->sk);
2666 	struct nlattr *tb[RTA_MAX+1];
2667 	struct rt6_info *rt;
2668 	struct sk_buff *skb;
2669 	struct rtmsg *rtm;
2670 	struct flowi6 fl6;
2671 	int err, iif = 0, oif = 0;
2672 
2673 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2674 	if (err < 0)
2675 		goto errout;
2676 
2677 	err = -EINVAL;
2678 	memset(&fl6, 0, sizeof(fl6));
2679 
2680 	if (tb[RTA_SRC]) {
2681 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2682 			goto errout;
2683 
2684 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2685 	}
2686 
2687 	if (tb[RTA_DST]) {
2688 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2689 			goto errout;
2690 
2691 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2692 	}
2693 
2694 	if (tb[RTA_IIF])
2695 		iif = nla_get_u32(tb[RTA_IIF]);
2696 
2697 	if (tb[RTA_OIF])
2698 		oif = nla_get_u32(tb[RTA_OIF]);
2699 
2700 	if (iif) {
2701 		struct net_device *dev;
2702 		int flags = 0;
2703 
2704 		dev = __dev_get_by_index(net, iif);
2705 		if (!dev) {
2706 			err = -ENODEV;
2707 			goto errout;
2708 		}
2709 
2710 		fl6.flowi6_iif = iif;
2711 
2712 		if (!ipv6_addr_any(&fl6.saddr))
2713 			flags |= RT6_LOOKUP_F_HAS_SADDR;
2714 
2715 		rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2716 							       flags);
2717 	} else {
2718 		fl6.flowi6_oif = oif;
2719 
2720 		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2721 	}
2722 
2723 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2724 	if (!skb) {
2725 		ip6_rt_put(rt);
2726 		err = -ENOBUFS;
2727 		goto errout;
2728 	}
2729 
2730 	/* Reserve room for dummy headers; this skb can pass
2731 	   through a good chunk of the routing engine.
2732 	 */
2733 	skb_reset_mac_header(skb);
2734 	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2735 
2736 	skb_dst_set(skb, &rt->dst);
2737 
2738 	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2739 			    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2740 			    nlh->nlmsg_seq, 0, 0, 0);
2741 	if (err < 0) {
2742 		kfree_skb(skb);
2743 		goto errout;
2744 	}
2745 
2746 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2747 errout:
2748 	return err;
2749 }
2750 
2751 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2752 {
2753 	struct sk_buff *skb;
2754 	struct net *net = info->nl_net;
2755 	u32 seq;
2756 	int err;
2757 
2758 	err = -ENOBUFS;
2759 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2760 
2761 	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2762 	if (!skb)
2763 		goto errout;
2764 
2765 	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2766 				event, info->portid, seq, 0, 0, 0);
2767 	if (err < 0) {
2768 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2769 		WARN_ON(err == -EMSGSIZE);
2770 		kfree_skb(skb);
2771 		goto errout;
2772 	}
2773 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2774 		    info->nlh, gfp_any());
2775 	return;
2776 errout:
2777 	if (err < 0)
2778 		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2779 }
2780 
2781 static int ip6_route_dev_notify(struct notifier_block *this,
2782 				unsigned long event, void *ptr)
2783 {
2784 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2785 	struct net *net = dev_net(dev);
2786 
2787 	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2788 		net->ipv6.ip6_null_entry->dst.dev = dev;
2789 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2790 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2791 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2792 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2793 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2794 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2795 #endif
2796 	}
2797 
2798 	return NOTIFY_OK;
2799 }
2800 
2801 /*
2802  *	/proc
2803  */
2804 
2805 #ifdef CONFIG_PROC_FS
2806 
2807 struct rt6_proc_arg
2808 {
2809 	char *buffer;
2810 	int offset;
2811 	int length;
2812 	int skip;
2813 	int len;
2814 };
2815 
2816 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2817 {
2818 	struct seq_file *m = p_arg;
2819 
2820 	seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2821 
2822 #ifdef CONFIG_IPV6_SUBTREES
2823 	seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2824 #else
2825 	seq_puts(m, "00000000000000000000000000000000 00 ");
2826 #endif
2827 	if (rt->rt6i_flags & RTF_GATEWAY) {
2828 		seq_printf(m, "%pi6", &rt->rt6i_gateway);
2829 	} else {
2830 		seq_puts(m, "00000000000000000000000000000000");
2831 	}
2832 	seq_printf(m, " %08x %08x %08x %08x %8s\n",
2833 		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2834 		   rt->dst.__use, rt->rt6i_flags,
2835 		   rt->dst.dev ? rt->dst.dev->name : "");
2836 	return 0;
2837 }
2838 
2839 static int ipv6_route_show(struct seq_file *m, void *v)
2840 {
2841 	struct net *net = (struct net *)m->private;
2842 	fib6_clean_all_ro(net, rt6_info_route, 0, m);
2843 	return 0;
2844 }
2845 
2846 static int ipv6_route_open(struct inode *inode, struct file *file)
2847 {
2848 	return single_open_net(inode, file, ipv6_route_show);
2849 }
2850 
2851 static const struct file_operations ipv6_route_proc_fops = {
2852 	.owner		= THIS_MODULE,
2853 	.open		= ipv6_route_open,
2854 	.read		= seq_read,
2855 	.llseek		= seq_lseek,
2856 	.release	= single_release_net,
2857 };
2858 
2859 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2860 {
2861 	struct net *net = (struct net *)seq->private;
2862 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2863 		   net->ipv6.rt6_stats->fib_nodes,
2864 		   net->ipv6.rt6_stats->fib_route_nodes,
2865 		   net->ipv6.rt6_stats->fib_rt_alloc,
2866 		   net->ipv6.rt6_stats->fib_rt_entries,
2867 		   net->ipv6.rt6_stats->fib_rt_cache,
2868 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2869 		   net->ipv6.rt6_stats->fib_discarded_routes);
2870 
2871 	return 0;
2872 }
2873 
2874 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2875 {
2876 	return single_open_net(inode, file, rt6_stats_seq_show);
2877 }
2878 
2879 static const struct file_operations rt6_stats_seq_fops = {
2880 	.owner	 = THIS_MODULE,
2881 	.open	 = rt6_stats_seq_open,
2882 	.read	 = seq_read,
2883 	.llseek	 = seq_lseek,
2884 	.release = single_release_net,
2885 };
2886 #endif	/* CONFIG_PROC_FS */
2887 
2888 #ifdef CONFIG_SYSCTL
2889 
2890 static
2891 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
2892 			      void __user *buffer, size_t *lenp, loff_t *ppos)
2893 {
2894 	struct net *net;
2895 	int delay;
2896 	if (!write)
2897 		return -EINVAL;
2898 
2899 	net = (struct net *)ctl->extra1;
2900 	delay = net->ipv6.sysctl.flush_delay;
2901 	proc_dointvec(ctl, write, buffer, lenp, ppos);
2902 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
2903 	return 0;
2904 }
2905 
2906 struct ctl_table ipv6_route_table_template[] = {
2907 	{
2908 		.procname	=	"flush",
2909 		.data		=	&init_net.ipv6.sysctl.flush_delay,
2910 		.maxlen		=	sizeof(int),
2911 		.mode		=	0200,
2912 		.proc_handler	=	ipv6_sysctl_rtcache_flush
2913 		.proc_handler	=	ipv6_sysctl_rtcache_flush,
2914 	{
2915 		.procname	=	"gc_thresh",
2916 		.data		=	&ip6_dst_ops_template.gc_thresh,
2917 		.maxlen		=	sizeof(int),
2918 		.mode		=	0644,
2919 		.proc_handler	=	proc_dointvec,
2920 	},
2921 	{
2922 		.procname	=	"max_size",
2923 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
2924 		.maxlen		=	sizeof(int),
2925 		.mode		=	0644,
2926 		.proc_handler	=	proc_dointvec,
2927 	},
2928 	{
2929 		.procname	=	"gc_min_interval",
2930 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2931 		.maxlen		=	sizeof(int),
2932 		.mode		=	0644,
2933 		.proc_handler	=	proc_dointvec_jiffies,
2934 	},
2935 	{
2936 		.procname	=	"gc_timeout",
2937 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2938 		.maxlen		=	sizeof(int),
2939 		.mode		=	0644,
2940 		.proc_handler	=	proc_dointvec_jiffies,
2941 	},
2942 	{
2943 		.procname	=	"gc_interval",
2944 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
2945 		.maxlen		=	sizeof(int),
2946 		.mode		=	0644,
2947 		.proc_handler	=	proc_dointvec_jiffies,
2948 	},
2949 	{
2950 		.procname	=	"gc_elasticity",
2951 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2952 		.maxlen		=	sizeof(int),
2953 		.mode		=	0644,
2954 		.proc_handler	=	proc_dointvec,
2955 	},
2956 	{
2957 		.procname	=	"mtu_expires",
2958 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2959 		.maxlen		=	sizeof(int),
2960 		.mode		=	0644,
2961 		.proc_handler	=	proc_dointvec_jiffies,
2962 	},
2963 	{
2964 		.procname	=	"min_adv_mss",
2965 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
2966 		.maxlen		=	sizeof(int),
2967 		.mode		=	0644,
2968 		.proc_handler	=	proc_dointvec,
2969 	},
2970 	{
2971 		.procname	=	"gc_min_interval_ms",
2972 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2973 		.maxlen		=	sizeof(int),
2974 		.mode		=	0644,
2975 		.proc_handler	=	proc_dointvec_ms_jiffies,
2976 	},
2977 	{ }
2978 };
2979 
2980 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2981 {
2982 	struct ctl_table *table;
2983 
2984 	table = kmemdup(ipv6_route_table_template,
2985 			sizeof(ipv6_route_table_template),
2986 			GFP_KERNEL);
2987 
2988 	if (table) {
2989 		table[0].data = &net->ipv6.sysctl.flush_delay;
2990 		table[0].extra1 = net;
2991 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2992 		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2993 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2994 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2995 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2996 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2997 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2998 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2999 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3000 
3001 		/* Don't export sysctls to unprivileged users */
3002 		if (net->user_ns != &init_user_ns)
3003 			table[0].procname = NULL;
3004 	}
3005 
3006 	return table;
3007 }
3008 #endif
3009 
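/*
 * Per-namespace init: clone the dst_ops and null (plus, with multiple
 * tables, prohibit and blackhole) route templates, then set the default
 * sysctl values for this namespace.
 */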
3010 static int __net_init ip6_route_net_init(struct net *net)
3011 {
3012 	int ret = -ENOMEM;
3013 
3014 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3015 	       sizeof(net->ipv6.ip6_dst_ops));
3016 
3017 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3018 		goto out_ip6_dst_ops;
3019 
3020 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3021 					   sizeof(*net->ipv6.ip6_null_entry),
3022 					   GFP_KERNEL);
3023 	if (!net->ipv6.ip6_null_entry)
3024 		goto out_ip6_dst_entries;
3025 	net->ipv6.ip6_null_entry->dst.path =
3026 		(struct dst_entry *)net->ipv6.ip6_null_entry;
3027 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3028 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3029 			 ip6_template_metrics, true);
3030 
3031 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3032 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3033 					       sizeof(*net->ipv6.ip6_prohibit_entry),
3034 					       GFP_KERNEL);
3035 	if (!net->ipv6.ip6_prohibit_entry)
3036 		goto out_ip6_null_entry;
3037 	net->ipv6.ip6_prohibit_entry->dst.path =
3038 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3039 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3040 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3041 			 ip6_template_metrics, true);
3042 
3043 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3044 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
3045 					       GFP_KERNEL);
3046 	if (!net->ipv6.ip6_blk_hole_entry)
3047 		goto out_ip6_prohibit_entry;
3048 	net->ipv6.ip6_blk_hole_entry->dst.path =
3049 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3050 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3051 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3052 			 ip6_template_metrics, true);
3053 #endif
3054 
3055 	net->ipv6.sysctl.flush_delay = 0;
3056 	net->ipv6.sysctl.ip6_rt_max_size = 4096;
3057 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3058 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3059 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3060 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3061 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3062 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3063 
3064 	net->ipv6.ip6_rt_gc_expire = 30*HZ;
3065 
3066 	ret = 0;
3067 out:
3068 	return ret;
3069 
3070 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3071 out_ip6_prohibit_entry:
3072 	kfree(net->ipv6.ip6_prohibit_entry);
3073 out_ip6_null_entry:
3074 	kfree(net->ipv6.ip6_null_entry);
3075 #endif
3076 out_ip6_dst_entries:
3077 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3078 out_ip6_dst_ops:
3079 	goto out;
3080 }
3081 
3082 static void __net_exit ip6_route_net_exit(struct net *net)
3083 {
3084 	kfree(net->ipv6.ip6_null_entry);
3085 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3086 	kfree(net->ipv6.ip6_prohibit_entry);
3087 	kfree(net->ipv6.ip6_blk_hole_entry);
3088 #endif
3089 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3090 }
3091 
3092 static int __net_init ip6_route_net_init_late(struct net *net)
3093 {
3094 #ifdef CONFIG_PROC_FS
3095 	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3096 	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3097 #endif
3098 	return 0;
3099 }
3100 
3101 static void __net_exit ip6_route_net_exit_late(struct net *net)
3102 {
3103 #ifdef CONFIG_PROC_FS
3104 	remove_proc_entry("ipv6_route", net->proc_net);
3105 	remove_proc_entry("rt6_stats", net->proc_net);
3106 #endif
3107 }
3108 
3109 static struct pernet_operations ip6_route_net_ops = {
3110 	.init = ip6_route_net_init,
3111 	.exit = ip6_route_net_exit,
3112 };
3113 
3114 static int __net_init ipv6_inetpeer_init(struct net *net)
3115 {
3116 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3117 
3118 	if (!bp)
3119 		return -ENOMEM;
3120 	inet_peer_base_init(bp);
3121 	net->ipv6.peers = bp;
3122 	return 0;
3123 }
3124 
3125 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3126 {
3127 	struct inet_peer_base *bp = net->ipv6.peers;
3128 
3129 	net->ipv6.peers = NULL;
3130 	inetpeer_invalidate_tree(bp);
3131 	kfree(bp);
3132 }
3133 
3134 static struct pernet_operations ipv6_inetpeer_ops = {
3135 	.init	=	ipv6_inetpeer_init,
3136 	.exit	=	ipv6_inetpeer_exit,
3137 };
3138 
3139 static struct pernet_operations ip6_route_net_late_ops = {
3140 	.init = ip6_route_net_init_late,
3141 	.exit = ip6_route_net_exit_late,
3142 };
3143 
3144 static struct notifier_block ip6_route_dev_notifier = {
3145 	.notifier_call = ip6_route_dev_notify,
3146 	.priority = 0,
3147 };
3148 
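/*
 * Subsystem init: create the rt6_info slab cache, register per-netns
 * operations, the FIB, xfrm and policy-rule hooks, the rtnetlink
 * handlers and the netdevice notifier.  Errors unwind in reverse order.
 */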
3149 int __init ip6_route_init(void)
3150 {
3151 	int ret;
3152 
3153 	ret = -ENOMEM;
3154 	ip6_dst_ops_template.kmem_cachep =
3155 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3156 				  SLAB_HWCACHE_ALIGN, NULL);
3157 	if (!ip6_dst_ops_template.kmem_cachep)
3158 		goto out;
3159 
3160 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
3161 	if (ret)
3162 		goto out_kmem_cache;
3163 
3164 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3165 	if (ret)
3166 		goto out_dst_entries;
3167 
3168 	ret = register_pernet_subsys(&ip6_route_net_ops);
3169 	if (ret)
3170 		goto out_register_inetpeer;
3171 
3172 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3173 
3174 	/* Registration of the loopback device is done before this portion
3175 	 * of code, so the loopback reference in rt6_info is not taken;
3176 	 * do it manually for init_net. */
3177 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3178 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3179 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3180 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3181 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3182 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3183 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3184 #endif
3185 	ret = fib6_init();
3186 	if (ret)
3187 		goto out_register_subsys;
3188 
3189 	ret = xfrm6_init();
3190 	if (ret)
3191 		goto out_fib6_init;
3192 
3193 	ret = fib6_rules_init();
3194 	if (ret)
3195 		goto xfrm6_init;
3196 
3197 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
3198 	if (ret)
3199 		goto fib6_rules_init;
3200 
3201 	ret = -ENOBUFS;
3202 	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3203 	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3204 	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3205 		goto out_register_late_subsys;
3206 
3207 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3208 	if (ret)
3209 		goto out_register_late_subsys;
3210 
3211 out:
3212 	return ret;
3213 
3214 out_register_late_subsys:
3215 	unregister_pernet_subsys(&ip6_route_net_late_ops);
3216 fib6_rules_init:
3217 	fib6_rules_cleanup();
3218 xfrm6_init:
3219 	xfrm6_fini();
3220 out_fib6_init:
3221 	fib6_gc_cleanup();
3222 out_register_subsys:
3223 	unregister_pernet_subsys(&ip6_route_net_ops);
3224 out_register_inetpeer:
3225 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
3226 out_dst_entries:
3227 	dst_entries_destroy(&ip6_dst_blackhole_ops);
3228 out_kmem_cache:
3229 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3230 	goto out;
3231 }
3232 
3233 void ip6_route_cleanup(void)
3234 {
3235 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
3236 	unregister_pernet_subsys(&ip6_route_net_late_ops);
3237 	fib6_rules_cleanup();
3238 	xfrm6_fini();
3239 	fib6_gc_cleanup();
3240 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
3241 	unregister_pernet_subsys(&ip6_route_net_ops);
3242 	dst_entries_destroy(&ip6_dst_blackhole_ops);
3243 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3244 }
3245