xref: /openbmc/linux/net/ipv6/route.c (revision 82df5b73)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Linux INET6 implementation
4  *	FIB front-end.
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  */
9 
10 /*	Changes:
11  *
12  *	YOSHIFUJI Hideaki @USAGI
13  *		reworked default router selection.
14  *		- respect outgoing interface
15  *		- select from (probably) reachable routers (i.e.
16  *		routers in REACHABLE, STALE, DELAY or PROBE states).
17  *		- always select the same router if it is (probably)
18  *		reachable.  otherwise, round-robin the list.
19  *	Ville Nuorvala
20  *		Fixed routing subtrees.
21  */
22 
23 #define pr_fmt(fmt) "IPv6: " fmt
24 
25 #include <linux/capability.h>
26 #include <linux/errno.h>
27 #include <linux/export.h>
28 #include <linux/types.h>
29 #include <linux/times.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/route.h>
34 #include <linux/netdevice.h>
35 #include <linux/in6.h>
36 #include <linux/mroute6.h>
37 #include <linux/init.h>
38 #include <linux/if_arp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/nsproxy.h>
42 #include <linux/slab.h>
43 #include <linux/jhash.h>
44 #include <net/net_namespace.h>
45 #include <net/snmp.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
51 #include <net/tcp.h>
52 #include <linux/rtnetlink.h>
53 #include <net/dst.h>
54 #include <net/dst_metadata.h>
55 #include <net/xfrm.h>
56 #include <net/netevent.h>
57 #include <net/netlink.h>
58 #include <net/rtnh.h>
59 #include <net/lwtunnel.h>
60 #include <net/ip_tunnels.h>
61 #include <net/l3mdev.h>
62 #include <net/ip.h>
63 #include <linux/uaccess.h>
64 
65 #ifdef CONFIG_SYSCTL
66 #include <linux/sysctl.h>
67 #endif
68 
69 static int ip6_rt_type_to_error(u8 fib6_type);
70 
71 #define CREATE_TRACE_POINTS
72 #include <trace/events/fib6.h>
73 EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
74 #undef CREATE_TRACE_POINTS
75 
76 enum rt6_nud_state {
77 	RT6_NUD_FAIL_HARD = -3,
78 	RT6_NUD_FAIL_PROBE = -2,
79 	RT6_NUD_FAIL_DO_RR = -1,
80 	RT6_NUD_SUCCEED = 1
81 };
82 
83 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
84 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
85 static unsigned int	 ip6_mtu(const struct dst_entry *dst);
86 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
87 static void		ip6_dst_destroy(struct dst_entry *);
88 static void		ip6_dst_ifdown(struct dst_entry *,
89 				       struct net_device *dev, int how);
90 static int		 ip6_dst_gc(struct dst_ops *ops);
91 
92 static int		ip6_pkt_discard(struct sk_buff *skb);
93 static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
94 static int		ip6_pkt_prohibit(struct sk_buff *skb);
95 static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
96 static void		ip6_link_failure(struct sk_buff *skb);
97 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
98 					   struct sk_buff *skb, u32 mtu,
99 					   bool confirm_neigh);
100 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
101 					struct sk_buff *skb);
102 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
103 			   int strict);
104 static size_t rt6_nlmsg_size(struct fib6_info *f6i);
105 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
106 			 struct fib6_info *rt, struct dst_entry *dst,
107 			 struct in6_addr *dest, struct in6_addr *src,
108 			 int iif, int type, u32 portid, u32 seq,
109 			 unsigned int flags);
110 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
111 					   const struct in6_addr *daddr,
112 					   const struct in6_addr *saddr);
113 
114 #ifdef CONFIG_IPV6_ROUTE_INFO
115 static struct fib6_info *rt6_add_route_info(struct net *net,
116 					   const struct in6_addr *prefix, int prefixlen,
117 					   const struct in6_addr *gwaddr,
118 					   struct net_device *dev,
119 					   unsigned int pref);
120 static struct fib6_info *rt6_get_route_info(struct net *net,
121 					   const struct in6_addr *prefix, int prefixlen,
122 					   const struct in6_addr *gwaddr,
123 					   struct net_device *dev);
124 #endif
125 
126 struct uncached_list {
127 	spinlock_t		lock;
128 	struct list_head	head;
129 };
130 
131 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
132 
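/* Uncached (dst-only) routes are kept on per-cpu lists so that
 * rt6_uncached_list_flush_dev() can re-point them at the loopback
 * idev and the blackhole device when their device is unregistered.
 */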
133 void rt6_uncached_list_add(struct rt6_info *rt)
134 {
135 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
136 
137 	rt->rt6i_uncached_list = ul;
138 
139 	spin_lock_bh(&ul->lock);
140 	list_add_tail(&rt->rt6i_uncached, &ul->head);
141 	spin_unlock_bh(&ul->lock);
142 }
143 
144 void rt6_uncached_list_del(struct rt6_info *rt)
145 {
146 	if (!list_empty(&rt->rt6i_uncached)) {
147 		struct uncached_list *ul = rt->rt6i_uncached_list;
148 		struct net *net = dev_net(rt->dst.dev);
149 
150 		spin_lock_bh(&ul->lock);
151 		list_del(&rt->rt6i_uncached);
152 		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
153 		spin_unlock_bh(&ul->lock);
154 	}
155 }
156 
157 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
158 {
159 	struct net_device *loopback_dev = net->loopback_dev;
160 	int cpu;
161 
162 	if (dev == loopback_dev)
163 		return;
164 
165 	for_each_possible_cpu(cpu) {
166 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
167 		struct rt6_info *rt;
168 
169 		spin_lock_bh(&ul->lock);
170 		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
171 			struct inet6_dev *rt_idev = rt->rt6i_idev;
172 			struct net_device *rt_dev = rt->dst.dev;
173 
174 			if (rt_idev->dev == dev) {
175 				rt->rt6i_idev = in6_dev_get(loopback_dev);
176 				in6_dev_put(rt_idev);
177 			}
178 
179 			if (rt_dev == dev) {
180 				rt->dst.dev = blackhole_netdev;
181 				dev_hold(rt->dst.dev);
182 				dev_put(rt_dev);
183 			}
184 		}
185 		spin_unlock_bh(&ul->lock);
186 	}
187 }
188 
189 static inline const void *choose_neigh_daddr(const struct in6_addr *p,
190 					     struct sk_buff *skb,
191 					     const void *daddr)
192 {
193 	if (!ipv6_addr_any(p))
194 		return (const void *) p;
195 	else if (skb)
196 		return &ipv6_hdr(skb)->daddr;
197 	return daddr;
198 }
199 
200 struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
201 				   struct net_device *dev,
202 				   struct sk_buff *skb,
203 				   const void *daddr)
204 {
205 	struct neighbour *n;
206 
207 	daddr = choose_neigh_daddr(gw, skb, daddr);
208 	n = __ipv6_neigh_lookup(dev, daddr);
209 	if (n)
210 		return n;
211 
212 	n = neigh_create(&nd_tbl, daddr, dev);
213 	return IS_ERR(n) ? NULL : n;
214 }
215 
216 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
217 					      struct sk_buff *skb,
218 					      const void *daddr)
219 {
220 	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
221 
222 	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
223 				dst->dev, skb, daddr);
224 }
225 
226 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
227 {
228 	struct net_device *dev = dst->dev;
229 	struct rt6_info *rt = (struct rt6_info *)dst;
230 
231 	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
232 	if (!daddr)
233 		return;
234 	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
235 		return;
236 	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
237 		return;
238 	__ipv6_confirm_neigh(dev, daddr);
239 }
240 
241 static struct dst_ops ip6_dst_ops_template = {
242 	.family			=	AF_INET6,
243 	.gc			=	ip6_dst_gc,
244 	.gc_thresh		=	1024,
245 	.check			=	ip6_dst_check,
246 	.default_advmss		=	ip6_default_advmss,
247 	.mtu			=	ip6_mtu,
248 	.cow_metrics		=	dst_cow_metrics_generic,
249 	.destroy		=	ip6_dst_destroy,
250 	.ifdown			=	ip6_dst_ifdown,
251 	.negative_advice	=	ip6_negative_advice,
252 	.link_failure		=	ip6_link_failure,
253 	.update_pmtu		=	ip6_rt_update_pmtu,
254 	.redirect		=	rt6_do_redirect,
255 	.local_out		=	__ip6_local_out,
256 	.neigh_lookup		=	ip6_dst_neigh_lookup,
257 	.confirm_neigh		=	ip6_confirm_neigh,
258 };
259 
260 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
261 {
262 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
263 
264 	return mtu ? : dst->dev->mtu;
265 }
266 
267 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
268 					 struct sk_buff *skb, u32 mtu,
269 					 bool confirm_neigh)
270 {
271 }
272 
273 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
274 				      struct sk_buff *skb)
275 {
276 }
277 
278 static struct dst_ops ip6_dst_blackhole_ops = {
279 	.family			=	AF_INET6,
280 	.destroy		=	ip6_dst_destroy,
281 	.check			=	ip6_dst_check,
282 	.mtu			=	ip6_blackhole_mtu,
283 	.default_advmss		=	ip6_default_advmss,
284 	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
285 	.redirect		=	ip6_rt_blackhole_redirect,
286 	.cow_metrics		=	dst_cow_metrics_generic,
287 	.neigh_lookup		=	ip6_dst_neigh_lookup,
288 };
289 
290 static const u32 ip6_template_metrics[RTAX_MAX] = {
291 	[RTAX_HOPLIMIT - 1] = 0,
292 };
293 
294 static const struct fib6_info fib6_null_entry_template = {
295 	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
296 	.fib6_protocol  = RTPROT_KERNEL,
297 	.fib6_metric	= ~(u32)0,
298 	.fib6_ref	= REFCOUNT_INIT(1),
299 	.fib6_type	= RTN_UNREACHABLE,
300 	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
301 };
302 
303 static const struct rt6_info ip6_null_entry_template = {
304 	.dst = {
305 		.__refcnt	= ATOMIC_INIT(1),
306 		.__use		= 1,
307 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
308 		.error		= -ENETUNREACH,
309 		.input		= ip6_pkt_discard,
310 		.output		= ip6_pkt_discard_out,
311 	},
312 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
313 };
314 
315 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
316 
317 static const struct rt6_info ip6_prohibit_entry_template = {
318 	.dst = {
319 		.__refcnt	= ATOMIC_INIT(1),
320 		.__use		= 1,
321 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
322 		.error		= -EACCES,
323 		.input		= ip6_pkt_prohibit,
324 		.output		= ip6_pkt_prohibit_out,
325 	},
326 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
327 };
328 
329 static const struct rt6_info ip6_blk_hole_entry_template = {
330 	.dst = {
331 		.__refcnt	= ATOMIC_INIT(1),
332 		.__use		= 1,
333 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
334 		.error		= -EINVAL,
335 		.input		= dst_discard,
336 		.output		= dst_discard_out,
337 	},
338 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
339 };
340 
341 #endif
342 
343 static void rt6_info_init(struct rt6_info *rt)
344 {
345 	struct dst_entry *dst = &rt->dst;
346 
347 	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
348 	INIT_LIST_HEAD(&rt->rt6i_uncached);
349 }
350 
351 /* allocate dst with ip6_dst_ops */
352 struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
353 			       int flags)
354 {
355 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
356 					1, DST_OBSOLETE_FORCE_CHK, flags);
357 
358 	if (rt) {
359 		rt6_info_init(rt);
360 		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
361 	}
362 
363 	return rt;
364 }
365 EXPORT_SYMBOL(ip6_dst_alloc);
366 
367 static void ip6_dst_destroy(struct dst_entry *dst)
368 {
369 	struct rt6_info *rt = (struct rt6_info *)dst;
370 	struct fib6_info *from;
371 	struct inet6_dev *idev;
372 
373 	ip_dst_metrics_put(dst);
374 	rt6_uncached_list_del(rt);
375 
376 	idev = rt->rt6i_idev;
377 	if (idev) {
378 		rt->rt6i_idev = NULL;
379 		in6_dev_put(idev);
380 	}
381 
382 	from = xchg((__force struct fib6_info **)&rt->from, NULL);
383 	fib6_info_release(from);
384 }
385 
386 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
387 			   int how)
388 {
389 	struct rt6_info *rt = (struct rt6_info *)dst;
390 	struct inet6_dev *idev = rt->rt6i_idev;
391 	struct net_device *loopback_dev =
392 		dev_net(dev)->loopback_dev;
393 
394 	if (idev && idev->dev != loopback_dev) {
395 		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
396 		if (loopback_idev) {
397 			rt->rt6i_idev = loopback_idev;
398 			in6_dev_put(idev);
399 		}
400 	}
401 }
402 
403 static bool __rt6_check_expired(const struct rt6_info *rt)
404 {
405 	if (rt->rt6i_flags & RTF_EXPIRES)
406 		return time_after(jiffies, rt->dst.expires);
407 	else
408 		return false;
409 }
410 
411 static bool rt6_check_expired(const struct rt6_info *rt)
412 {
413 	struct fib6_info *from;
414 
415 	from = rcu_dereference(rt->from);
416 
417 	if (rt->rt6i_flags & RTF_EXPIRES) {
418 		if (time_after(jiffies, rt->dst.expires))
419 			return true;
420 	} else if (from) {
421 		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
422 			fib6_check_expired(from);
423 	}
424 	return false;
425 }
426 
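/* Multipath selection: pick the path whose hash upper bound covers
 * fl6->mp_hash.  Routes using nexthop objects delegate to
 * nexthop_path_fib6_result(); legacy siblings are walked in order.
 */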
427 void fib6_select_path(const struct net *net, struct fib6_result *res,
428 		      struct flowi6 *fl6, int oif, bool have_oif_match,
429 		      const struct sk_buff *skb, int strict)
430 {
431 	struct fib6_info *sibling, *next_sibling;
432 	struct fib6_info *match = res->f6i;
433 
434 	if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
435 		goto out;
436 
437 	/* We might have already computed the hash for ICMPv6 errors. In such
438 	 * case it will always be non-zero. Otherwise now is the time to do it.
439 	 */
440 	if (!fl6->mp_hash &&
441 	    (!match->nh || nexthop_is_multipath(match->nh)))
442 		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
443 
444 	if (unlikely(match->nh)) {
445 		nexthop_path_fib6_result(res, fl6->mp_hash);
446 		return;
447 	}
448 
449 	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
450 		goto out;
451 
452 	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
453 				 fib6_siblings) {
454 		const struct fib6_nh *nh = sibling->fib6_nh;
455 		int nh_upper_bound;
456 
457 		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
458 		if (fl6->mp_hash > nh_upper_bound)
459 			continue;
460 		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
461 			break;
462 		match = sibling;
463 		break;
464 	}
465 
466 out:
467 	res->f6i = match;
468 	res->nh = match->fib6_nh;
469 }
470 
471 /*
472  *	Route lookup. rcu_read_lock() should be held.
473  */
474 
475 static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
476 			       const struct in6_addr *saddr, int oif, int flags)
477 {
478 	const struct net_device *dev;
479 
480 	if (nh->fib_nh_flags & RTNH_F_DEAD)
481 		return false;
482 
483 	dev = nh->fib_nh_dev;
484 	if (oif) {
485 		if (dev->ifindex == oif)
486 			return true;
487 	} else {
488 		if (ipv6_chk_addr(net, saddr, dev,
489 				  flags & RT6_LOOKUP_F_IFACE))
490 			return true;
491 	}
492 
493 	return false;
494 }
495 
496 struct fib6_nh_dm_arg {
497 	struct net		*net;
498 	const struct in6_addr	*saddr;
499 	int			oif;
500 	int			flags;
501 	struct fib6_nh		*nh;
502 };
503 
504 static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
505 {
506 	struct fib6_nh_dm_arg *arg = _arg;
507 
508 	arg->nh = nh;
509 	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
510 				  arg->flags);
511 }
512 
513 /* returns fib6_nh from nexthop or NULL */
514 static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
515 					struct fib6_result *res,
516 					const struct in6_addr *saddr,
517 					int oif, int flags)
518 {
519 	struct fib6_nh_dm_arg arg = {
520 		.net   = net,
521 		.saddr = saddr,
522 		.oif   = oif,
523 		.flags = flags,
524 	};
525 
526 	if (nexthop_is_blackhole(nh))
527 		return NULL;
528 
529 	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
530 		return arg.nh;
531 
532 	return NULL;
533 }
534 
535 static void rt6_device_match(struct net *net, struct fib6_result *res,
536 			     const struct in6_addr *saddr, int oif, int flags)
537 {
538 	struct fib6_info *f6i = res->f6i;
539 	struct fib6_info *spf6i;
540 	struct fib6_nh *nh;
541 
542 	if (!oif && ipv6_addr_any(saddr)) {
543 		if (unlikely(f6i->nh)) {
544 			nh = nexthop_fib6_nh(f6i->nh);
545 			if (nexthop_is_blackhole(f6i->nh))
546 				goto out_blackhole;
547 		} else {
548 			nh = f6i->fib6_nh;
549 		}
550 		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
551 			goto out;
552 	}
553 
554 	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
555 		bool matched = false;
556 
557 		if (unlikely(spf6i->nh)) {
558 			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
559 					      oif, flags);
560 			if (nh)
561 				matched = true;
562 		} else {
563 			nh = spf6i->fib6_nh;
564 			if (__rt6_device_match(net, nh, saddr, oif, flags))
565 				matched = true;
566 		}
567 		if (matched) {
568 			res->f6i = spf6i;
569 			goto out;
570 		}
571 	}
572 
573 	if (oif && flags & RT6_LOOKUP_F_IFACE) {
574 		res->f6i = net->ipv6.fib6_null_entry;
575 		nh = res->f6i->fib6_nh;
576 		goto out;
577 	}
578 
579 	if (unlikely(f6i->nh)) {
580 		nh = nexthop_fib6_nh(f6i->nh);
581 		if (nexthop_is_blackhole(f6i->nh))
582 			goto out_blackhole;
583 	} else {
584 		nh = f6i->fib6_nh;
585 	}
586 
587 	if (nh->fib_nh_flags & RTNH_F_DEAD) {
588 		res->f6i = net->ipv6.fib6_null_entry;
589 		nh = res->f6i->fib6_nh;
590 	}
591 out:
592 	res->nh = nh;
593 	res->fib6_type = res->f6i->fib6_type;
594 	res->fib6_flags = res->f6i->fib6_flags;
595 	return;
596 
597 out_blackhole:
598 	res->fib6_flags |= RTF_REJECT;
599 	res->fib6_type = RTN_BLACKHOLE;
600 	res->nh = nh;
601 }
602 
603 #ifdef CONFIG_IPV6_ROUTER_PREF
604 struct __rt6_probe_work {
605 	struct work_struct work;
606 	struct in6_addr target;
607 	struct net_device *dev;
608 };
609 
610 static void rt6_probe_deferred(struct work_struct *w)
611 {
612 	struct in6_addr mcaddr;
613 	struct __rt6_probe_work *work =
614 		container_of(w, struct __rt6_probe_work, work);
615 
616 	addrconf_addr_solict_mult(&work->target, &mcaddr);
617 	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
618 	dev_put(work->dev);
619 	kfree(work);
620 }
621 
622 static void rt6_probe(struct fib6_nh *fib6_nh)
623 {
624 	struct __rt6_probe_work *work = NULL;
625 	const struct in6_addr *nh_gw;
626 	unsigned long last_probe;
627 	struct neighbour *neigh;
628 	struct net_device *dev;
629 	struct inet6_dev *idev;
630 
631 	/*
632 	 * Router Reachability Probing (RFC 4191, section 3.5):
633 	 * verify that the gateway used by this route is still
634 	 * reachable before continuing to prefer it.
635 	 *
636 	 * Router Reachability Probes MUST be rate-limited
637 	 * to no more than one per minute.
638 	 */
639 	if (!fib6_nh->fib_nh_gw_family)
640 		return;
641 
642 	nh_gw = &fib6_nh->fib_nh_gw6;
643 	dev = fib6_nh->fib_nh_dev;
644 	rcu_read_lock_bh();
645 	last_probe = READ_ONCE(fib6_nh->last_probe);
646 	idev = __in6_dev_get(dev);
647 	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
648 	if (neigh) {
649 		if (neigh->nud_state & NUD_VALID)
650 			goto out;
651 
652 		write_lock(&neigh->lock);
653 		if (!(neigh->nud_state & NUD_VALID) &&
654 		    time_after(jiffies,
655 			       neigh->updated + idev->cnf.rtr_probe_interval)) {
656 			work = kmalloc(sizeof(*work), GFP_ATOMIC);
657 			if (work)
658 				__neigh_set_probe_once(neigh);
659 		}
660 		write_unlock(&neigh->lock);
661 	} else if (time_after(jiffies, last_probe +
662 				       idev->cnf.rtr_probe_interval)) {
663 		work = kmalloc(sizeof(*work), GFP_ATOMIC);
664 	}
665 
666 	if (!work || cmpxchg(&fib6_nh->last_probe,
667 			     last_probe, jiffies) != last_probe) {
668 		kfree(work);
669 	} else {
670 		INIT_WORK(&work->work, rt6_probe_deferred);
671 		work->target = *nh_gw;
672 		dev_hold(dev);
673 		work->dev = dev;
674 		schedule_work(&work->work);
675 	}
676 
677 out:
678 	rcu_read_unlock_bh();
679 }
680 #else
681 static inline void rt6_probe(struct fib6_nh *fib6_nh)
682 {
683 }
684 #endif
685 
686 /*
687  * Default Router Selection (RFC 2461 6.3.6)
688  */
689 static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
690 {
691 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
692 	struct neighbour *neigh;
693 
694 	rcu_read_lock_bh();
695 	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
696 					  &fib6_nh->fib_nh_gw6);
697 	if (neigh) {
698 		read_lock(&neigh->lock);
699 		if (neigh->nud_state & NUD_VALID)
700 			ret = RT6_NUD_SUCCEED;
701 #ifdef CONFIG_IPV6_ROUTER_PREF
702 		else if (!(neigh->nud_state & NUD_FAILED))
703 			ret = RT6_NUD_SUCCEED;
704 		else
705 			ret = RT6_NUD_FAIL_PROBE;
706 #endif
707 		read_unlock(&neigh->lock);
708 	} else {
709 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
710 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
711 	}
712 	rcu_read_unlock_bh();
713 
714 	return ret;
715 }
716 
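/* Score a nexthop for default router selection: +2 for an output
 * interface match, router-preference bits from the route flags, and
 * a negative RT6_NUD_* value when the gateway is not reachable.
 */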
717 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
718 			   int strict)
719 {
720 	int m = 0;
721 
722 	if (!oif || nh->fib_nh_dev->ifindex == oif)
723 		m = 2;
724 
725 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
726 		return RT6_NUD_FAIL_HARD;
727 #ifdef CONFIG_IPV6_ROUTER_PREF
728 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
729 #endif
730 	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
731 	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
732 		int n = rt6_check_neigh(nh);
733 		if (n < 0)
734 			return n;
735 	}
736 	return m;
737 }
738 
739 static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
740 		       int oif, int strict, int *mpri, bool *do_rr)
741 {
742 	bool match_do_rr = false;
743 	bool rc = false;
744 	int m;
745 
746 	if (nh->fib_nh_flags & RTNH_F_DEAD)
747 		goto out;
748 
749 	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
750 	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
751 	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
752 		goto out;
753 
754 	m = rt6_score_route(nh, fib6_flags, oif, strict);
755 	if (m == RT6_NUD_FAIL_DO_RR) {
756 		match_do_rr = true;
757 		m = 0; /* lowest valid score */
758 	} else if (m == RT6_NUD_FAIL_HARD) {
759 		goto out;
760 	}
761 
762 	if (strict & RT6_LOOKUP_F_REACHABLE)
763 		rt6_probe(nh);
764 
765 	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
766 	if (m > *mpri) {
767 		*do_rr = match_do_rr;
768 		*mpri = m;
769 		rc = true;
770 	}
771 out:
772 	return rc;
773 }
774 
775 struct fib6_nh_frl_arg {
776 	u32		flags;
777 	int		oif;
778 	int		strict;
779 	int		*mpri;
780 	bool		*do_rr;
781 	struct fib6_nh	*nh;
782 };
783 
784 static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
785 {
786 	struct fib6_nh_frl_arg *arg = _arg;
787 
788 	arg->nh = nh;
789 	return find_match(nh, arg->flags, arg->oif, arg->strict,
790 			  arg->mpri, arg->do_rr);
791 }
792 
793 static void __find_rr_leaf(struct fib6_info *f6i_start,
794 			   struct fib6_info *nomatch, u32 metric,
795 			   struct fib6_result *res, struct fib6_info **cont,
796 			   int oif, int strict, bool *do_rr, int *mpri)
797 {
798 	struct fib6_info *f6i;
799 
800 	for (f6i = f6i_start;
801 	     f6i && f6i != nomatch;
802 	     f6i = rcu_dereference(f6i->fib6_next)) {
803 		bool matched = false;
804 		struct fib6_nh *nh;
805 
806 		if (cont && f6i->fib6_metric != metric) {
807 			*cont = f6i;
808 			return;
809 		}
810 
811 		if (fib6_check_expired(f6i))
812 			continue;
813 
814 		if (unlikely(f6i->nh)) {
815 			struct fib6_nh_frl_arg arg = {
816 				.flags  = f6i->fib6_flags,
817 				.oif    = oif,
818 				.strict = strict,
819 				.mpri   = mpri,
820 				.do_rr  = do_rr
821 			};
822 
823 			if (nexthop_is_blackhole(f6i->nh)) {
824 				res->fib6_flags = RTF_REJECT;
825 				res->fib6_type = RTN_BLACKHOLE;
826 				res->f6i = f6i;
827 				res->nh = nexthop_fib6_nh(f6i->nh);
828 				return;
829 			}
830 			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
831 						     &arg)) {
832 				matched = true;
833 				nh = arg.nh;
834 			}
835 		} else {
836 			nh = f6i->fib6_nh;
837 			if (find_match(nh, f6i->fib6_flags, oif, strict,
838 				       mpri, do_rr))
839 				matched = true;
840 		}
841 		if (matched) {
842 			res->f6i = f6i;
843 			res->nh = nh;
844 			res->fib6_flags = f6i->fib6_flags;
845 			res->fib6_type = f6i->fib6_type;
846 		}
847 	}
848 }
849 
850 static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
851 			 struct fib6_info *rr_head, int oif, int strict,
852 			 bool *do_rr, struct fib6_result *res)
853 {
854 	u32 metric = rr_head->fib6_metric;
855 	struct fib6_info *cont = NULL;
856 	int mpri = -1;
857 
858 	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
859 		       oif, strict, do_rr, &mpri);
860 
861 	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
862 		       oif, strict, do_rr, &mpri);
863 
864 	if (res->f6i || !cont)
865 		return;
866 
867 	__find_rr_leaf(cont, NULL, metric, res, NULL,
868 		       oif, strict, do_rr, &mpri);
869 }
870 
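/* Select the best route from fib6_node fn, starting the scan at
 * fn->rr_ptr and rotating it (round-robin) when the current head is
 * not (probably) reachable.  Falls back to fib6_null_entry.
 */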
871 static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
872 		       struct fib6_result *res, int strict)
873 {
874 	struct fib6_info *leaf = rcu_dereference(fn->leaf);
875 	struct fib6_info *rt0;
876 	bool do_rr = false;
877 	int key_plen;
878 
879 	/* make sure this function or its helpers sets f6i */
880 	/* make sure this function or its helpers set res->f6i */
881 
882 	if (!leaf || leaf == net->ipv6.fib6_null_entry)
883 		goto out;
884 
885 	rt0 = rcu_dereference(fn->rr_ptr);
886 	if (!rt0)
887 		rt0 = leaf;
888 
889 	/* Double check to make sure fn is not an intermediate node
890 	 * and fn->leaf does not point to its child's leaf
891 	 * (This might happen if all routes under fn are deleted from
892 	 * the tree and fib6_repair_tree() is called on the node.)
893 	 */
894 	key_plen = rt0->fib6_dst.plen;
895 #ifdef CONFIG_IPV6_SUBTREES
896 	if (rt0->fib6_src.plen)
897 		key_plen = rt0->fib6_src.plen;
898 #endif
899 	if (fn->fn_bit != key_plen)
900 		goto out;
901 
902 	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
903 	if (do_rr) {
904 		struct fib6_info *next = rcu_dereference(rt0->fib6_next);
905 
906 		/* no entries matched; do round-robin */
907 		if (!next || next->fib6_metric != rt0->fib6_metric)
908 			next = leaf;
909 
910 		if (next != rt0) {
911 			spin_lock_bh(&leaf->fib6_table->tb6_lock);
912 			/* make sure next is not being deleted from the tree */
913 			if (next->fib6_node)
914 				rcu_assign_pointer(fn->rr_ptr, next);
915 			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
916 		}
917 	}
918 
919 out:
920 	if (!res->f6i) {
921 		res->f6i = net->ipv6.fib6_null_entry;
922 		res->nh = res->f6i->fib6_nh;
923 		res->fib6_flags = res->f6i->fib6_flags;
924 		res->fib6_type = res->f6i->fib6_type;
925 	}
926 }
927 
928 static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
929 {
930 	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
931 	       res->nh->fib_nh_gw_family;
932 }
933 
934 #ifdef CONFIG_IPV6_ROUTE_INFO
935 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
936 		  const struct in6_addr *gwaddr)
937 {
938 	struct net *net = dev_net(dev);
939 	struct route_info *rinfo = (struct route_info *) opt;
940 	struct in6_addr prefix_buf, *prefix;
941 	unsigned int pref;
942 	unsigned long lifetime;
943 	struct fib6_info *rt;
944 
945 	if (len < sizeof(struct route_info)) {
946 		return -EINVAL;
947 	}
948 
949 	/* Sanity check for prefix_len and length */
950 	if (rinfo->length > 3) {
951 		return -EINVAL;
952 	} else if (rinfo->prefix_len > 128) {
953 		return -EINVAL;
954 	} else if (rinfo->prefix_len > 64) {
955 		if (rinfo->length < 2) {
956 			return -EINVAL;
957 		}
958 	} else if (rinfo->prefix_len > 0) {
959 		if (rinfo->length < 1) {
960 			return -EINVAL;
961 		}
962 	}
963 
964 	pref = rinfo->route_pref;
965 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
966 		return -EINVAL;
967 
968 	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
969 
970 	if (rinfo->length == 3)
971 		prefix = (struct in6_addr *)rinfo->prefix;
972 	else {
973 		/* this function is safe */
974 		ipv6_addr_prefix(&prefix_buf,
975 				 (struct in6_addr *)rinfo->prefix,
976 				 rinfo->prefix_len);
977 		prefix = &prefix_buf;
978 	}
979 
980 	if (rinfo->prefix_len == 0)
981 		rt = rt6_get_dflt_router(net, gwaddr, dev);
982 	else
983 		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
984 					gwaddr, dev);
985 
986 	if (rt && !lifetime) {
987 		ip6_del_rt(net, rt, false);
988 		rt = NULL;
989 	}
990 
991 	if (!rt && lifetime)
992 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
993 					dev, pref);
994 	else if (rt)
995 		rt->fib6_flags = RTF_ROUTEINFO |
996 				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
997 
998 	if (rt) {
999 		if (!addrconf_finite_timeout(lifetime))
1000 			fib6_clean_expires(rt);
1001 		else
1002 			fib6_set_expires(rt, jiffies + HZ * lifetime);
1003 
1004 		fib6_info_release(rt);
1005 	}
1006 	return 0;
1007 }
1008 #endif
1009 
1010 /*
1011  *	Misc support functions
1012  */
1013 
1014 /* called with rcu_read_lock held */
1015 static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1016 {
1017 	struct net_device *dev = res->nh->fib_nh_dev;
1018 
1019 	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
1020 		/* for copies of local routes, dst->dev needs to be the
1021 		 * device itself if it is an l3 master, the master device
1022 		 * if the device is enslaved, and loopback as the default
1023 		 */
1024 		if (netif_is_l3_slave(dev) &&
1025 		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
1026 			dev = l3mdev_master_dev_rcu(dev);
1027 		else if (!netif_is_l3_master(dev))
1028 			dev = dev_net(dev)->loopback_dev;
1029 		/* last case is netif_is_l3_master(dev) is true in which
1030 		 * case we want dev returned to be dev
1031 		 */
1032 	}
1033 
1034 	return dev;
1035 }
1036 
1037 static const int fib6_prop[RTN_MAX + 1] = {
1038 	[RTN_UNSPEC]	= 0,
1039 	[RTN_UNICAST]	= 0,
1040 	[RTN_LOCAL]	= 0,
1041 	[RTN_BROADCAST]	= 0,
1042 	[RTN_ANYCAST]	= 0,
1043 	[RTN_MULTICAST]	= 0,
1044 	[RTN_BLACKHOLE]	= -EINVAL,
1045 	[RTN_UNREACHABLE] = -EHOSTUNREACH,
1046 	[RTN_PROHIBIT]	= -EACCES,
1047 	[RTN_THROW]	= -EAGAIN,
1048 	[RTN_NAT]	= -EINVAL,
1049 	[RTN_XRESOLVE]	= -EINVAL,
1050 };
1051 
1052 static int ip6_rt_type_to_error(u8 fib6_type)
1053 {
1054 	return fib6_prop[fib6_type];
1055 }
1056 
1057 static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
1058 {
1059 	unsigned short flags = 0;
1060 
1061 	if (rt->dst_nocount)
1062 		flags |= DST_NOCOUNT;
1063 	if (rt->dst_nopolicy)
1064 		flags |= DST_NOPOLICY;
1065 
1066 	return flags;
1067 }
1068 
1069 static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1070 {
1071 	rt->dst.error = ip6_rt_type_to_error(fib6_type);
1072 
1073 	switch (fib6_type) {
1074 	case RTN_BLACKHOLE:
1075 		rt->dst.output = dst_discard_out;
1076 		rt->dst.input = dst_discard;
1077 		break;
1078 	case RTN_PROHIBIT:
1079 		rt->dst.output = ip6_pkt_prohibit_out;
1080 		rt->dst.input = ip6_pkt_prohibit;
1081 		break;
1082 	case RTN_THROW:
1083 	case RTN_UNREACHABLE:
1084 	default:
1085 		rt->dst.output = ip6_pkt_discard_out;
1086 		rt->dst.input = ip6_pkt_discard;
1087 		break;
1088 	}
1089 }
1090 
1091 static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1092 {
1093 	struct fib6_info *f6i = res->f6i;
1094 
1095 	if (res->fib6_flags & RTF_REJECT) {
1096 		ip6_rt_init_dst_reject(rt, res->fib6_type);
1097 		return;
1098 	}
1099 
1100 	rt->dst.error = 0;
1101 	rt->dst.output = ip6_output;
1102 
1103 	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1104 		rt->dst.input = ip6_input;
1105 	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1106 		rt->dst.input = ip6_mc_input;
1107 	} else {
1108 		rt->dst.input = ip6_forward;
1109 	}
1110 
1111 	if (res->nh->fib_nh_lws) {
1112 		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1113 		lwtunnel_set_redirect(&rt->dst);
1114 	}
1115 
1116 	rt->dst.lastuse = jiffies;
1117 }
1118 
1119 /* Caller must already hold reference to @from */
1120 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1121 {
1122 	rt->rt6i_flags &= ~RTF_EXPIRES;
1123 	rcu_assign_pointer(rt->from, from);
1124 	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1125 }
1126 
1127 /* Caller must already hold reference to f6i in result */
1128 static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1129 {
1130 	const struct fib6_nh *nh = res->nh;
1131 	const struct net_device *dev = nh->fib_nh_dev;
1132 	struct fib6_info *f6i = res->f6i;
1133 
1134 	ip6_rt_init_dst(rt, res);
1135 
1136 	rt->rt6i_dst = f6i->fib6_dst;
1137 	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1138 	rt->rt6i_flags = res->fib6_flags;
1139 	if (nh->fib_nh_gw_family) {
1140 		rt->rt6i_gateway = nh->fib_nh_gw6;
1141 		rt->rt6i_flags |= RTF_GATEWAY;
1142 	}
1143 	rt6_set_from(rt, f6i);
1144 #ifdef CONFIG_IPV6_SUBTREES
1145 	rt->rt6i_src = f6i->fib6_src;
1146 #endif
1147 }
1148 
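/* Walk back up the fib tree (descending into a parent's subtree when
 * present) until a node carrying route info is found; returns NULL
 * once the tree root is reached.
 */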
1149 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1150 					struct in6_addr *saddr)
1151 {
1152 	struct fib6_node *pn, *sn;
1153 	while (1) {
1154 		if (fn->fn_flags & RTN_TL_ROOT)
1155 			return NULL;
1156 		pn = rcu_dereference(fn->parent);
1157 		sn = FIB6_SUBTREE(pn);
1158 		if (sn && sn != fn)
1159 			fn = fib6_node_lookup(sn, NULL, saddr);
1160 		else
1161 			fn = pn;
1162 		if (fn->fn_flags & RTN_RTINFO)
1163 			return fn;
1164 	}
1165 }
1166 
1167 static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1168 {
1169 	struct rt6_info *rt = *prt;
1170 
1171 	if (dst_hold_safe(&rt->dst))
1172 		return true;
1173 	if (net) {
1174 		rt = net->ipv6.ip6_null_entry;
1175 		dst_hold(&rt->dst);
1176 	} else {
1177 		rt = NULL;
1178 	}
1179 	*prt = rt;
1180 	return false;
1181 }
1182 
1183 /* called with rcu_read_lock held */
1184 static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1185 {
1186 	struct net_device *dev = res->nh->fib_nh_dev;
1187 	struct fib6_info *f6i = res->f6i;
1188 	unsigned short flags;
1189 	struct rt6_info *nrt;
1190 
1191 	if (!fib6_info_hold_safe(f6i))
1192 		goto fallback;
1193 
1194 	flags = fib6_info_dst_flags(f6i);
1195 	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1196 	if (!nrt) {
1197 		fib6_info_release(f6i);
1198 		goto fallback;
1199 	}
1200 
1201 	ip6_rt_copy_init(nrt, res);
1202 	return nrt;
1203 
1204 fallback:
1205 	nrt = dev_net(dev)->ipv6.ip6_null_entry;
1206 	dst_hold(&nrt->dst);
1207 	return nrt;
1208 }
1209 
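/* Flow-based lookup without caching into the fib: find the matching
 * fib6 node, apply device and multipath selection, prefer a cached
 * exception route, and otherwise create a dst under RCU.
 */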
1210 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
1211 					     struct fib6_table *table,
1212 					     struct flowi6 *fl6,
1213 					     const struct sk_buff *skb,
1214 					     int flags)
1215 {
1216 	struct fib6_result res = {};
1217 	struct fib6_node *fn;
1218 	struct rt6_info *rt;
1219 
1220 	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1221 		flags &= ~RT6_LOOKUP_F_IFACE;
1222 
1223 	rcu_read_lock();
1224 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1225 restart:
1226 	res.f6i = rcu_dereference(fn->leaf);
1227 	if (!res.f6i)
1228 		res.f6i = net->ipv6.fib6_null_entry;
1229 	else
1230 		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1231 				 flags);
1232 
1233 	if (res.f6i == net->ipv6.fib6_null_entry) {
1234 		fn = fib6_backtrack(fn, &fl6->saddr);
1235 		if (fn)
1236 			goto restart;
1237 
1238 		rt = net->ipv6.ip6_null_entry;
1239 		dst_hold(&rt->dst);
1240 		goto out;
1241 	} else if (res.fib6_flags & RTF_REJECT) {
1242 		goto do_create;
1243 	}
1244 
1245 	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1246 			 fl6->flowi6_oif != 0, skb, flags);
1247 
1248 	/* Search through exception table */
1249 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1250 	if (rt) {
1251 		if (ip6_hold_safe(net, &rt))
1252 			dst_use_noref(&rt->dst, jiffies);
1253 	} else {
1254 do_create:
1255 		rt = ip6_create_rt_rcu(&res);
1256 	}
1257 
1258 out:
1259 	trace_fib6_table_lookup(net, &res, table, fl6);
1260 
1261 	rcu_read_unlock();
1262 
1263 	return rt;
1264 }
1265 
1266 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
1267 				   const struct sk_buff *skb, int flags)
1268 {
1269 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
1270 }
1271 EXPORT_SYMBOL_GPL(ip6_route_lookup);
1272 
1273 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1274 			    const struct in6_addr *saddr, int oif,
1275 			    const struct sk_buff *skb, int strict)
1276 {
1277 	struct flowi6 fl6 = {
1278 		.flowi6_oif = oif,
1279 		.daddr = *daddr,
1280 	};
1281 	struct dst_entry *dst;
1282 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1283 
1284 	if (saddr) {
1285 		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1286 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1287 	}
1288 
1289 	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1290 	if (dst->error == 0)
1291 		return (struct rt6_info *) dst;
1292 
1293 	dst_release(dst);
1294 
1295 	return NULL;
1296 }
1297 EXPORT_SYMBOL(rt6_lookup);
1298 
1299 /* ip6_ins_rt is called with table->tb6_lock NOT held; it takes the
1300  * lock itself.  It takes a new route entry; if the addition fails for
1301  * any reason, the route is released.
1302  * Caller must hold a reference on the route before calling.
1303  */
1304 
1305 static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1306 			struct netlink_ext_ack *extack)
1307 {
1308 	int err;
1309 	struct fib6_table *table;
1310 
1311 	table = rt->fib6_table;
1312 	spin_lock_bh(&table->tb6_lock);
1313 	err = fib6_add(&table->tb6_root, rt, info, extack);
1314 	spin_unlock_bh(&table->tb6_lock);
1315 
1316 	return err;
1317 }
1318 
1319 int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1320 {
1321 	struct nl_info info = {	.nl_net = net, };
1322 
1323 	return __ip6_ins_rt(rt, &info, NULL);
1324 }
1325 
1326 static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1327 					   const struct in6_addr *daddr,
1328 					   const struct in6_addr *saddr)
1329 {
1330 	struct fib6_info *f6i = res->f6i;
1331 	struct net_device *dev;
1332 	struct rt6_info *rt;
1333 
1334 	/*
1335 	 *	Clone the route.
1336 	 */
1337 
1338 	if (!fib6_info_hold_safe(f6i))
1339 		return NULL;
1340 
1341 	dev = ip6_rt_get_dev_rcu(res);
1342 	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1343 	if (!rt) {
1344 		fib6_info_release(f6i);
1345 		return NULL;
1346 	}
1347 
1348 	ip6_rt_copy_init(rt, res);
1349 	rt->rt6i_flags |= RTF_CACHE;
1350 	rt->rt6i_dst.addr = *daddr;
1351 	rt->rt6i_dst.plen = 128;
1352 
1353 	if (!rt6_is_gw_or_nonexthop(res)) {
1354 		if (f6i->fib6_dst.plen != 128 &&
1355 		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1356 			rt->rt6i_flags |= RTF_ANYCAST;
1357 #ifdef CONFIG_IPV6_SUBTREES
1358 		if (rt->rt6i_src.plen && saddr) {
1359 			rt->rt6i_src.addr = *saddr;
1360 			rt->rt6i_src.plen = 128;
1361 		}
1362 #endif
1363 	}
1364 
1365 	return rt;
1366 }
1367 
1368 static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1369 {
1370 	struct fib6_info *f6i = res->f6i;
1371 	unsigned short flags = fib6_info_dst_flags(f6i);
1372 	struct net_device *dev;
1373 	struct rt6_info *pcpu_rt;
1374 
1375 	if (!fib6_info_hold_safe(f6i))
1376 		return NULL;
1377 
1378 	rcu_read_lock();
1379 	dev = ip6_rt_get_dev_rcu(res);
1380 	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
1381 	rcu_read_unlock();
1382 	if (!pcpu_rt) {
1383 		fib6_info_release(f6i);
1384 		return NULL;
1385 	}
1386 	ip6_rt_copy_init(pcpu_rt, res);
1387 	pcpu_rt->rt6i_flags |= RTF_PCPU;
1388 
1389 	if (f6i->nh)
1390 		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
1391 
1392 	return pcpu_rt;
1393 }
1394 
1395 static bool rt6_is_valid(const struct rt6_info *rt6)
1396 {
1397 	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
1398 }
1399 
1400 /* It should be called with rcu_read_lock() acquired */
1401 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1402 {
1403 	struct rt6_info *pcpu_rt;
1404 
1405 	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1406 
1407 	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
1408 		struct rt6_info *prev, **p;
1409 
1410 		p = this_cpu_ptr(res->nh->rt6i_pcpu);
1411 		prev = xchg(p, NULL);
1412 		if (prev) {
1413 			dst_dev_put(&prev->dst);
1414 			dst_release(&prev->dst);
1415 		}
1416 
1417 		pcpu_rt = NULL;
1418 	}
1419 
1420 	return pcpu_rt;
1421 }
1422 
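/* Allocate a per-cpu cached dst for this result and publish it into
 * the nexthop's rt6i_pcpu slot; the slot must be empty (see the
 * cmpxchg/BUG_ON below).
 */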
1423 static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1424 					    const struct fib6_result *res)
1425 {
1426 	struct rt6_info *pcpu_rt, *prev, **p;
1427 
1428 	pcpu_rt = ip6_rt_pcpu_alloc(res);
1429 	if (!pcpu_rt)
1430 		return NULL;
1431 
1432 	p = this_cpu_ptr(res->nh->rt6i_pcpu);
1433 	prev = cmpxchg(p, NULL, pcpu_rt);
1434 	BUG_ON(prev);
1435 
1436 	if (res->f6i->fib6_destroying) {
1437 		struct fib6_info *from;
1438 
1439 		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
1440 		fib6_info_release(from);
1441 	}
1442 
1443 	return pcpu_rt;
1444 }
1445 
1446 /* exception hash table implementation
1447  */
1448 static DEFINE_SPINLOCK(rt6_exception_lock);
1449 
1450 /* Remove rt6_ex from hash table and free the memory
1451  * Caller must hold rt6_exception_lock
1452  */
1453 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1454 				 struct rt6_exception *rt6_ex)
1455 {
1456 	struct fib6_info *from;
1457 	struct net *net;
1458 
1459 	if (!bucket || !rt6_ex)
1460 		return;
1461 
1462 	net = dev_net(rt6_ex->rt6i->dst.dev);
1463 	net->ipv6.rt6_stats->fib_rt_cache--;
1464 
1465 	/* completely purge the exception so that its held resources can be
1466 	 * released: some socket caches may keep the dst around indefinitely
1467 	 */
1468 	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
1469 	fib6_info_release(from);
1470 	dst_dev_put(&rt6_ex->rt6i->dst);
1471 
1472 	hlist_del_rcu(&rt6_ex->hlist);
1473 	dst_release(&rt6_ex->rt6i->dst);
1474 	kfree_rcu(rt6_ex, rcu);
1475 	WARN_ON_ONCE(!bucket->depth);
1476 	bucket->depth--;
1477 }
1478 
1479 /* Remove oldest rt6_ex in bucket and free the memory
1480  * Caller must hold rt6_exception_lock
1481  */
1482 static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1483 {
1484 	struct rt6_exception *rt6_ex, *oldest = NULL;
1485 
1486 	if (!bucket)
1487 		return;
1488 
1489 	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1490 		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1491 			oldest = rt6_ex;
1492 	}
1493 	rt6_remove_exception(bucket, oldest);
1494 }
1495 
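/* Hash the destination (and, with subtrees, the source) address with
 * a boot-time random seed into a FIB6_EXCEPTION_BUCKET_SIZE_SHIFT-bit
 * bucket index.
 */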
1496 static u32 rt6_exception_hash(const struct in6_addr *dst,
1497 			      const struct in6_addr *src)
1498 {
1499 	static u32 seed __read_mostly;
1500 	u32 val;
1501 
1502 	net_get_random_once(&seed, sizeof(seed));
1503 	val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
1504 
1505 #ifdef CONFIG_IPV6_SUBTREES
1506 	if (src)
1507 		val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
1508 #endif
1509 	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1510 }
1511 
1512 /* Helper function to find the cached rt in the hash table
1513  * and update bucket pointer to point to the bucket for this
1514  * (daddr, saddr) pair
1515  * Caller must hold rt6_exception_lock
1516  */
1517 static struct rt6_exception *
1518 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1519 			      const struct in6_addr *daddr,
1520 			      const struct in6_addr *saddr)
1521 {
1522 	struct rt6_exception *rt6_ex;
1523 	u32 hval;
1524 
1525 	if (!(*bucket) || !daddr)
1526 		return NULL;
1527 
1528 	hval = rt6_exception_hash(daddr, saddr);
1529 	*bucket += hval;
1530 
1531 	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1532 		struct rt6_info *rt6 = rt6_ex->rt6i;
1533 		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1534 
1535 #ifdef CONFIG_IPV6_SUBTREES
1536 		if (matched && saddr)
1537 			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1538 #endif
1539 		if (matched)
1540 			return rt6_ex;
1541 	}
1542 	return NULL;
1543 }
1544 
1545 /* Helper function to find the cached rt in the hash table
1546  * and update bucket pointer to point to the bucket for this
1547  * (daddr, saddr) pair
1548  * Caller must hold rcu_read_lock()
1549  */
1550 static struct rt6_exception *
1551 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1552 			 const struct in6_addr *daddr,
1553 			 const struct in6_addr *saddr)
1554 {
1555 	struct rt6_exception *rt6_ex;
1556 	u32 hval;
1557 
1558 	WARN_ON_ONCE(!rcu_read_lock_held());
1559 
1560 	if (!(*bucket) || !daddr)
1561 		return NULL;
1562 
1563 	hval = rt6_exception_hash(daddr, saddr);
1564 	*bucket += hval;
1565 
1566 	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1567 		struct rt6_info *rt6 = rt6_ex->rt6i;
1568 		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1569 
1570 #ifdef CONFIG_IPV6_SUBTREES
1571 		if (matched && saddr)
1572 			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1573 #endif
1574 		if (matched)
1575 			return rt6_ex;
1576 	}
1577 	return NULL;
1578 }
1579 
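/* Effective MTU for the result: the route's fib6_pmtu if set, else the
 * egress device's mtu6, capped at IP6_MAX_MTU and reduced by any
 * lwtunnel encapsulation headroom.
 */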
1580 static unsigned int fib6_mtu(const struct fib6_result *res)
1581 {
1582 	const struct fib6_nh *nh = res->nh;
1583 	unsigned int mtu;
1584 
1585 	if (res->f6i->fib6_pmtu) {
1586 		mtu = res->f6i->fib6_pmtu;
1587 	} else {
1588 		struct net_device *dev = nh->fib_nh_dev;
1589 		struct inet6_dev *idev;
1590 
1591 		rcu_read_lock();
1592 		idev = __in6_dev_get(dev);
1593 		mtu = idev->cnf.mtu6;
1594 		rcu_read_unlock();
1595 	}
1596 
1597 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1598 
1599 	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1600 }
1601 
1602 #define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL
1603 
1604 /* used when the flushed bit is not relevant, only access to the bucket
1605  * (ie., all bucket users except rt6_insert_exception);
1606  *
1607  * called under rcu lock; sometimes called with rt6_exception_lock held
1608  */
1609 static
1610 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1611 						       spinlock_t *lock)
1612 {
1613 	struct rt6_exception_bucket *bucket;
1614 
1615 	if (lock)
1616 		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1617 						   lockdep_is_held(lock));
1618 	else
1619 		bucket = rcu_dereference(nh->rt6i_exception_bucket);
1620 
1621 	/* remove bucket flushed bit if set */
1622 	if (bucket) {
1623 		unsigned long p = (unsigned long)bucket;
1624 
1625 		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1626 		bucket = (struct rt6_exception_bucket *)p;
1627 	}
1628 
1629 	return bucket;
1630 }
1631 
1632 static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1633 {
1634 	unsigned long p = (unsigned long)bucket;
1635 
1636 	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1637 }
1638 
1639 /* called with rt6_exception_lock held */
1640 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1641 					      spinlock_t *lock)
1642 {
1643 	struct rt6_exception_bucket *bucket;
1644 	unsigned long p;
1645 
1646 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1647 					   lockdep_is_held(lock));
1648 
1649 	p = (unsigned long)bucket;
1650 	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1651 	bucket = (struct rt6_exception_bucket *)p;
1652 	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1653 }
1654 
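/* Insert nrt as a cached exception route for res->nh, evicting the
 * oldest entry when the bucket exceeds FIB6_MAX_DEPTH and bumping the
 * table sernum so existing dsts are revalidated.
 */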
1655 static int rt6_insert_exception(struct rt6_info *nrt,
1656 				const struct fib6_result *res)
1657 {
1658 	struct net *net = dev_net(nrt->dst.dev);
1659 	struct rt6_exception_bucket *bucket;
1660 	struct fib6_info *f6i = res->f6i;
1661 	struct in6_addr *src_key = NULL;
1662 	struct rt6_exception *rt6_ex;
1663 	struct fib6_nh *nh = res->nh;
1664 	int err = 0;
1665 
1666 	spin_lock_bh(&rt6_exception_lock);
1667 
1668 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1669 					  lockdep_is_held(&rt6_exception_lock));
1670 	if (!bucket) {
1671 		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1672 				 GFP_ATOMIC);
1673 		if (!bucket) {
1674 			err = -ENOMEM;
1675 			goto out;
1676 		}
1677 		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1678 	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1679 		err = -EINVAL;
1680 		goto out;
1681 	}
1682 
1683 #ifdef CONFIG_IPV6_SUBTREES
1684 	/* fib6_src.plen != 0 indicates f6i is in subtree
1685 	 * and exception table is indexed by a hash of
1686 	 * both fib6_dst and fib6_src.
1687 	 * Otherwise, the exception table is indexed by
1688 	 * a hash of only fib6_dst.
1689 	 */
1690 	if (f6i->fib6_src.plen)
1691 		src_key = &nrt->rt6i_src.addr;
1692 #endif
1693 	/* rt6_mtu_change() might lower mtu on f6i.
1694 	 * Only insert this exception route if its mtu
1695 	 * is less than f6i's mtu value.
1696 	 */
1697 	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1698 		err = -EINVAL;
1699 		goto out;
1700 	}
1701 
1702 	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1703 					       src_key);
1704 	if (rt6_ex)
1705 		rt6_remove_exception(bucket, rt6_ex);
1706 
1707 	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1708 	if (!rt6_ex) {
1709 		err = -ENOMEM;
1710 		goto out;
1711 	}
1712 	rt6_ex->rt6i = nrt;
1713 	rt6_ex->stamp = jiffies;
1714 	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1715 	bucket->depth++;
1716 	net->ipv6.rt6_stats->fib_rt_cache++;
1717 
1718 	if (bucket->depth > FIB6_MAX_DEPTH)
1719 		rt6_exception_remove_oldest(bucket);
1720 
1721 out:
1722 	spin_unlock_bh(&rt6_exception_lock);
1723 
1724 	/* Update fn->fn_sernum to invalidate all cached dst */
1725 	if (!err) {
1726 		spin_lock_bh(&f6i->fib6_table->tb6_lock);
1727 		fib6_update_sernum(net, f6i);
1728 		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1729 		fib6_force_start_gc(net);
1730 	}
1731 
1732 	return err;
1733 }
1734 
1735 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1736 {
1737 	struct rt6_exception_bucket *bucket;
1738 	struct rt6_exception *rt6_ex;
1739 	struct hlist_node *tmp;
1740 	int i;
1741 
1742 	spin_lock_bh(&rt6_exception_lock);
1743 
1744 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1745 	if (!bucket)
1746 		goto out;
1747 
1748 	/* Prevent rt6_insert_exception() from recreating the bucket list */
1749 	if (!from)
1750 		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1751 
1752 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1753 		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1754 			if (!from ||
1755 			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
1756 				rt6_remove_exception(bucket, rt6_ex);
1757 		}
1758 		WARN_ON_ONCE(!from && bucket->depth);
1759 		bucket++;
1760 	}
1761 out:
1762 	spin_unlock_bh(&rt6_exception_lock);
1763 }
1764 
1765 static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1766 {
1767 	struct fib6_info *f6i = arg;
1768 
1769 	fib6_nh_flush_exceptions(nh, f6i);
1770 
1771 	return 0;
1772 }
1773 
1774 void rt6_flush_exceptions(struct fib6_info *f6i)
1775 {
1776 	if (f6i->nh)
1777 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1778 					 f6i);
1779 	else
1780 		fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1781 }
1782 
1783 /* Find the cached rt in the exception hash table of the passed-in result
1784  * Caller has to hold rcu_read_lock()
1785  */
1786 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1787 					   const struct in6_addr *daddr,
1788 					   const struct in6_addr *saddr)
1789 {
1790 	const struct in6_addr *src_key = NULL;
1791 	struct rt6_exception_bucket *bucket;
1792 	struct rt6_exception *rt6_ex;
1793 	struct rt6_info *ret = NULL;
1794 
1795 #ifdef CONFIG_IPV6_SUBTREES
1796 	/* fib6_src.plen != 0 indicates f6i is in subtree
1797 	 * and exception table is indexed by a hash of
1798 	 * both fib6_dst and fib6_src.
1799 	 * However, the src addr used to create the hash
1800 	 * might not be exactly the passed in saddr which
1801 	 * is a /128 addr from the flow.
1802 	 * So we need to use f6i->fib6_src to redo lookup
1803 	 * if the passed in saddr does not find anything.
1804 	 * (See the logic in ip6_rt_cache_alloc() on how
1805 	 * rt->rt6i_src is updated.)
1806 	 */
1807 	if (res->f6i->fib6_src.plen)
1808 		src_key = saddr;
1809 find_ex:
1810 #endif
1811 	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1812 	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1813 
1814 	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1815 		ret = rt6_ex->rt6i;
1816 
1817 #ifdef CONFIG_IPV6_SUBTREES
1818 	/* Use fib6_src as src_key and redo lookup */
1819 	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1820 		src_key = &res->f6i->fib6_src.addr;
1821 		goto find_ex;
1822 	}
1823 #endif
1824 
1825 	return ret;
1826 }
1827 
1828 /* Remove the passed in cached rt from the hash table that contains it */
1829 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1830 				    const struct rt6_info *rt)
1831 {
1832 	const struct in6_addr *src_key = NULL;
1833 	struct rt6_exception_bucket *bucket;
1834 	struct rt6_exception *rt6_ex;
1835 	int err;
1836 
1837 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1838 		return -ENOENT;
1839 
1840 	spin_lock_bh(&rt6_exception_lock);
1841 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1842 
1843 #ifdef CONFIG_IPV6_SUBTREES
1844 	/* rt6i_src.plen != 0 indicates 'from' is in subtree
1845 	 * and exception table is indexed by a hash of
1846 	 * both rt6i_dst and rt6i_src.
1847 	 * Otherwise, the exception table is indexed by
1848 	 * a hash of only rt6i_dst.
1849 	 */
1850 	if (plen)
1851 		src_key = &rt->rt6i_src.addr;
1852 #endif
1853 	rt6_ex = __rt6_find_exception_spinlock(&bucket,
1854 					       &rt->rt6i_dst.addr,
1855 					       src_key);
1856 	if (rt6_ex) {
1857 		rt6_remove_exception(bucket, rt6_ex);
1858 		err = 0;
1859 	} else {
1860 		err = -ENOENT;
1861 	}
1862 
1863 	spin_unlock_bh(&rt6_exception_lock);
1864 	return err;
1865 }
1866 
1867 struct fib6_nh_excptn_arg {
1868 	struct rt6_info	*rt;
1869 	int		plen;
1870 };
1871 
1872 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1873 {
1874 	struct fib6_nh_excptn_arg *arg = _arg;
1875 	int err;
1876 
1877 	err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1878 	if (err == 0)
1879 		return 1;
1880 
1881 	return 0;
1882 }
1883 
1884 static int rt6_remove_exception_rt(struct rt6_info *rt)
1885 {
1886 	struct fib6_info *from;
1887 
1888 	from = rcu_dereference(rt->from);
1889 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
1890 		return -EINVAL;
1891 
1892 	if (from->nh) {
1893 		struct fib6_nh_excptn_arg arg = {
1894 			.rt = rt,
1895 			.plen = from->fib6_src.plen
1896 		};
1897 		int rc;
1898 
1899 		/* rc = 1 means an entry was found */
1900 		rc = nexthop_for_each_fib6_nh(from->nh,
1901 					      rt6_nh_remove_exception_rt,
1902 					      &arg);
1903 		return rc ? 0 : -ENOENT;
1904 	}
1905 
1906 	return fib6_nh_remove_exception(from->fib6_nh,
1907 					from->fib6_src.plen, rt);
1908 }
1909 
1910 /* Find rt6_ex which contains the passed in rt cache and
1911  * refresh its stamp
1912  */
1913 static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1914 				     const struct rt6_info *rt)
1915 {
1916 	const struct in6_addr *src_key = NULL;
1917 	struct rt6_exception_bucket *bucket;
1918 	struct rt6_exception *rt6_ex;
1919 
1920 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1921 #ifdef CONFIG_IPV6_SUBTREES
1922 	/* rt6i_src.plen != 0 indicates 'from' is in subtree
1923 	 * and exception table is indexed by a hash of
1924 	 * both rt6i_dst and rt6i_src.
1925 	 * Otherwise, the exception table is indexed by
1926 	 * a hash of only rt6i_dst.
1927 	 */
1928 	if (plen)
1929 		src_key = &rt->rt6i_src.addr;
1930 #endif
1931 	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1932 	if (rt6_ex)
1933 		rt6_ex->stamp = jiffies;
1934 }
1935 
1936 struct fib6_nh_match_arg {
1937 	const struct net_device *dev;
1938 	const struct in6_addr	*gw;
1939 	struct fib6_nh		*match;
1940 };
1941 
1942 /* determine if fib6_nh has the given device and gateway */
1943 static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1944 {
1945 	struct fib6_nh_match_arg *arg = _arg;
1946 
1947 	if (arg->dev != nh->fib_nh_dev ||
1948 	    (arg->gw && !nh->fib_nh_gw_family) ||
1949 	    (!arg->gw && nh->fib_nh_gw_family) ||
1950 	    (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
1951 		return 0;
1952 
1953 	arg->match = nh;
1954 
1955 	/* found a match, break the loop */
1956 	return 1;
1957 }
1958 
1959 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1960 {
1961 	struct fib6_info *from;
1962 	struct fib6_nh *fib6_nh;
1963 
1964 	rcu_read_lock();
1965 
1966 	from = rcu_dereference(rt->from);
1967 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
1968 		goto unlock;
1969 
1970 	if (from->nh) {
1971 		struct fib6_nh_match_arg arg = {
1972 			.dev = rt->dst.dev,
1973 			.gw = &rt->rt6i_gateway,
1974 		};
1975 
1976 		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1977 
1978 		if (!arg.match)
1979 			goto unlock;
1980 		fib6_nh = arg.match;
1981 	} else {
1982 		fib6_nh = from->fib6_nh;
1983 	}
1984 	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
1985 unlock:
1986 	rcu_read_unlock();
1987 }
1988 
1989 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1990 					 struct rt6_info *rt, int mtu)
1991 {
1992 	/* If the new MTU is lower than the route PMTU, this new MTU will be the
1993 	 * lowest MTU in the path: always allow updating the route PMTU to
1994 	 * reflect PMTU decreases.
1995 	 *
1996 	 * If the new MTU is higher, and the route PMTU is equal to the local
1997 	 * MTU, this means the old MTU is the lowest in the path, so allow
1998 	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1999 	 * handle this.
2000 	 */
2001 
2002 	if (dst_mtu(&rt->dst) >= mtu)
2003 		return true;
2004 
2005 	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
2006 		return true;
2007 
2008 	return false;
2009 }
2010 
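/* Walk every exception bucket of 'nh' and update the cached MTU of each
 * entry to 'mtu' where rt6_mtu_change_route_allowed() permits it.  The
 * caller is expected to hold rt6_exception_lock, which is passed down to
 * fib6_nh_get_excptn_bucket().
 */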
2011 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
2012 				       const struct fib6_nh *nh, int mtu)
2013 {
2014 	struct rt6_exception_bucket *bucket;
2015 	struct rt6_exception *rt6_ex;
2016 	int i;
2017 
2018 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2019 	if (!bucket)
2020 		return;
2021 
2022 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2023 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
2024 			struct rt6_info *entry = rt6_ex->rt6i;
2025 
2026 			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2027 			 * route), the metrics of its rt->from have already
2028 			 * been updated.
2029 			 */
2030 			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2031 			    rt6_mtu_change_route_allowed(idev, entry, mtu))
2032 				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2033 		}
2034 		bucket++;
2035 	}
2036 }
2037 
2038 #define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
2039 
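/* Drop every exception under 'nh' that is both RTF_CACHE and RTF_GATEWAY
 * and whose gateway equals the given address.
 */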
2040 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2041 					    const struct in6_addr *gateway)
2042 {
2043 	struct rt6_exception_bucket *bucket;
2044 	struct rt6_exception *rt6_ex;
2045 	struct hlist_node *tmp;
2046 	int i;
2047 
2048 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2049 		return;
2050 
2051 	spin_lock_bh(&rt6_exception_lock);
2052 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2053 	if (bucket) {
2054 		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2055 			hlist_for_each_entry_safe(rt6_ex, tmp,
2056 						  &bucket->chain, hlist) {
2057 				struct rt6_info *entry = rt6_ex->rt6i;
2058 
2059 				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2060 				    RTF_CACHE_GATEWAY &&
2061 				    ipv6_addr_equal(gateway,
2062 						    &entry->rt6i_gateway)) {
2063 					rt6_remove_exception(bucket, rt6_ex);
2064 				}
2065 			}
2066 			bucket++;
2067 		}
2068 	}
2069 
2070 	spin_unlock_bh(&rt6_exception_lock);
2071 }
2072 
2073 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2074 				      struct rt6_exception *rt6_ex,
2075 				      struct fib6_gc_args *gc_args,
2076 				      unsigned long now)
2077 {
2078 	struct rt6_info *rt = rt6_ex->rt6i;
2079 
2080 	/* we are pruning and obsoleting aged-out and non-gateway exceptions
2081 	 * even if others still hold references to them, so that on the next
2082 	 * dst_check() such references can be dropped.
2083 	 * RTF_EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
2084 	 * expired, independently of their aging, as per RFC 8201 section 4
2085 	 */
2086 	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2087 		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
2088 			RT6_TRACE("aging clone %p\n", rt);
2089 			rt6_remove_exception(bucket, rt6_ex);
2090 			return;
2091 		}
2092 	} else if (time_after(jiffies, rt->dst.expires)) {
2093 		RT6_TRACE("purging expired route %p\n", rt);
2094 		rt6_remove_exception(bucket, rt6_ex);
2095 		return;
2096 	}
2097 
2098 	if (rt->rt6i_flags & RTF_GATEWAY) {
2099 		struct neighbour *neigh;
2100 		__u8 neigh_flags = 0;
2101 
2102 		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2103 		if (neigh)
2104 			neigh_flags = neigh->flags;
2105 
2106 		if (!(neigh_flags & NTF_ROUTER)) {
2107 			RT6_TRACE("purging route %p via non-router but gateway\n",
2108 				  rt);
2109 			rt6_remove_exception(bucket, rt6_ex);
2110 			return;
2111 		}
2112 	}
2113 
2114 	gc_args->more++;
2115 }
2116 
2117 static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2118 				   struct fib6_gc_args *gc_args,
2119 				   unsigned long now)
2120 {
2121 	struct rt6_exception_bucket *bucket;
2122 	struct rt6_exception *rt6_ex;
2123 	struct hlist_node *tmp;
2124 	int i;
2125 
2126 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2127 		return;
2128 
2129 	rcu_read_lock_bh();
2130 	spin_lock(&rt6_exception_lock);
2131 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2132 	if (bucket) {
2133 		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2134 			hlist_for_each_entry_safe(rt6_ex, tmp,
2135 						  &bucket->chain, hlist) {
2136 				rt6_age_examine_exception(bucket, rt6_ex,
2137 							  gc_args, now);
2138 			}
2139 			bucket++;
2140 		}
2141 	}
2142 	spin_unlock(&rt6_exception_lock);
2143 	rcu_read_unlock_bh();
2144 }
2145 
2146 struct fib6_nh_age_excptn_arg {
2147 	struct fib6_gc_args	*gc_args;
2148 	unsigned long		now;
2149 };
2150 
2151 static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2152 {
2153 	struct fib6_nh_age_excptn_arg *arg = _arg;
2154 
2155 	fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2156 	return 0;
2157 }
2158 
2159 void rt6_age_exceptions(struct fib6_info *f6i,
2160 			struct fib6_gc_args *gc_args,
2161 			unsigned long now)
2162 {
2163 	if (f6i->nh) {
2164 		struct fib6_nh_age_excptn_arg arg = {
2165 			.gc_args = gc_args,
2166 			.now = now
2167 		};
2168 
2169 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2170 					 &arg);
2171 	} else {
2172 		fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2173 	}
2174 }
2175 
2176 /* must be called with rcu lock held */
2177 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2178 		      struct flowi6 *fl6, struct fib6_result *res, int strict)
2179 {
2180 	struct fib6_node *fn, *saved_fn;
2181 
2182 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2183 	saved_fn = fn;
2184 
2185 	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2186 		oif = 0;
2187 
2188 redo_rt6_select:
2189 	rt6_select(net, fn, oif, res, strict);
2190 	if (res->f6i == net->ipv6.fib6_null_entry) {
2191 		fn = fib6_backtrack(fn, &fl6->saddr);
2192 		if (fn)
2193 			goto redo_rt6_select;
2194 		else if (strict & RT6_LOOKUP_F_REACHABLE) {
2195 			/* also consider unreachable route */
2196 			strict &= ~RT6_LOOKUP_F_REACHABLE;
2197 			fn = saved_fn;
2198 			goto redo_rt6_select;
2199 		}
2200 	}
2201 
2202 	trace_fib6_table_lookup(net, res, table, fl6);
2203 
2204 	return 0;
2205 }
2206 
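/* Core policy-routing lookup: find the best fib6_info in 'table', select
 * a path for multipath routes, then return either a matching exception
 * (cached clone), a fresh uncached RTF_CACHE clone for the
 * FLOWI_FLAG_KNOWN_NH case, or a per-cpu copy of the route.  Unless
 * RT6_LOOKUP_F_DST_NOREF is set, a reference is taken on the result.
 */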
2207 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2208 			       int oif, struct flowi6 *fl6,
2209 			       const struct sk_buff *skb, int flags)
2210 {
2211 	struct fib6_result res = {};
2212 	struct rt6_info *rt = NULL;
2213 	int strict = 0;
2214 
2215 	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2216 		     !rcu_read_lock_held());
2217 
2218 	strict |= flags & RT6_LOOKUP_F_IFACE;
2219 	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2220 	if (net->ipv6.devconf_all->forwarding == 0)
2221 		strict |= RT6_LOOKUP_F_REACHABLE;
2222 
2223 	rcu_read_lock();
2224 
2225 	fib6_table_lookup(net, table, oif, fl6, &res, strict);
2226 	if (res.f6i == net->ipv6.fib6_null_entry)
2227 		goto out;
2228 
2229 	fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2230 
2231 	/* Search through exception table */
2232 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2233 	if (rt) {
2234 		goto out;
2235 	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2236 			    !res.nh->fib_nh_gw_family)) {
2237 		/* Create an RTF_CACHE clone which will not be
2238 		 * owned by the fib6 tree.  It is for the special case where
2239 		 * the daddr in the skb during the neighbor look-up is different
2240 		 * from the fl6->daddr used to look up the route here.
2241 		 */
2242 		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2243 
2244 		if (rt) {
2245 			/* 1 refcnt is taken during ip6_rt_cache_alloc().
2246 			 * As rt6_uncached_list_add() does not consume refcnt,
2247 			 * this refcnt is always returned to the caller even
2248 			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
2249 			 */
2250 			rt6_uncached_list_add(rt);
2251 			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2252 			rcu_read_unlock();
2253 
2254 			return rt;
2255 		}
2256 	} else {
2257 		/* Get a percpu copy */
2258 		local_bh_disable();
2259 		rt = rt6_get_pcpu_route(&res);
2260 
2261 		if (!rt)
2262 			rt = rt6_make_pcpu_route(net, &res);
2263 
2264 		local_bh_enable();
2265 	}
2266 out:
2267 	if (!rt)
2268 		rt = net->ipv6.ip6_null_entry;
2269 	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2270 		ip6_hold_safe(net, &rt);
2271 	rcu_read_unlock();
2272 
2273 	return rt;
2274 }
2275 EXPORT_SYMBOL_GPL(ip6_pol_route);
2276 
2277 static struct rt6_info *ip6_pol_route_input(struct net *net,
2278 					    struct fib6_table *table,
2279 					    struct flowi6 *fl6,
2280 					    const struct sk_buff *skb,
2281 					    int flags)
2282 {
2283 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2284 }
2285 
2286 struct dst_entry *ip6_route_input_lookup(struct net *net,
2287 					 struct net_device *dev,
2288 					 struct flowi6 *fl6,
2289 					 const struct sk_buff *skb,
2290 					 int flags)
2291 {
2292 	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2293 		flags |= RT6_LOOKUP_F_IFACE;
2294 
2295 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2296 }
2297 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2298 
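/* Fill 'keys' with the L3 fields used for multipath hashing.  For ICMPv6
 * error messages the addresses of the embedded (offending) packet are
 * hashed instead of the outer header, keeping related errors on a
 * consistent path.
 */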
2299 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2300 				  struct flow_keys *keys,
2301 				  struct flow_keys *flkeys)
2302 {
2303 	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2304 	const struct ipv6hdr *key_iph = outer_iph;
2305 	struct flow_keys *_flkeys = flkeys;
2306 	const struct ipv6hdr *inner_iph;
2307 	const struct icmp6hdr *icmph;
2308 	struct ipv6hdr _inner_iph;
2309 	struct icmp6hdr _icmph;
2310 
2311 	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2312 		goto out;
2313 
2314 	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2315 				   sizeof(_icmph), &_icmph);
2316 	if (!icmph)
2317 		goto out;
2318 
2319 	if (!icmpv6_is_err(icmph->icmp6_type))
2320 		goto out;
2321 
2322 	inner_iph = skb_header_pointer(skb,
2323 				       skb_transport_offset(skb) + sizeof(*icmph),
2324 				       sizeof(_inner_iph), &_inner_iph);
2325 	if (!inner_iph)
2326 		goto out;
2327 
2328 	key_iph = inner_iph;
2329 	_flkeys = NULL;
2330 out:
2331 	if (_flkeys) {
2332 		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2333 		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2334 		keys->tags.flow_label = _flkeys->tags.flow_label;
2335 		keys->basic.ip_proto = _flkeys->basic.ip_proto;
2336 	} else {
2337 		keys->addrs.v6addrs.src = key_iph->saddr;
2338 		keys->addrs.v6addrs.dst = key_iph->daddr;
2339 		keys->tags.flow_label = ip6_flowlabel(key_iph);
2340 		keys->basic.ip_proto = key_iph->nexthdr;
2341 	}
2342 }
2343 
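/* Multipath hash policies (net.ipv6.fib_multipath_hash_policy):
 *   0 - L3 addresses and flow label (inner header for ICMPv6 errors)
 *   1 - L4 five-tuple; an existing skb L4 hash is reused when present
 *   2 - L3 from the inner header for encapsulated packets, otherwise
 *       the same keys as policy 0
 */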
2344 /* if skb is set it will be used and fl6 can be NULL */
2345 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2346 		       const struct sk_buff *skb, struct flow_keys *flkeys)
2347 {
2348 	struct flow_keys hash_keys;
2349 	u32 mhash;
2350 
2351 	switch (ip6_multipath_hash_policy(net)) {
2352 	case 0:
2353 		memset(&hash_keys, 0, sizeof(hash_keys));
2354 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2355 		if (skb) {
2356 			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2357 		} else {
2358 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2359 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2360 			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2361 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2362 		}
2363 		break;
2364 	case 1:
2365 		if (skb) {
2366 			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2367 			struct flow_keys keys;
2368 
2369 			/* short-circuit if we already have L4 hash present */
2370 			if (skb->l4_hash)
2371 				return skb_get_hash_raw(skb) >> 1;
2372 
2373 			memset(&hash_keys, 0, sizeof(hash_keys));
2374 
2375 			if (!flkeys) {
2376 				skb_flow_dissect_flow_keys(skb, &keys, flag);
2377 				flkeys = &keys;
2378 			}
2379 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2380 			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2381 			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2382 			hash_keys.ports.src = flkeys->ports.src;
2383 			hash_keys.ports.dst = flkeys->ports.dst;
2384 			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2385 		} else {
2386 			memset(&hash_keys, 0, sizeof(hash_keys));
2387 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2388 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2389 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2390 			hash_keys.ports.src = fl6->fl6_sport;
2391 			hash_keys.ports.dst = fl6->fl6_dport;
2392 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2393 		}
2394 		break;
2395 	case 2:
2396 		memset(&hash_keys, 0, sizeof(hash_keys));
2397 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2398 		if (skb) {
2399 			struct flow_keys keys;
2400 
2401 			if (!flkeys) {
2402 				skb_flow_dissect_flow_keys(skb, &keys, 0);
2403 				flkeys = &keys;
2404 			}
2405 
2406 			/* Inner can be v4 or v6 */
2407 			if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2408 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2409 				hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2410 				hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2411 			} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2412 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2413 				hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2414 				hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2415 				hash_keys.tags.flow_label = flkeys->tags.flow_label;
2416 				hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2417 			} else {
2418 				/* Same as case 0 */
2419 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2420 				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2421 			}
2422 		} else {
2423 			/* Same as case 0 */
2424 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2425 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2426 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2427 			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2428 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2429 		}
2430 		break;
2431 	}
2432 	mhash = flow_hash_from_keys(&hash_keys);
2433 
2434 	return mhash >> 1;
2435 }
2436 
2437 /* Called with rcu held */
2438 void ip6_route_input(struct sk_buff *skb)
2439 {
2440 	const struct ipv6hdr *iph = ipv6_hdr(skb);
2441 	struct net *net = dev_net(skb->dev);
2442 	int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2443 	struct ip_tunnel_info *tun_info;
2444 	struct flowi6 fl6 = {
2445 		.flowi6_iif = skb->dev->ifindex,
2446 		.daddr = iph->daddr,
2447 		.saddr = iph->saddr,
2448 		.flowlabel = ip6_flowinfo(iph),
2449 		.flowi6_mark = skb->mark,
2450 		.flowi6_proto = iph->nexthdr,
2451 	};
2452 	struct flow_keys *flkeys = NULL, _flkeys;
2453 
2454 	tun_info = skb_tunnel_info(skb);
2455 	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2456 		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2457 
2458 	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2459 		flkeys = &_flkeys;
2460 
2461 	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2462 		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2463 	skb_dst_drop(skb);
2464 	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2465 						      &fl6, skb, flags));
2466 }
2467 
2468 static struct rt6_info *ip6_pol_route_output(struct net *net,
2469 					     struct fib6_table *table,
2470 					     struct flowi6 *fl6,
2471 					     const struct sk_buff *skb,
2472 					     int flags)
2473 {
2474 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2475 }
2476 
2477 struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2478 					       const struct sock *sk,
2479 					       struct flowi6 *fl6, int flags)
2480 {
2481 	bool any_src;
2482 
2483 	if (ipv6_addr_type(&fl6->daddr) &
2484 	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2485 		struct dst_entry *dst;
2486 
2487 		/* This function does not take refcnt on the dst */
2488 		dst = l3mdev_link_scope_lookup(net, fl6);
2489 		if (dst)
2490 			return dst;
2491 	}
2492 
2493 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
2494 
2495 	flags |= RT6_LOOKUP_F_DST_NOREF;
2496 	any_src = ipv6_addr_any(&fl6->saddr);
2497 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2498 	    (fl6->flowi6_oif && any_src))
2499 		flags |= RT6_LOOKUP_F_IFACE;
2500 
2501 	if (!any_src)
2502 		flags |= RT6_LOOKUP_F_HAS_SADDR;
2503 	else if (sk)
2504 		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2505 
2506 	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2507 }
2508 EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
2509 
2510 struct dst_entry *ip6_route_output_flags(struct net *net,
2511 					 const struct sock *sk,
2512 					 struct flowi6 *fl6,
2513 					 int flags)
2514 {
2515 	struct dst_entry *dst;
2516 	struct rt6_info *rt6;
2517 
2518 	rcu_read_lock();
2519 	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2520 	rt6 = (struct rt6_info *)dst;
2521 	/* For dst cached in uncached_list, refcnt is already taken. */
2522 	if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
2523 		dst = &net->ipv6.ip6_null_entry->dst;
2524 		dst_hold(dst);
2525 	}
2526 	rcu_read_unlock();
2527 
2528 	return dst;
2529 }
2530 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2531 
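/* Replace 'dst_orig' with a blackhole dst bound to the loopback device:
 * its input and output handlers simply discard packets.  Metrics,
 * gateway and destination keys are copied from the original, and the
 * reference on dst_orig is released.
 */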
2532 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2533 {
2534 	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2535 	struct net_device *loopback_dev = net->loopback_dev;
2536 	struct dst_entry *new = NULL;
2537 
2538 	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2539 		       DST_OBSOLETE_DEAD, 0);
2540 	if (rt) {
2541 		rt6_info_init(rt);
2542 		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2543 
2544 		new = &rt->dst;
2545 		new->__use = 1;
2546 		new->input = dst_discard;
2547 		new->output = dst_discard_out;
2548 
2549 		dst_copy_metrics(new, &ort->dst);
2550 
2551 		rt->rt6i_idev = in6_dev_get(loopback_dev);
2552 		rt->rt6i_gateway = ort->rt6i_gateway;
2553 		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2554 
2555 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2556 #ifdef CONFIG_IPV6_SUBTREES
2557 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2558 #endif
2559 	}
2560 
2561 	dst_release(dst_orig);
2562 	return new ? new : ERR_PTR(-ENOMEM);
2563 }
2564 
2565 /*
2566  *	Destination cache support functions
2567  */
2568 
2569 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2570 {
2571 	u32 rt_cookie = 0;
2572 
2573 	if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2574 		return false;
2575 
2576 	if (fib6_check_expired(f6i))
2577 		return false;
2578 
2579 	return true;
2580 }
2581 
2582 static struct dst_entry *rt6_check(struct rt6_info *rt,
2583 				   struct fib6_info *from,
2584 				   u32 cookie)
2585 {
2586 	u32 rt_cookie = 0;
2587 
2588 	if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2589 	    rt_cookie != cookie)
2590 		return NULL;
2591 
2592 	if (rt6_check_expired(rt))
2593 		return NULL;
2594 
2595 	return &rt->dst;
2596 }
2597 
2598 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2599 					    struct fib6_info *from,
2600 					    u32 cookie)
2601 {
2602 	if (!__rt6_check_expired(rt) &&
2603 	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2604 	    fib6_check(from, cookie))
2605 		return &rt->dst;
2606 	else
2607 		return NULL;
2608 }
2609 
2610 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2611 {
2612 	struct dst_entry *dst_ret;
2613 	struct fib6_info *from;
2614 	struct rt6_info *rt;
2615 
2616 	rt = container_of(dst, struct rt6_info, dst);
2617 
2618 	if (rt->sernum)
2619 		return rt6_is_valid(rt) ? dst : NULL;
2620 
2621 	rcu_read_lock();
2622 
2623 	/* All IPv6 dsts are created with ->obsolete set to
2624 	 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
2625 	 * down into this function.
2626 	 */
2627 
2628 	from = rcu_dereference(rt->from);
2629 
2630 	if (from && (rt->rt6i_flags & RTF_PCPU ||
2631 	    unlikely(!list_empty(&rt->rt6i_uncached))))
2632 		dst_ret = rt6_dst_from_check(rt, from, cookie);
2633 	else
2634 		dst_ret = rt6_check(rt, from, cookie);
2635 
2636 	rcu_read_unlock();
2637 
2638 	return dst_ret;
2639 }
2640 
2641 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2642 {
2643 	struct rt6_info *rt = (struct rt6_info *) dst;
2644 
2645 	if (rt) {
2646 		if (rt->rt6i_flags & RTF_CACHE) {
2647 			rcu_read_lock();
2648 			if (rt6_check_expired(rt)) {
2649 				rt6_remove_exception_rt(rt);
2650 				dst = NULL;
2651 			}
2652 			rcu_read_unlock();
2653 		} else {
2654 			dst_release(dst);
2655 			dst = NULL;
2656 		}
2657 	}
2658 	return dst;
2659 }
2660 
2661 static void ip6_link_failure(struct sk_buff *skb)
2662 {
2663 	struct rt6_info *rt;
2664 
2665 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2666 
2667 	rt = (struct rt6_info *) skb_dst(skb);
2668 	if (rt) {
2669 		rcu_read_lock();
2670 		if (rt->rt6i_flags & RTF_CACHE) {
2671 			rt6_remove_exception_rt(rt);
2672 		} else {
2673 			struct fib6_info *from;
2674 			struct fib6_node *fn;
2675 
2676 			from = rcu_dereference(rt->from);
2677 			if (from) {
2678 				fn = rcu_dereference(from->fib6_node);
2679 				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2680 					fn->fn_sernum = -1;
2681 			}
2682 		}
2683 		rcu_read_unlock();
2684 	}
2685 }
2686 
2687 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2688 {
2689 	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2690 		struct fib6_info *from;
2691 
2692 		rcu_read_lock();
2693 		from = rcu_dereference(rt0->from);
2694 		if (from)
2695 			rt0->dst.expires = from->expires;
2696 		rcu_read_unlock();
2697 	}
2698 
2699 	dst_set_expires(&rt0->dst, timeout);
2700 	rt0->rt6i_flags |= RTF_EXPIRES;
2701 }
2702 
2703 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2704 {
2705 	struct net *net = dev_net(rt->dst.dev);
2706 
2707 	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2708 	rt->rt6i_flags |= RTF_MODIFIED;
2709 	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2710 }
2711 
2712 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2713 {
2714 	return !(rt->rt6i_flags & RTF_CACHE) &&
2715 		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2716 }
2717 
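/* Apply a PMTU update to 'dst'.  If the dst cannot be backed by a cache
 * entry (it is already an RTF_CACHE clone, or it is neither a per-cpu
 * copy nor tied to a parent fib6_info), the MTU is written to it
 * directly; otherwise an RTF_CACHE clone carrying the learned PMTU is
 * allocated for the destination and inserted into the exception table.
 */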
2718 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2719 				 const struct ipv6hdr *iph, u32 mtu,
2720 				 bool confirm_neigh)
2721 {
2722 	const struct in6_addr *daddr, *saddr;
2723 	struct rt6_info *rt6 = (struct rt6_info *)dst;
2724 
2725 	/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU):
2726 	 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2727 	 * [see also comment in rt6_mtu_change_route()]
2728 	 */
2729 
2730 	if (iph) {
2731 		daddr = &iph->daddr;
2732 		saddr = &iph->saddr;
2733 	} else if (sk) {
2734 		daddr = &sk->sk_v6_daddr;
2735 		saddr = &inet6_sk(sk)->saddr;
2736 	} else {
2737 		daddr = NULL;
2738 		saddr = NULL;
2739 	}
2740 
2741 	if (confirm_neigh)
2742 		dst_confirm_neigh(dst, daddr);
2743 
2744 	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2745 	if (mtu >= dst_mtu(dst))
2746 		return;
2747 
2748 	if (!rt6_cache_allowed_for_pmtu(rt6)) {
2749 		rt6_do_update_pmtu(rt6, mtu);
2750 		/* update rt6_ex->stamp for cache */
2751 		if (rt6->rt6i_flags & RTF_CACHE)
2752 			rt6_update_exception_stamp_rt(rt6);
2753 	} else if (daddr) {
2754 		struct fib6_result res = {};
2755 		struct rt6_info *nrt6;
2756 
2757 		rcu_read_lock();
2758 		res.f6i = rcu_dereference(rt6->from);
2759 		if (!res.f6i)
2760 			goto out_unlock;
2761 
2762 		res.fib6_flags = res.f6i->fib6_flags;
2763 		res.fib6_type = res.f6i->fib6_type;
2764 
2765 		if (res.f6i->nh) {
2766 			struct fib6_nh_match_arg arg = {
2767 				.dev = dst->dev,
2768 				.gw = &rt6->rt6i_gateway,
2769 			};
2770 
2771 			nexthop_for_each_fib6_nh(res.f6i->nh,
2772 						 fib6_nh_find_match, &arg);
2773 
2774 			/* The fib6_info uses a nexthop object that has no
2775 			 * fib6_nh matching dst->dev + gw.  Should be impossible.
2776 			 */
2777 			if (!arg.match)
2778 				goto out_unlock;
2779 
2780 			res.nh = arg.match;
2781 		} else {
2782 			res.nh = res.f6i->fib6_nh;
2783 		}
2784 
2785 		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2786 		if (nrt6) {
2787 			rt6_do_update_pmtu(nrt6, mtu);
2788 			if (rt6_insert_exception(nrt6, &res))
2789 				dst_release_immediate(&nrt6->dst);
2790 		}
2791 out_unlock:
2792 		rcu_read_unlock();
2793 	}
2794 }
2795 
2796 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2797 			       struct sk_buff *skb, u32 mtu,
2798 			       bool confirm_neigh)
2799 {
2800 	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2801 			     confirm_neigh);
2802 }
2803 
2804 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2805 		     int oif, u32 mark, kuid_t uid)
2806 {
2807 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2808 	struct dst_entry *dst;
2809 	struct flowi6 fl6 = {
2810 		.flowi6_oif = oif,
2811 		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2812 		.daddr = iph->daddr,
2813 		.saddr = iph->saddr,
2814 		.flowlabel = ip6_flowinfo(iph),
2815 		.flowi6_uid = uid,
2816 	};
2817 
2818 	dst = ip6_route_output(net, NULL, &fl6);
2819 	if (!dst->error)
2820 		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
2821 	dst_release(dst);
2822 }
2823 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2824 
2825 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2826 {
2827 	int oif = sk->sk_bound_dev_if;
2828 	struct dst_entry *dst;
2829 
2830 	if (!oif && skb->dev)
2831 		oif = l3mdev_master_ifindex(skb->dev);
2832 
2833 	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2834 
2835 	dst = __sk_dst_get(sk);
2836 	if (!dst || !dst->obsolete ||
2837 	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2838 		return;
2839 
2840 	bh_lock_sock(sk);
2841 	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2842 		ip6_datagram_dst_update(sk, false);
2843 	bh_unlock_sock(sk);
2844 }
2845 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2846 
2847 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2848 			   const struct flowi6 *fl6)
2849 {
2850 #ifdef CONFIG_IPV6_SUBTREES
2851 	struct ipv6_pinfo *np = inet6_sk(sk);
2852 #endif
2853 
2854 	ip6_dst_store(sk, dst,
2855 		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2856 		      &sk->sk_v6_daddr : NULL,
2857 #ifdef CONFIG_IPV6_SUBTREES
2858 		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2859 		      &np->saddr :
2860 #endif
2861 		      NULL);
2862 }
2863 
2864 static bool ip6_redirect_nh_match(const struct fib6_result *res,
2865 				  struct flowi6 *fl6,
2866 				  const struct in6_addr *gw,
2867 				  struct rt6_info **ret)
2868 {
2869 	const struct fib6_nh *nh = res->nh;
2870 
2871 	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
2872 	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
2873 		return false;
2874 
2875 	/* rt_cache's gateway might be different from its 'parent'
2876 	 * in the case of an ip redirect.
2877 	 * So we keep searching in the exception table if the gateway
2878 	 * is different.
2879 	 */
2880 	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
2881 		struct rt6_info *rt_cache;
2882 
2883 		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
2884 		if (rt_cache &&
2885 		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
2886 			*ret = rt_cache;
2887 			return true;
2888 		}
2889 		return false;
2890 	}
2891 	return true;
2892 }
2893 
2894 struct fib6_nh_rd_arg {
2895 	struct fib6_result	*res;
2896 	struct flowi6		*fl6;
2897 	const struct in6_addr	*gw;
2898 	struct rt6_info		**ret;
2899 };
2900 
2901 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
2902 {
2903 	struct fib6_nh_rd_arg *arg = _arg;
2904 
2905 	arg->res->nh = nh;
2906 	return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
2907 }
2908 
2909 /* Handle redirects */
2910 struct ip6rd_flowi {
2911 	struct flowi6 fl6;
2912 	struct in6_addr gateway;
2913 };
2914 
2915 static struct rt6_info *__ip6_route_redirect(struct net *net,
2916 					     struct fib6_table *table,
2917 					     struct flowi6 *fl6,
2918 					     const struct sk_buff *skb,
2919 					     int flags)
2920 {
2921 	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2922 	struct rt6_info *ret = NULL;
2923 	struct fib6_result res = {};
2924 	struct fib6_nh_rd_arg arg = {
2925 		.res = &res,
2926 		.fl6 = fl6,
2927 		.gw  = &rdfl->gateway,
2928 		.ret = &ret
2929 	};
2930 	struct fib6_info *rt;
2931 	struct fib6_node *fn;
2932 
2933 	/* l3mdev_update_flow overrides oif if the device is enslaved; in
2934 	 * this case we must match on the real ingress device, so reset it
2935 	 */
2936 	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2937 		fl6->flowi6_oif = skb->dev->ifindex;
2938 
2939 	/* Get the "current" route for this destination and
2940 	 * check if the redirect has come from appropriate router.
2941 	 *
2942 	 * RFC 4861 specifies that redirects should only be
2943 	 * accepted if they come from the nexthop to the target.
2944 	 * Due to the way the routes are chosen, this notion
2945 	 * is a bit fuzzy and one might need to check all possible
2946 	 * routes.
2947 	 */
2948 
2949 	rcu_read_lock();
2950 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2951 restart:
2952 	for_each_fib6_node_rt_rcu(fn) {
2953 		res.f6i = rt;
2954 		if (fib6_check_expired(rt))
2955 			continue;
2956 		if (rt->fib6_flags & RTF_REJECT)
2957 			break;
2958 		if (unlikely(rt->nh)) {
2959 			if (nexthop_is_blackhole(rt->nh))
2960 				continue;
2961 			/* on match, res->nh is filled in and potentially ret */
2962 			if (nexthop_for_each_fib6_nh(rt->nh,
2963 						     fib6_nh_redirect_match,
2964 						     &arg))
2965 				goto out;
2966 		} else {
2967 			res.nh = rt->fib6_nh;
2968 			if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
2969 						  &ret))
2970 				goto out;
2971 		}
2972 	}
2973 
2974 	if (!rt)
2975 		rt = net->ipv6.fib6_null_entry;
2976 	else if (rt->fib6_flags & RTF_REJECT) {
2977 		ret = net->ipv6.ip6_null_entry;
2978 		goto out;
2979 	}
2980 
2981 	if (rt == net->ipv6.fib6_null_entry) {
2982 		fn = fib6_backtrack(fn, &fl6->saddr);
2983 		if (fn)
2984 			goto restart;
2985 	}
2986 
2987 	res.f6i = rt;
2988 	res.nh = rt->fib6_nh;
2989 out:
2990 	if (ret) {
2991 		ip6_hold_safe(net, &ret);
2992 	} else {
2993 		res.fib6_flags = res.f6i->fib6_flags;
2994 		res.fib6_type = res.f6i->fib6_type;
2995 		ret = ip6_create_rt_rcu(&res);
2996 	}
2997 
2998 	rcu_read_unlock();
2999 
3000 	trace_fib6_table_lookup(net, &res, table, fl6);
3001 	return ret;
3002 };
3003 
3004 static struct dst_entry *ip6_route_redirect(struct net *net,
3005 					    const struct flowi6 *fl6,
3006 					    const struct sk_buff *skb,
3007 					    const struct in6_addr *gateway)
3008 {
3009 	int flags = RT6_LOOKUP_F_HAS_SADDR;
3010 	struct ip6rd_flowi rdfl;
3011 
3012 	rdfl.fl6 = *fl6;
3013 	rdfl.gateway = *gateway;
3014 
3015 	return fib6_rule_lookup(net, &rdfl.fl6, skb,
3016 				flags, __ip6_route_redirect);
3017 }
3018 
3019 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3020 		  kuid_t uid)
3021 {
3022 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3023 	struct dst_entry *dst;
3024 	struct flowi6 fl6 = {
3025 		.flowi6_iif = LOOPBACK_IFINDEX,
3026 		.flowi6_oif = oif,
3027 		.flowi6_mark = mark,
3028 		.daddr = iph->daddr,
3029 		.saddr = iph->saddr,
3030 		.flowlabel = ip6_flowinfo(iph),
3031 		.flowi6_uid = uid,
3032 	};
3033 
3034 	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3035 	rt6_do_redirect(dst, NULL, skb);
3036 	dst_release(dst);
3037 }
3038 EXPORT_SYMBOL_GPL(ip6_redirect);
3039 
3040 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3041 {
3042 	const struct ipv6hdr *iph = ipv6_hdr(skb);
3043 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3044 	struct dst_entry *dst;
3045 	struct flowi6 fl6 = {
3046 		.flowi6_iif = LOOPBACK_IFINDEX,
3047 		.flowi6_oif = oif,
3048 		.daddr = msg->dest,
3049 		.saddr = iph->daddr,
3050 		.flowi6_uid = sock_net_uid(net, NULL),
3051 	};
3052 
3053 	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3054 	rt6_do_redirect(dst, NULL, skb);
3055 	dst_release(dst);
3056 }
3057 
3058 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3059 {
3060 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
3061 		     sk->sk_uid);
3062 }
3063 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3064 
3065 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3066 {
3067 	struct net_device *dev = dst->dev;
3068 	unsigned int mtu = dst_mtu(dst);
3069 	struct net *net = dev_net(dev);
3070 
3071 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3072 
3073 	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3074 		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3075 
3076 	/*
3077 	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
3078 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3079 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
3080 	 * rely only on pmtu discovery"
3081 	 */
3082 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3083 		mtu = IPV6_MAXPLEN;
3084 	return mtu;
3085 }
3086 
3087 static unsigned int ip6_mtu(const struct dst_entry *dst)
3088 {
3089 	struct inet6_dev *idev;
3090 	unsigned int mtu;
3091 
3092 	mtu = dst_metric_raw(dst, RTAX_MTU);
3093 	if (mtu)
3094 		goto out;
3095 
3096 	mtu = IPV6_MIN_MTU;
3097 
3098 	rcu_read_lock();
3099 	idev = __in6_dev_get(dst->dev);
3100 	if (idev)
3101 		mtu = idev->cnf.mtu6;
3102 	rcu_read_unlock();
3103 
3104 out:
3105 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3106 
3107 	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
3108 }
3109 
3110 /* MTU selection:
3111  * 1. mtu on route is locked - use it
3112  * 2. mtu from nexthop exception
3113  * 3. mtu from egress device
3114  *
3115  * based on ip6_dst_mtu_forward and exception logic of
3116  * rt6_find_cached_rt; called with rcu_read_lock
3117  */
3118 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3119 		      const struct in6_addr *daddr,
3120 		      const struct in6_addr *saddr)
3121 {
3122 	const struct fib6_nh *nh = res->nh;
3123 	struct fib6_info *f6i = res->f6i;
3124 	struct inet6_dev *idev;
3125 	struct rt6_info *rt;
3126 	u32 mtu = 0;
3127 
3128 	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3129 		mtu = f6i->fib6_pmtu;
3130 		if (mtu)
3131 			goto out;
3132 	}
3133 
3134 	rt = rt6_find_cached_rt(res, daddr, saddr);
3135 	if (unlikely(rt)) {
3136 		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3137 	} else {
3138 		struct net_device *dev = nh->fib_nh_dev;
3139 
3140 		mtu = IPV6_MIN_MTU;
3141 		idev = __in6_dev_get(dev);
3142 		if (idev && idev->cnf.mtu6 > mtu)
3143 			mtu = idev->cnf.mtu6;
3144 	}
3145 
3146 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3147 out:
3148 	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3149 }
3150 
3151 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3152 				  struct flowi6 *fl6)
3153 {
3154 	struct dst_entry *dst;
3155 	struct rt6_info *rt;
3156 	struct inet6_dev *idev = in6_dev_get(dev);
3157 	struct net *net = dev_net(dev);
3158 
3159 	if (unlikely(!idev))
3160 		return ERR_PTR(-ENODEV);
3161 
3162 	rt = ip6_dst_alloc(net, dev, 0);
3163 	if (unlikely(!rt)) {
3164 		in6_dev_put(idev);
3165 		dst = ERR_PTR(-ENOMEM);
3166 		goto out;
3167 	}
3168 
3169 	rt->dst.input = ip6_input;
3170 	rt->dst.output  = ip6_output;
3171 	rt->rt6i_gateway  = fl6->daddr;
3172 	rt->rt6i_dst.addr = fl6->daddr;
3173 	rt->rt6i_dst.plen = 128;
3174 	rt->rt6i_idev     = idev;
3175 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3176 
3177 	/* Add this dst into uncached_list so that rt6_disable_ip() can
3178 	 * properly release the net_device
3179 	 */
3180 	rt6_uncached_list_add(rt);
3181 	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
3182 
3183 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3184 
3185 out:
3186 	return dst;
3187 }
3188 
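/* dst_ops garbage collection hook.  The run is skipped while the last GC
 * was recent and the entry count is within ip6_rt_max_size; otherwise
 * fib6_run_gc() is invoked with an expiry that grows on every pass,
 * decays by the configured elasticity and is reset to half of
 * ip6_rt_gc_timeout once the count drops below gc_thresh.  Returns
 * true while the table is still over ip6_rt_max_size.
 */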
3189 static int ip6_dst_gc(struct dst_ops *ops)
3190 {
3191 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3192 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3193 	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
3194 	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3195 	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3196 	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3197 	int entries;
3198 
3199 	entries = dst_entries_get_fast(ops);
3200 	if (entries > rt_max_size)
3201 		entries = dst_entries_get_slow(ops);
3202 
3203 	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
3204 	    entries <= rt_max_size)
3205 		goto out;
3206 
3207 	net->ipv6.ip6_rt_gc_expire++;
3208 	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
3209 	entries = dst_entries_get_slow(ops);
3210 	if (entries < ops->gc_thresh)
3211 		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
3212 out:
3213 	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
3214 	return entries > rt_max_size;
3215 }
3216 
3217 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3218 			       const struct in6_addr *gw_addr, u32 tbid,
3219 			       int flags, struct fib6_result *res)
3220 {
3221 	struct flowi6 fl6 = {
3222 		.flowi6_oif = cfg->fc_ifindex,
3223 		.daddr = *gw_addr,
3224 		.saddr = cfg->fc_prefsrc,
3225 	};
3226 	struct fib6_table *table;
3227 	int err;
3228 
3229 	table = fib6_get_table(net, tbid);
3230 	if (!table)
3231 		return -EINVAL;
3232 
3233 	if (!ipv6_addr_any(&cfg->fc_prefsrc))
3234 		flags |= RT6_LOOKUP_F_HAS_SADDR;
3235 
3236 	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3237 
3238 	err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3239 	if (!err && res->f6i != net->ipv6.fib6_null_entry)
3240 		fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3241 				 cfg->fc_ifindex != 0, NULL, flags);
3242 
3243 	return err;
3244 }
3245 
3246 static int ip6_route_check_nh_onlink(struct net *net,
3247 				     struct fib6_config *cfg,
3248 				     const struct net_device *dev,
3249 				     struct netlink_ext_ack *extack)
3250 {
3251 	u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3252 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3253 	struct fib6_result res = {};
3254 	int err;
3255 
3256 	err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3257 	if (!err && !(res.fib6_flags & RTF_REJECT) &&
3258 	    /* ignore match if it is the default route */
3259 	    !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3260 	    (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3261 		NL_SET_ERR_MSG(extack,
3262 			       "Nexthop has invalid gateway or device mismatch");
3263 		err = -EINVAL;
3264 	}
3265 
3266 	return err;
3267 }
3268 
3269 static int ip6_route_check_nh(struct net *net,
3270 			      struct fib6_config *cfg,
3271 			      struct net_device **_dev,
3272 			      struct inet6_dev **idev)
3273 {
3274 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3275 	struct net_device *dev = _dev ? *_dev : NULL;
3276 	int flags = RT6_LOOKUP_F_IFACE;
3277 	struct fib6_result res = {};
3278 	int err = -EHOSTUNREACH;
3279 
3280 	if (cfg->fc_table) {
3281 		err = ip6_nh_lookup_table(net, cfg, gw_addr,
3282 					  cfg->fc_table, flags, &res);
3283 		/* gw_addr can not require a gateway or resolve to a reject
3284 		 * route. If a device is given, it must match the result.
3285 		 */
3286 		if (err || res.fib6_flags & RTF_REJECT ||
3287 		    res.nh->fib_nh_gw_family ||
3288 		    (dev && dev != res.nh->fib_nh_dev))
3289 			err = -EHOSTUNREACH;
3290 	}
3291 
3292 	if (err < 0) {
3293 		struct flowi6 fl6 = {
3294 			.flowi6_oif = cfg->fc_ifindex,
3295 			.daddr = *gw_addr,
3296 		};
3297 
3298 		err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3299 		if (err || res.fib6_flags & RTF_REJECT ||
3300 		    res.nh->fib_nh_gw_family)
3301 			err = -EHOSTUNREACH;
3302 
3303 		if (err)
3304 			return err;
3305 
3306 		fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3307 				 cfg->fc_ifindex != 0, NULL, flags);
3308 	}
3309 
3310 	err = 0;
3311 	if (dev) {
3312 		if (dev != res.nh->fib_nh_dev)
3313 			err = -EHOSTUNREACH;
3314 	} else {
3315 		*_dev = dev = res.nh->fib_nh_dev;
3316 		dev_hold(dev);
3317 		*idev = in6_dev_get(dev);
3318 	}
3319 
3320 	return err;
3321 }
3322 
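/* Validate the gateway of a new route: it must not be one of our own
 * addresses, a non-link-local gateway must be resolvable through an
 * existing route (ip6_route_check_nh*()), and the resolved egress
 * device must exist and must not be the loopback device.
 */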
3323 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3324 			   struct net_device **_dev, struct inet6_dev **idev,
3325 			   struct netlink_ext_ack *extack)
3326 {
3327 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3328 	int gwa_type = ipv6_addr_type(gw_addr);
3329 	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3330 	const struct net_device *dev = *_dev;
3331 	bool need_addr_check = !dev;
3332 	int err = -EINVAL;
3333 
3334 	/* if gw_addr is local we will fail to detect this in case
3335 	 * the address is still TENTATIVE (DAD in progress). rt6_lookup()
3336 	 * will return the already-added prefix route via the interface
3337 	 * that the prefix route was assigned to, which might be non-loopback.
3338 	 */
3339 	if (dev &&
3340 	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3341 		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3342 		goto out;
3343 	}
3344 
3345 	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3346 		/* IPv6 strictly prohibits using non-link-local
3347 		 * addresses as a nexthop address.
3348 		 * Otherwise, the router will not be able to send redirects.
3349 		 * That is generally good, but in some (rare!) circumstances
3350 		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3351 		 * some exceptions. --ANK
3352 		 * We allow IPv4-mapped nexthops to support RFC 4798-style
3353 		 * addressing.
3354 		 */
3355 		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3356 			NL_SET_ERR_MSG(extack, "Invalid gateway address");
3357 			goto out;
3358 		}
3359 
3360 		rcu_read_lock();
3361 
3362 		if (cfg->fc_flags & RTNH_F_ONLINK)
3363 			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3364 		else
3365 			err = ip6_route_check_nh(net, cfg, _dev, idev);
3366 
3367 		rcu_read_unlock();
3368 
3369 		if (err)
3370 			goto out;
3371 	}
3372 
3373 	/* reload in case device was changed */
3374 	dev = *_dev;
3375 
3376 	err = -EINVAL;
3377 	if (!dev) {
3378 		NL_SET_ERR_MSG(extack, "Egress device not specified");
3379 		goto out;
3380 	} else if (dev->flags & IFF_LOOPBACK) {
3381 		NL_SET_ERR_MSG(extack,
3382 			       "Egress device can not be loopback device for this route");
3383 		goto out;
3384 	}
3385 
3386 	/* if we did not check gw_addr above, do so now that the
3387 	 * egress device has been resolved.
3388 	 */
3389 	if (need_addr_check &&
3390 	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3391 		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3392 		goto out;
3393 	}
3394 
3395 	err = 0;
3396 out:
3397 	return err;
3398 }
3399 
3400 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3401 {
3402 	if ((flags & RTF_REJECT) ||
3403 	    (dev && (dev->flags & IFF_LOOPBACK) &&
3404 	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
3405 	     !(flags & RTF_LOCAL)))
3406 		return true;
3407 
3408 	return false;
3409 }
3410 
3411 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3412 		 struct fib6_config *cfg, gfp_t gfp_flags,
3413 		 struct netlink_ext_ack *extack)
3414 {
3415 	struct net_device *dev = NULL;
3416 	struct inet6_dev *idev = NULL;
3417 	int addr_type;
3418 	int err;
3419 
3420 	fib6_nh->fib_nh_family = AF_INET6;
3421 #ifdef CONFIG_IPV6_ROUTER_PREF
3422 	fib6_nh->last_probe = jiffies;
3423 #endif
3424 	if (cfg->fc_is_fdb) {
3425 		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3426 		fib6_nh->fib_nh_gw_family = AF_INET6;
3427 		return 0;
3428 	}
3429 
3430 	err = -ENODEV;
3431 	if (cfg->fc_ifindex) {
3432 		dev = dev_get_by_index(net, cfg->fc_ifindex);
3433 		if (!dev)
3434 			goto out;
3435 		idev = in6_dev_get(dev);
3436 		if (!idev)
3437 			goto out;
3438 	}
3439 
3440 	if (cfg->fc_flags & RTNH_F_ONLINK) {
3441 		if (!dev) {
3442 			NL_SET_ERR_MSG(extack,
3443 				       "Nexthop device required for onlink");
3444 			goto out;
3445 		}
3446 
3447 		if (!(dev->flags & IFF_UP)) {
3448 			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3449 			err = -ENETDOWN;
3450 			goto out;
3451 		}
3452 
3453 		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3454 	}
3455 
3456 	fib6_nh->fib_nh_weight = 1;
3457 
3458 	/* We cannot add true routes via loopback here, as they
3459 	 * would result in kernel looping; promote them to reject routes
3460 	 */
3461 	addr_type = ipv6_addr_type(&cfg->fc_dst);
3462 	if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3463 		/* hold loopback dev/idev if we haven't done so. */
3464 		if (dev != net->loopback_dev) {
3465 			if (dev) {
3466 				dev_put(dev);
3467 				in6_dev_put(idev);
3468 			}
3469 			dev = net->loopback_dev;
3470 			dev_hold(dev);
3471 			idev = in6_dev_get(dev);
3472 			if (!idev) {
3473 				err = -ENODEV;
3474 				goto out;
3475 			}
3476 		}
3477 		goto pcpu_alloc;
3478 	}
3479 
3480 	if (cfg->fc_flags & RTF_GATEWAY) {
3481 		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3482 		if (err)
3483 			goto out;
3484 
3485 		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3486 		fib6_nh->fib_nh_gw_family = AF_INET6;
3487 	}
3488 
3489 	err = -ENODEV;
3490 	if (!dev)
3491 		goto out;
3492 
3493 	if (idev->cnf.disable_ipv6) {
3494 		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3495 		err = -EACCES;
3496 		goto out;
3497 	}
3498 
3499 	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3500 		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3501 		err = -ENETDOWN;
3502 		goto out;
3503 	}
3504 
3505 	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3506 	    !netif_carrier_ok(dev))
3507 		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3508 
3509 	err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
3510 				 cfg->fc_encap_type, cfg, gfp_flags, extack);
3511 	if (err)
3512 		goto out;
3513 
3514 pcpu_alloc:
3515 	fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3516 	if (!fib6_nh->rt6i_pcpu) {
3517 		err = -ENOMEM;
3518 		goto out;
3519 	}
3520 
3521 	fib6_nh->fib_nh_dev = dev;
3522 	fib6_nh->fib_nh_oif = dev->ifindex;
3523 	err = 0;
3524 out:
3525 	if (idev)
3526 		in6_dev_put(idev);
3527 
3528 	if (err) {
3529 		lwtstate_put(fib6_nh->fib_nh_lws);
3530 		fib6_nh->fib_nh_lws = NULL;
3531 		if (dev)
3532 			dev_put(dev);
3533 	}
3534 
3535 	return err;
3536 }
3537 
3538 void fib6_nh_release(struct fib6_nh *fib6_nh)
3539 {
3540 	struct rt6_exception_bucket *bucket;
3541 
3542 	rcu_read_lock();
3543 
3544 	fib6_nh_flush_exceptions(fib6_nh, NULL);
3545 	bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3546 	if (bucket) {
3547 		rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3548 		kfree(bucket);
3549 	}
3550 
3551 	rcu_read_unlock();
3552 
3553 	if (fib6_nh->rt6i_pcpu) {
3554 		int cpu;
3555 
3556 		for_each_possible_cpu(cpu) {
3557 			struct rt6_info **ppcpu_rt;
3558 			struct rt6_info *pcpu_rt;
3559 
3560 			ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3561 			pcpu_rt = *ppcpu_rt;
3562 			if (pcpu_rt) {
3563 				dst_dev_put(&pcpu_rt->dst);
3564 				dst_release(&pcpu_rt->dst);
3565 				*ppcpu_rt = NULL;
3566 			}
3567 		}
3568 
3569 		free_percpu(fib6_nh->rt6i_pcpu);
3570 	}
3571 
3572 	fib_nh_common_release(&fib6_nh->nh_common);
3573 }
3574 
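/* Build a fib6_info from a fib6_config: validate flags, prefix lengths
 * and the optional nexthop id, pick or create the FIB table, initialise
 * metrics, expiry and the nexthop, and verify the preferred source
 * address.  Returns the new entry or an ERR_PTR(); the caller owns the
 * reference.
 */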
3575 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3576 					      gfp_t gfp_flags,
3577 					      struct netlink_ext_ack *extack)
3578 {
3579 	struct net *net = cfg->fc_nlinfo.nl_net;
3580 	struct fib6_info *rt = NULL;
3581 	struct nexthop *nh = NULL;
3582 	struct fib6_table *table;
3583 	struct fib6_nh *fib6_nh;
3584 	int err = -EINVAL;
3585 	int addr_type;
3586 
3587 	/* RTF_PCPU is an internal flag; can not be set by userspace */
3588 	if (cfg->fc_flags & RTF_PCPU) {
3589 		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3590 		goto out;
3591 	}
3592 
3593 	/* RTF_CACHE is an internal flag; can not be set by userspace */
3594 	if (cfg->fc_flags & RTF_CACHE) {
3595 		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3596 		goto out;
3597 	}
3598 
3599 	if (cfg->fc_type > RTN_MAX) {
3600 		NL_SET_ERR_MSG(extack, "Invalid route type");
3601 		goto out;
3602 	}
3603 
3604 	if (cfg->fc_dst_len > 128) {
3605 		NL_SET_ERR_MSG(extack, "Invalid prefix length");
3606 		goto out;
3607 	}
3608 	if (cfg->fc_src_len > 128) {
3609 		NL_SET_ERR_MSG(extack, "Invalid source address length");
3610 		goto out;
3611 	}
3612 #ifndef CONFIG_IPV6_SUBTREES
3613 	if (cfg->fc_src_len) {
3614 		NL_SET_ERR_MSG(extack,
3615 			       "Specifying source address requires IPV6_SUBTREES to be enabled");
3616 		goto out;
3617 	}
3618 #endif
3619 	if (cfg->fc_nh_id) {
3620 		nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3621 		if (!nh) {
3622 			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3623 			goto out;
3624 		}
3625 		err = fib6_check_nexthop(nh, cfg, extack);
3626 		if (err)
3627 			goto out;
3628 	}
3629 
3630 	err = -ENOBUFS;
3631 	if (cfg->fc_nlinfo.nlh &&
3632 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3633 		table = fib6_get_table(net, cfg->fc_table);
3634 		if (!table) {
3635 			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3636 			table = fib6_new_table(net, cfg->fc_table);
3637 		}
3638 	} else {
3639 		table = fib6_new_table(net, cfg->fc_table);
3640 	}
3641 
3642 	if (!table)
3643 		goto out;
3644 
3645 	err = -ENOMEM;
3646 	rt = fib6_info_alloc(gfp_flags, !nh);
3647 	if (!rt)
3648 		goto out;
3649 
3650 	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3651 					       extack);
3652 	if (IS_ERR(rt->fib6_metrics)) {
3653 		err = PTR_ERR(rt->fib6_metrics);
3654 		/* Do not leave garbage there. */
3655 		rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3656 		goto out;
3657 	}
3658 
3659 	if (cfg->fc_flags & RTF_ADDRCONF)
3660 		rt->dst_nocount = true;
3661 
3662 	if (cfg->fc_flags & RTF_EXPIRES)
3663 		fib6_set_expires(rt, jiffies +
3664 				clock_t_to_jiffies(cfg->fc_expires));
3665 	else
3666 		fib6_clean_expires(rt);
3667 
3668 	if (cfg->fc_protocol == RTPROT_UNSPEC)
3669 		cfg->fc_protocol = RTPROT_BOOT;
3670 	rt->fib6_protocol = cfg->fc_protocol;
3671 
3672 	rt->fib6_table = table;
3673 	rt->fib6_metric = cfg->fc_metric;
3674 	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3675 	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3676 
3677 	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3678 	rt->fib6_dst.plen = cfg->fc_dst_len;
3679 
3680 #ifdef CONFIG_IPV6_SUBTREES
3681 	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3682 	rt->fib6_src.plen = cfg->fc_src_len;
3683 #endif
3684 	if (nh) {
3685 		if (!nexthop_get(nh)) {
3686 			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3687 			goto out;
3688 		}
3689 		if (rt->fib6_src.plen) {
3690 			NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3691 			goto out;
3692 		}
3693 		rt->nh = nh;
3694 		fib6_nh = nexthop_fib6_nh(rt->nh);
3695 	} else {
3696 		err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3697 		if (err)
3698 			goto out;
3699 
3700 		fib6_nh = rt->fib6_nh;
3701 
3702 		/* We cannot add true routes via loopback here, as they would
3703 		 * result in kernel looping; promote them to reject routes
3704 		 */
3705 		addr_type = ipv6_addr_type(&cfg->fc_dst);
3706 		if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3707 				   addr_type))
3708 			rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3709 	}
3710 
3711 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3712 		struct net_device *dev = fib6_nh->fib_nh_dev;
3713 
3714 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3715 			NL_SET_ERR_MSG(extack, "Invalid source address");
3716 			err = -EINVAL;
3717 			goto out;
3718 		}
3719 		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3720 		rt->fib6_prefsrc.plen = 128;
3721 	} else
3722 		rt->fib6_prefsrc.plen = 0;
3723 
3724 	return rt;
3725 out:
3726 	fib6_info_release(rt);
3727 	return ERR_PTR(err);
3728 }
3729 
3730 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3731 		  struct netlink_ext_ack *extack)
3732 {
3733 	struct fib6_info *rt;
3734 	int err;
3735 
3736 	rt = ip6_route_info_create(cfg, gfp_flags, extack);
3737 	if (IS_ERR(rt))
3738 		return PTR_ERR(rt);
3739 
3740 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3741 	fib6_info_release(rt);
3742 
3743 	return err;
3744 }
3745 
3746 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3747 {
3748 	struct net *net = info->nl_net;
3749 	struct fib6_table *table;
3750 	int err;
3751 
3752 	if (rt == net->ipv6.fib6_null_entry) {
3753 		err = -ENOENT;
3754 		goto out;
3755 	}
3756 
3757 	table = rt->fib6_table;
3758 	spin_lock_bh(&table->tb6_lock);
3759 	err = fib6_del(rt, info);
3760 	spin_unlock_bh(&table->tb6_lock);
3761 
3762 out:
3763 	fib6_info_release(rt);
3764 	return err;
3765 }
3766 
3767 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3768 {
3769 	struct nl_info info = {
3770 		.nl_net = net,
3771 		.skip_notify = skip_notify
3772 	};
3773 
3774 	return __ip6_del_rt(rt, &info);
3775 }
3776 
3777 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3778 {
3779 	struct nl_info *info = &cfg->fc_nlinfo;
3780 	struct net *net = info->nl_net;
3781 	struct sk_buff *skb = NULL;
3782 	struct fib6_table *table;
3783 	int err = -ENOENT;
3784 
3785 	if (rt == net->ipv6.fib6_null_entry)
3786 		goto out_put;
3787 	table = rt->fib6_table;
3788 	spin_lock_bh(&table->tb6_lock);
3789 
3790 	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3791 		struct fib6_info *sibling, *next_sibling;
3792 		struct fib6_node *fn;
3793 
3794 		/* prefer to send a single notification with all hops */
3795 		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3796 		if (skb) {
3797 			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3798 
3799 			if (rt6_fill_node(net, skb, rt, NULL,
3800 					  NULL, NULL, 0, RTM_DELROUTE,
3801 					  info->portid, seq, 0) < 0) {
3802 				kfree_skb(skb);
3803 				skb = NULL;
3804 			} else
3805 				info->skip_notify = 1;
3806 		}
3807 
3808 		/* 'rt' points to the first sibling route. If it is not the
3809 		 * leaf, then we do not need to send a notification. Otherwise,
3810 		 * we need to check if the last sibling has a next route or not
3811 		 * and emit a replace or delete notification, respectively.
3812 		 */
3813 		info->skip_notify_kernel = 1;
3814 		fn = rcu_dereference_protected(rt->fib6_node,
3815 					    lockdep_is_held(&table->tb6_lock));
3816 		if (rcu_access_pointer(fn->leaf) == rt) {
3817 			struct fib6_info *last_sibling, *replace_rt;
3818 
3819 			last_sibling = list_last_entry(&rt->fib6_siblings,
3820 						       struct fib6_info,
3821 						       fib6_siblings);
3822 			replace_rt = rcu_dereference_protected(
3823 					    last_sibling->fib6_next,
3824 					    lockdep_is_held(&table->tb6_lock));
3825 			if (replace_rt)
3826 				call_fib6_entry_notifiers_replace(net,
3827 								  replace_rt);
3828 			else
3829 				call_fib6_multipath_entry_notifiers(net,
3830 						       FIB_EVENT_ENTRY_DEL,
3831 						       rt, rt->fib6_nsiblings,
3832 						       NULL);
3833 		}
3834 		list_for_each_entry_safe(sibling, next_sibling,
3835 					 &rt->fib6_siblings,
3836 					 fib6_siblings) {
3837 			err = fib6_del(sibling, info);
3838 			if (err)
3839 				goto out_unlock;
3840 		}
3841 	}
3842 
3843 	err = fib6_del(rt, info);
3844 out_unlock:
3845 	spin_unlock_bh(&table->tb6_lock);
3846 out_put:
3847 	fib6_info_release(rt);
3848 
3849 	if (skb) {
3850 		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3851 			    info->nlh, gfp_any());
3852 	}
3853 	return err;
3854 }
3855 
3856 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3857 {
3858 	int rc = -ESRCH;
3859 
3860 	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3861 		goto out;
3862 
3863 	if (cfg->fc_flags & RTF_GATEWAY &&
3864 	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3865 		goto out;
3866 
3867 	rc = rt6_remove_exception_rt(rt);
3868 out:
3869 	return rc;
3870 }
3871 
3872 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3873 			     struct fib6_nh *nh)
3874 {
3875 	struct fib6_result res = {
3876 		.f6i = rt,
3877 		.nh = nh,
3878 	};
3879 	struct rt6_info *rt_cache;
3880 
3881 	rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3882 	if (rt_cache)
3883 		return __ip6_del_cached_rt(rt_cache, cfg);
3884 
3885 	return 0;
3886 }
3887 
3888 struct fib6_nh_del_cached_rt_arg {
3889 	struct fib6_config *cfg;
3890 	struct fib6_info *f6i;
3891 };
3892 
3893 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
3894 {
3895 	struct fib6_nh_del_cached_rt_arg *arg = _arg;
3896 	int rc;
3897 
3898 	rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
3899 	return rc != -ESRCH ? rc : 0;
3900 }
3901 
3902 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
3903 {
3904 	struct fib6_nh_del_cached_rt_arg arg = {
3905 		.cfg = cfg,
3906 		.f6i = f6i
3907 	};
3908 
3909 	return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
3910 }
3911 
3912 static int ip6_route_del(struct fib6_config *cfg,
3913 			 struct netlink_ext_ack *extack)
3914 {
3915 	struct fib6_table *table;
3916 	struct fib6_info *rt;
3917 	struct fib6_node *fn;
3918 	int err = -ESRCH;
3919 
3920 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3921 	if (!table) {
3922 		NL_SET_ERR_MSG(extack, "FIB table does not exist");
3923 		return err;
3924 	}
3925 
3926 	rcu_read_lock();
3927 
3928 	fn = fib6_locate(&table->tb6_root,
3929 			 &cfg->fc_dst, cfg->fc_dst_len,
3930 			 &cfg->fc_src, cfg->fc_src_len,
3931 			 !(cfg->fc_flags & RTF_CACHE));
3932 
3933 	if (fn) {
3934 		for_each_fib6_node_rt_rcu(fn) {
3935 			struct fib6_nh *nh;
3936 
3937 			if (rt->nh && cfg->fc_nh_id &&
3938 			    rt->nh->id != cfg->fc_nh_id)
3939 				continue;
3940 
3941 			if (cfg->fc_flags & RTF_CACHE) {
3942 				int rc = 0;
3943 
3944 				if (rt->nh) {
3945 					rc = ip6_del_cached_rt_nh(cfg, rt);
3946 				} else if (cfg->fc_nh_id) {
3947 					continue;
3948 				} else {
3949 					nh = rt->fib6_nh;
3950 					rc = ip6_del_cached_rt(cfg, rt, nh);
3951 				}
3952 				if (rc != -ESRCH) {
3953 					rcu_read_unlock();
3954 					return rc;
3955 				}
3956 				continue;
3957 			}
3958 
3959 			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3960 				continue;
3961 			if (cfg->fc_protocol &&
3962 			    cfg->fc_protocol != rt->fib6_protocol)
3963 				continue;
3964 
3965 			if (rt->nh) {
3966 				if (!fib6_info_hold_safe(rt))
3967 					continue;
3968 				rcu_read_unlock();
3969 
3970 				return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3971 			}
3972 			if (cfg->fc_nh_id)
3973 				continue;
3974 
3975 			nh = rt->fib6_nh;
3976 			if (cfg->fc_ifindex &&
3977 			    (!nh->fib_nh_dev ||
3978 			     nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3979 				continue;
3980 			if (cfg->fc_flags & RTF_GATEWAY &&
3981 			    !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3982 				continue;
3983 			if (!fib6_info_hold_safe(rt))
3984 				continue;
3985 			rcu_read_unlock();
3986 
3987 			/* if a gateway was specified, only delete the one hop */
3988 			if (cfg->fc_flags & RTF_GATEWAY)
3989 				return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3990 
3991 			return __ip6_del_rt_siblings(rt, cfg);
3992 		}
3993 	}
3994 	rcu_read_unlock();
3995 
3996 	return err;
3997 }
3998 
3999 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4000 {
4001 	struct netevent_redirect netevent;
4002 	struct rt6_info *rt, *nrt = NULL;
4003 	struct fib6_result res = {};
4004 	struct ndisc_options ndopts;
4005 	struct inet6_dev *in6_dev;
4006 	struct neighbour *neigh;
4007 	struct rd_msg *msg;
4008 	int optlen, on_link;
4009 	u8 *lladdr;
4010 
4011 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4012 	optlen -= sizeof(*msg);
4013 
4014 	if (optlen < 0) {
4015 		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4016 		return;
4017 	}
4018 
4019 	msg = (struct rd_msg *)icmp6_hdr(skb);
4020 
4021 	if (ipv6_addr_is_multicast(&msg->dest)) {
4022 		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4023 		return;
4024 	}
4025 
4026 	on_link = 0;
4027 	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4028 		on_link = 1;
4029 	} else if (ipv6_addr_type(&msg->target) !=
4030 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4031 		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4032 		return;
4033 	}
4034 
4035 	in6_dev = __in6_dev_get(skb->dev);
4036 	if (!in6_dev)
4037 		return;
4038 	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
4039 		return;
4040 
4041 	/* RFC2461 8.1:
4042 	 *	The IP source address of the Redirect MUST be the same as the current
4043 	 *	first-hop router for the specified ICMP Destination Address.
4044 	 */
4045 
4046 	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4047 		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4048 		return;
4049 	}
4050 
4051 	lladdr = NULL;
4052 	if (ndopts.nd_opts_tgt_lladdr) {
4053 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4054 					     skb->dev);
4055 		if (!lladdr) {
4056 			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4057 			return;
4058 		}
4059 	}
4060 
4061 	rt = (struct rt6_info *) dst;
4062 	if (rt->rt6i_flags & RTF_REJECT) {
4063 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4064 		return;
4065 	}
4066 
4067 	/* Redirect received -> path was valid.
4068 	 * Look, redirects are sent only in response to data packets,
4069 	 * so that this nexthop apparently is reachable. --ANK
4070 	 */
4071 	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4072 
4073 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4074 	if (!neigh)
4075 		return;
4076 
4077 	/*
4078 	 *	We have finally decided to accept it.
4079 	 */
4080 
4081 	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4082 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
4083 		     NEIGH_UPDATE_F_OVERRIDE|
4084 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4085 				     NEIGH_UPDATE_F_ISROUTER)),
4086 		     NDISC_REDIRECT, &ndopts);
4087 
4088 	rcu_read_lock();
4089 	res.f6i = rcu_dereference(rt->from);
4090 	if (!res.f6i)
4091 		goto out;
4092 
4093 	if (res.f6i->nh) {
4094 		struct fib6_nh_match_arg arg = {
4095 			.dev = dst->dev,
4096 			.gw = &rt->rt6i_gateway,
4097 		};
4098 
4099 		nexthop_for_each_fib6_nh(res.f6i->nh,
4100 					 fib6_nh_find_match, &arg);
4101 
4102 		/* The fib6_info uses a nexthop that has no fib6_nh for
4103 		 * dst->dev. This should be impossible.
4104 		 */
4105 		if (!arg.match)
4106 			goto out;
4107 		res.nh = arg.match;
4108 	} else {
4109 		res.nh = res.f6i->fib6_nh;
4110 	}
4111 
4112 	res.fib6_flags = res.f6i->fib6_flags;
4113 	res.fib6_type = res.f6i->fib6_type;
4114 	nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4115 	if (!nrt)
4116 		goto out;
4117 
4118 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4119 	if (on_link)
4120 		nrt->rt6i_flags &= ~RTF_GATEWAY;
4121 
4122 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4123 
4124 	/* rt6_insert_exception() will take care of duplicated exceptions */
4125 	if (rt6_insert_exception(nrt, &res)) {
4126 		dst_release_immediate(&nrt->dst);
4127 		goto out;
4128 	}
4129 
4130 	netevent.old = &rt->dst;
4131 	netevent.new = &nrt->dst;
4132 	netevent.daddr = &msg->dest;
4133 	netevent.neigh = neigh;
4134 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4135 
4136 out:
4137 	rcu_read_unlock();
4138 	neigh_release(neigh);
4139 }
4140 
4141 #ifdef CONFIG_IPV6_ROUTE_INFO
4142 static struct fib6_info *rt6_get_route_info(struct net *net,
4143 					   const struct in6_addr *prefix, int prefixlen,
4144 					   const struct in6_addr *gwaddr,
4145 					   struct net_device *dev)
4146 {
4147 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4148 	int ifindex = dev->ifindex;
4149 	struct fib6_node *fn;
4150 	struct fib6_info *rt = NULL;
4151 	struct fib6_table *table;
4152 
4153 	table = fib6_get_table(net, tb_id);
4154 	if (!table)
4155 		return NULL;
4156 
4157 	rcu_read_lock();
4158 	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4159 	if (!fn)
4160 		goto out;
4161 
4162 	for_each_fib6_node_rt_rcu(fn) {
4163 		/* these routes do not use nexthops */
4164 		if (rt->nh)
4165 			continue;
4166 		if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4167 			continue;
4168 		if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4169 		    !rt->fib6_nh->fib_nh_gw_family)
4170 			continue;
4171 		if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4172 			continue;
4173 		if (!fib6_info_hold_safe(rt))
4174 			continue;
4175 		break;
4176 	}
4177 out:
4178 	rcu_read_unlock();
4179 	return rt;
4180 }
4181 
4182 static struct fib6_info *rt6_add_route_info(struct net *net,
4183 					   const struct in6_addr *prefix, int prefixlen,
4184 					   const struct in6_addr *gwaddr,
4185 					   struct net_device *dev,
4186 					   unsigned int pref)
4187 {
4188 	struct fib6_config cfg = {
4189 		.fc_metric	= IP6_RT_PRIO_USER,
4190 		.fc_ifindex	= dev->ifindex,
4191 		.fc_dst_len	= prefixlen,
4192 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4193 				  RTF_UP | RTF_PREF(pref),
4194 		.fc_protocol = RTPROT_RA,
4195 		.fc_type = RTN_UNICAST,
4196 		.fc_nlinfo.portid = 0,
4197 		.fc_nlinfo.nlh = NULL,
4198 		.fc_nlinfo.nl_net = net,
4199 	};
4200 
4201 	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
4202 	cfg.fc_dst = *prefix;
4203 	cfg.fc_gateway = *gwaddr;
4204 
4205 	/* We should treat it as a default route if prefix length is 0. */
4206 	if (!prefixlen)
4207 		cfg.fc_flags |= RTF_DEFAULT;
4208 
4209 	ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4210 
4211 	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4212 }
4213 #endif
4214 
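/* The two helpers below look up and install default-router routes
 * learned from Router Advertisements (RTF_ADDRCONF | RTF_DEFAULT),
 * presumably on behalf of the ndisc RA processing code. They use the
 * per-device default table: RT6_TABLE_DFLT, or the l3mdev table when
 * the device is enslaved to a VRF.
 */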
4215 struct fib6_info *rt6_get_dflt_router(struct net *net,
4216 				     const struct in6_addr *addr,
4217 				     struct net_device *dev)
4218 {
4219 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4220 	struct fib6_info *rt;
4221 	struct fib6_table *table;
4222 
4223 	table = fib6_get_table(net, tb_id);
4224 	if (!table)
4225 		return NULL;
4226 
4227 	rcu_read_lock();
4228 	for_each_fib6_node_rt_rcu(&table->tb6_root) {
4229 		struct fib6_nh *nh;
4230 
4231 		/* RA routes do not use nexthops */
4232 		if (rt->nh)
4233 			continue;
4234 
4235 		nh = rt->fib6_nh;
4236 		if (dev == nh->fib_nh_dev &&
4237 		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4238 		    ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4239 			break;
4240 	}
4241 	if (rt && !fib6_info_hold_safe(rt))
4242 		rt = NULL;
4243 	rcu_read_unlock();
4244 	return rt;
4245 }
4246 
4247 struct fib6_info *rt6_add_dflt_router(struct net *net,
4248 				     const struct in6_addr *gwaddr,
4249 				     struct net_device *dev,
4250 				     unsigned int pref)
4251 {
4252 	struct fib6_config cfg = {
4253 		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4254 		.fc_metric	= IP6_RT_PRIO_USER,
4255 		.fc_ifindex	= dev->ifindex,
4256 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4257 				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4258 		.fc_protocol = RTPROT_RA,
4259 		.fc_type = RTN_UNICAST,
4260 		.fc_nlinfo.portid = 0,
4261 		.fc_nlinfo.nlh = NULL,
4262 		.fc_nlinfo.nl_net = net,
4263 	};
4264 
4265 	cfg.fc_gateway = *gwaddr;
4266 
4267 	if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4268 		struct fib6_table *table;
4269 
4270 		table = fib6_get_table(dev_net(dev), cfg.fc_table);
4271 		if (table)
4272 			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4273 	}
4274 
4275 	return rt6_get_dflt_router(net, gwaddr, dev);
4276 }
4277 
4278 static void __rt6_purge_dflt_routers(struct net *net,
4279 				     struct fib6_table *table)
4280 {
4281 	struct fib6_info *rt;
4282 
4283 restart:
4284 	rcu_read_lock();
4285 	for_each_fib6_node_rt_rcu(&table->tb6_root) {
4286 		struct net_device *dev = fib6_info_nh_dev(rt);
4287 		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4288 
4289 		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4290 		    (!idev || idev->cnf.accept_ra != 2) &&
4291 		    fib6_info_hold_safe(rt)) {
4292 			rcu_read_unlock();
4293 			ip6_del_rt(net, rt, false);
4294 			goto restart;
4295 		}
4296 	}
4297 	rcu_read_unlock();
4298 
4299 	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4300 }
4301 
4302 void rt6_purge_dflt_routers(struct net *net)
4303 {
4304 	struct fib6_table *table;
4305 	struct hlist_head *head;
4306 	unsigned int h;
4307 
4308 	rcu_read_lock();
4309 
4310 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4311 		head = &net->ipv6.fib_table_hash[h];
4312 		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4313 			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4314 				__rt6_purge_dflt_routers(net, table);
4315 		}
4316 	}
4317 
4318 	rcu_read_unlock();
4319 }
4320 
4321 static void rtmsg_to_fib6_config(struct net *net,
4322 				 struct in6_rtmsg *rtmsg,
4323 				 struct fib6_config *cfg)
4324 {
4325 	*cfg = (struct fib6_config){
4326 		.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4327 			 : RT6_TABLE_MAIN,
4328 		.fc_ifindex = rtmsg->rtmsg_ifindex,
4329 		.fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
4330 		.fc_expires = rtmsg->rtmsg_info,
4331 		.fc_dst_len = rtmsg->rtmsg_dst_len,
4332 		.fc_src_len = rtmsg->rtmsg_src_len,
4333 		.fc_flags = rtmsg->rtmsg_flags,
4334 		.fc_type = rtmsg->rtmsg_type,
4335 
4336 		.fc_nlinfo.nl_net = net,
4337 
4338 		.fc_dst = rtmsg->rtmsg_dst,
4339 		.fc_src = rtmsg->rtmsg_src,
4340 		.fc_gateway = rtmsg->rtmsg_gateway,
4341 	};
4342 }
4343 
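/* ipv6_route_ioctl() backs the legacy SIOCADDRT/SIOCDELRT interface.
 * A rough userspace sketch (illustrative only, error handling omitted):
 *
 *	struct in6_rtmsg rt = { 0 };
 *
 *	inet_pton(AF_INET6, "2001:db8::", &rt.rtmsg_dst);
 *	rt.rtmsg_dst_len = 64;
 *	inet_pton(AF_INET6, "fe80::1", &rt.rtmsg_gateway);
 *	rt.rtmsg_flags = RTF_UP | RTF_GATEWAY;
 *	rt.rtmsg_ifindex = if_nametoindex("eth0");
 *	rt.rtmsg_metric = 1024;
 *	ioctl(socket(AF_INET6, SOCK_DGRAM, 0), SIOCADDRT, &rt);
 */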
4344 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4345 {
4346 	struct fib6_config cfg;
4347 	int err;
4348 
4349 	if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4350 		return -EINVAL;
4351 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4352 		return -EPERM;
4353 
4354 	rtmsg_to_fib6_config(net, rtmsg, &cfg);
4355 
4356 	rtnl_lock();
4357 	switch (cmd) {
4358 	case SIOCADDRT:
4359 		err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4360 		break;
4361 	case SIOCDELRT:
4362 		err = ip6_route_del(&cfg, NULL);
4363 		break;
4364 	}
4365 	rtnl_unlock();
4366 	return err;
4367 }
4368 
4369 /*
4370  *	Drop the packet on the floor
4371  */
4372 
4373 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4374 {
4375 	struct dst_entry *dst = skb_dst(skb);
4376 	struct net *net = dev_net(dst->dev);
4377 	struct inet6_dev *idev;
4378 	int type;
4379 
4380 	if (netif_is_l3_master(skb->dev) &&
4381 	    dst->dev == net->loopback_dev)
4382 		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4383 	else
4384 		idev = ip6_dst_idev(dst);
4385 
4386 	switch (ipstats_mib_noroutes) {
4387 	case IPSTATS_MIB_INNOROUTES:
4388 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4389 		if (type == IPV6_ADDR_ANY) {
4390 			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4391 			break;
4392 		}
4393 		fallthrough;
4394 	case IPSTATS_MIB_OUTNOROUTES:
4395 		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4396 		break;
4397 	}
4398 
4399 	/* Start over by dropping the dst for l3mdev case */
4400 	if (netif_is_l3_master(skb->dev))
4401 		skb_dst_drop(skb);
4402 
4403 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4404 	kfree_skb(skb);
4405 	return 0;
4406 }
4407 
4408 static int ip6_pkt_discard(struct sk_buff *skb)
4409 {
4410 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4411 }
4412 
4413 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4414 {
4415 	skb->dev = skb_dst(skb)->dev;
4416 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4417 }
4418 
4419 static int ip6_pkt_prohibit(struct sk_buff *skb)
4420 {
4421 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4422 }
4423 
4424 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4425 {
4426 	skb->dev = skb_dst(skb)->dev;
4427 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4428 }
4429 
4430 /*
4431  *	Allocate a dst for local (unicast / anycast) address.
4432  */
4433 
4434 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4435 				     struct inet6_dev *idev,
4436 				     const struct in6_addr *addr,
4437 				     bool anycast, gfp_t gfp_flags)
4438 {
4439 	struct fib6_config cfg = {
4440 		.fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4441 		.fc_ifindex = idev->dev->ifindex,
4442 		.fc_flags = RTF_UP | RTF_NONEXTHOP,
4443 		.fc_dst = *addr,
4444 		.fc_dst_len = 128,
4445 		.fc_protocol = RTPROT_KERNEL,
4446 		.fc_nlinfo.nl_net = net,
4447 		.fc_ignore_dev_down = true,
4448 	};
4449 	struct fib6_info *f6i;
4450 
4451 	if (anycast) {
4452 		cfg.fc_type = RTN_ANYCAST;
4453 		cfg.fc_flags |= RTF_ANYCAST;
4454 	} else {
4455 		cfg.fc_type = RTN_LOCAL;
4456 		cfg.fc_flags |= RTF_LOCAL;
4457 	}
4458 
4459 	f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4460 	if (!IS_ERR(f6i))
4461 		f6i->dst_nocount = true;
4462 	return f6i;
4463 }
4464 
4465 /* remove deleted ip from prefsrc entries */
4466 struct arg_dev_net_ip {
4467 	struct net_device *dev;
4468 	struct net *net;
4469 	struct in6_addr *addr;
4470 };
4471 
4472 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4473 {
4474 	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4475 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4476 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4477 
4478 	if (!rt->nh &&
4479 	    ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4480 	    rt != net->ipv6.fib6_null_entry &&
4481 	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4482 		spin_lock_bh(&rt6_exception_lock);
4483 		/* remove prefsrc entry */
4484 		rt->fib6_prefsrc.plen = 0;
4485 		spin_unlock_bh(&rt6_exception_lock);
4486 	}
4487 	return 0;
4488 }
4489 
4490 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4491 {
4492 	struct net *net = dev_net(ifp->idev->dev);
4493 	struct arg_dev_net_ip adni = {
4494 		.dev = ifp->idev->dev,
4495 		.net = net,
4496 		.addr = &ifp->addr,
4497 	};
4498 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4499 }
4500 
4501 #define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT)
4502 
4503 /* Remove routers and update dst entries when a gateway turns into a host. */
4504 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4505 {
4506 	struct in6_addr *gateway = (struct in6_addr *)arg;
4507 	struct fib6_nh *nh;
4508 
4509 	/* RA routes do not use nexthops */
4510 	if (rt->nh)
4511 		return 0;
4512 
4513 	nh = rt->fib6_nh;
4514 	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4515 	    nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4516 		return -1;
4517 
4518 	/* Further clean up cached routes in the exception table.
4519 	 * This is needed because a cached route may have a different
4520 	 * gateway than its 'parent' in the case of an IP redirect.
4521 	 */
4522 	fib6_nh_exceptions_clean_tohost(nh, gateway);
4523 
4524 	return 0;
4525 }
4526 
4527 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4528 {
4529 	fib6_clean_all(net, fib6_clean_tohost, gateway);
4530 }
4531 
4532 struct arg_netdev_event {
4533 	const struct net_device *dev;
4534 	union {
4535 		unsigned char nh_flags;
4536 		unsigned long event;
4537 	};
4538 };
4539 
4540 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4541 {
4542 	struct fib6_info *iter;
4543 	struct fib6_node *fn;
4544 
4545 	fn = rcu_dereference_protected(rt->fib6_node,
4546 			lockdep_is_held(&rt->fib6_table->tb6_lock));
4547 	iter = rcu_dereference_protected(fn->leaf,
4548 			lockdep_is_held(&rt->fib6_table->tb6_lock));
4549 	while (iter) {
4550 		if (iter->fib6_metric == rt->fib6_metric &&
4551 		    rt6_qualify_for_ecmp(iter))
4552 			return iter;
4553 		iter = rcu_dereference_protected(iter->fib6_next,
4554 				lockdep_is_held(&rt->fib6_table->tb6_lock));
4555 	}
4556 
4557 	return NULL;
4558 }
4559 
4560 /* only called for fib entries with builtin fib6_nh */
4561 static bool rt6_is_dead(const struct fib6_info *rt)
4562 {
4563 	if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4564 	    (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4565 	     ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4566 		return true;
4567 
4568 	return false;
4569 }
4570 
4571 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4572 {
4573 	struct fib6_info *iter;
4574 	int total = 0;
4575 
4576 	if (!rt6_is_dead(rt))
4577 		total += rt->fib6_nh->fib_nh_weight;
4578 
4579 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4580 		if (!rt6_is_dead(iter))
4581 			total += iter->fib6_nh->fib_nh_weight;
4582 	}
4583 
4584 	return total;
4585 }
4586 
4587 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
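/* rt6_upper_bound_set() implements the hash-threshold upper bounds used
 * for multipath nexthop selection: '*weight' accumulates the weights of
 * live nexthops in sibling order, and each nexthop is assigned
 *
 *	upper_bound = round(cumulative_weight * 2^31 / total) - 1
 *
 * As a worked example (illustrative numbers only), three live siblings
 * with weights 1, 2 and 1 (total 4) have cumulative weights 1, 3 and 4
 * and get upper bounds 0x1fffffff, 0x5fffffff and 0x7fffffff; the
 * multipath hash computed at lookup time is compared against these
 * bounds in sibling order. Dead nexthops keep -1 and are never chosen.
 */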
4588 {
4589 	int upper_bound = -1;
4590 
4591 	if (!rt6_is_dead(rt)) {
4592 		*weight += rt->fib6_nh->fib_nh_weight;
4593 		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4594 						    total) - 1;
4595 	}
4596 	atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4597 }
4598 
4599 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4600 {
4601 	struct fib6_info *iter;
4602 	int weight = 0;
4603 
4604 	rt6_upper_bound_set(rt, &weight, total);
4605 
4606 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4607 		rt6_upper_bound_set(iter, &weight, total);
4608 }
4609 
4610 void rt6_multipath_rebalance(struct fib6_info *rt)
4611 {
4612 	struct fib6_info *first;
4613 	int total;
4614 
4615 	/* If the entire multipath route was marked for flushing,
4616 	 * there is no need to rebalance upon the removal of every
4617 	 * sibling route.
4618 	 */
4619 	if (!rt->fib6_nsiblings || rt->should_flush)
4620 		return;
4621 
4622 	/* During lookup routes are evaluated in order, so we need to
4623 	 * make sure upper bounds are assigned from the first sibling
4624 	 * onwards.
4625 	 */
4626 	first = rt6_multipath_first_sibling(rt);
4627 	if (WARN_ON_ONCE(!first))
4628 		return;
4629 
4630 	total = rt6_multipath_total_weight(first);
4631 	rt6_multipath_upper_bound_set(first, total);
4632 }
4633 
4634 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4635 {
4636 	const struct arg_netdev_event *arg = p_arg;
4637 	struct net *net = dev_net(arg->dev);
4638 
4639 	if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4640 	    rt->fib6_nh->fib_nh_dev == arg->dev) {
4641 		rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4642 		fib6_update_sernum_upto_root(net, rt);
4643 		rt6_multipath_rebalance(rt);
4644 	}
4645 
4646 	return 0;
4647 }
4648 
4649 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4650 {
4651 	struct arg_netdev_event arg = {
4652 		.dev = dev,
4653 		{
4654 			.nh_flags = nh_flags,
4655 		},
4656 	};
4657 
4658 	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4659 		arg.nh_flags |= RTNH_F_LINKDOWN;
4660 
4661 	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4662 }
4663 
4664 /* only called for fib entries with inline fib6_nh */
4665 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4666 				   const struct net_device *dev)
4667 {
4668 	struct fib6_info *iter;
4669 
4670 	if (rt->fib6_nh->fib_nh_dev == dev)
4671 		return true;
4672 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4673 		if (iter->fib6_nh->fib_nh_dev == dev)
4674 			return true;
4675 
4676 	return false;
4677 }
4678 
4679 static void rt6_multipath_flush(struct fib6_info *rt)
4680 {
4681 	struct fib6_info *iter;
4682 
4683 	rt->should_flush = 1;
4684 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4685 		iter->should_flush = 1;
4686 }
4687 
4688 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4689 					     const struct net_device *down_dev)
4690 {
4691 	struct fib6_info *iter;
4692 	unsigned int dead = 0;
4693 
4694 	if (rt->fib6_nh->fib_nh_dev == down_dev ||
4695 	    rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4696 		dead++;
4697 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4698 		if (iter->fib6_nh->fib_nh_dev == down_dev ||
4699 		    iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4700 			dead++;
4701 
4702 	return dead;
4703 }
4704 
4705 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4706 				       const struct net_device *dev,
4707 				       unsigned char nh_flags)
4708 {
4709 	struct fib6_info *iter;
4710 
4711 	if (rt->fib6_nh->fib_nh_dev == dev)
4712 		rt->fib6_nh->fib_nh_flags |= nh_flags;
4713 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4714 		if (iter->fib6_nh->fib_nh_dev == dev)
4715 			iter->fib6_nh->fib_nh_flags |= nh_flags;
4716 }
4717 
4718 /* called with write lock held for table with rt */
4719 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4720 {
4721 	const struct arg_netdev_event *arg = p_arg;
4722 	const struct net_device *dev = arg->dev;
4723 	struct net *net = dev_net(dev);
4724 
4725 	if (rt == net->ipv6.fib6_null_entry || rt->nh)
4726 		return 0;
4727 
4728 	switch (arg->event) {
4729 	case NETDEV_UNREGISTER:
4730 		return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4731 	case NETDEV_DOWN:
4732 		if (rt->should_flush)
4733 			return -1;
4734 		if (!rt->fib6_nsiblings)
4735 			return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4736 		if (rt6_multipath_uses_dev(rt, dev)) {
4737 			unsigned int count;
4738 
4739 			count = rt6_multipath_dead_count(rt, dev);
4740 			if (rt->fib6_nsiblings + 1 == count) {
4741 				rt6_multipath_flush(rt);
4742 				return -1;
4743 			}
4744 			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4745 						   RTNH_F_LINKDOWN);
4746 			fib6_update_sernum(net, rt);
4747 			rt6_multipath_rebalance(rt);
4748 		}
4749 		return -2;
4750 	case NETDEV_CHANGE:
4751 		if (rt->fib6_nh->fib_nh_dev != dev ||
4752 		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4753 			break;
4754 		rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4755 		rt6_multipath_rebalance(rt);
4756 		break;
4757 	}
4758 
4759 	return 0;
4760 }
4761 
4762 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4763 {
4764 	struct arg_netdev_event arg = {
4765 		.dev = dev,
4766 		{
4767 			.event = event,
4768 		},
4769 	};
4770 	struct net *net = dev_net(dev);
4771 
4772 	if (net->ipv6.sysctl.skip_notify_on_dev_down)
4773 		fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4774 	else
4775 		fib6_clean_all(net, fib6_ifdown, &arg);
4776 }
4777 
4778 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4779 {
4780 	rt6_sync_down_dev(dev, event);
4781 	rt6_uncached_list_flush_dev(dev_net(dev), dev);
4782 	neigh_ifdown(&nd_tbl, dev);
4783 }
4784 
4785 struct rt6_mtu_change_arg {
4786 	struct net_device *dev;
4787 	unsigned int mtu;
4788 	struct fib6_info *f6i;
4789 };
4790 
4791 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4792 {
4793 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4794 	struct fib6_info *f6i = arg->f6i;
4795 
4796 	/* For an administrative MTU increase there is no way to discover
4797 	 * an IPv6 PMTU increase, so the PMTU increase must be applied here.
4798 	 * Since RFC 1981 does not cover administrative MTU increases,
4799 	 * updating on a PMTU increase is a MUST (e.g. for jumbo frames).
4800 	 */
4801 	if (nh->fib_nh_dev == arg->dev) {
4802 		struct inet6_dev *idev = __in6_dev_get(arg->dev);
4803 		u32 mtu = f6i->fib6_pmtu;
4804 
4805 		if (mtu >= arg->mtu ||
4806 		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4807 			fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4808 
4809 		spin_lock_bh(&rt6_exception_lock);
4810 		rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4811 		spin_unlock_bh(&rt6_exception_lock);
4812 	}
4813 
4814 	return 0;
4815 }
4816 
4817 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4818 {
4819 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4820 	struct inet6_dev *idev;
4821 
4822 	/* In IPv6, PMTU discovery is not optional,
4823 	   so the RTAX_MTU lock cannot disable it.
4824 	   We still use this lock to block changes
4825 	   caused by addrconf/ndisc.
4826 	*/
4827 
4828 	idev = __in6_dev_get(arg->dev);
4829 	if (!idev)
4830 		return 0;
4831 
4832 	if (fib6_metric_locked(f6i, RTAX_MTU))
4833 		return 0;
4834 
4835 	arg->f6i = f6i;
4836 	if (f6i->nh) {
4837 		/* fib6_nh_mtu_change only returns 0, so this is safe */
4838 		return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4839 						arg);
4840 	}
4841 
4842 	return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4843 }
4844 
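/* rt6_mtu_change() walks all FIB entries and updates RTAX_MTU metrics
 * and cached PMTU exceptions for routes going through 'dev'; it is
 * presumably called from the addrconf notifier when a device MTU
 * changes (NETDEV_CHANGEMTU).
 */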
4845 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4846 {
4847 	struct rt6_mtu_change_arg arg = {
4848 		.dev = dev,
4849 		.mtu = mtu,
4850 	};
4851 
4852 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4853 }
4854 
4855 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4856 	[RTA_UNSPEC]		= { .strict_start_type = RTA_DPORT + 1 },
4857 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
4858 	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
4859 	[RTA_OIF]               = { .type = NLA_U32 },
4860 	[RTA_IIF]		= { .type = NLA_U32 },
4861 	[RTA_PRIORITY]          = { .type = NLA_U32 },
4862 	[RTA_METRICS]           = { .type = NLA_NESTED },
4863 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
4864 	[RTA_PREF]              = { .type = NLA_U8 },
4865 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
4866 	[RTA_ENCAP]		= { .type = NLA_NESTED },
4867 	[RTA_EXPIRES]		= { .type = NLA_U32 },
4868 	[RTA_UID]		= { .type = NLA_U32 },
4869 	[RTA_MARK]		= { .type = NLA_U32 },
4870 	[RTA_TABLE]		= { .type = NLA_U32 },
4871 	[RTA_IP_PROTO]		= { .type = NLA_U8 },
4872 	[RTA_SPORT]		= { .type = NLA_U16 },
4873 	[RTA_DPORT]		= { .type = NLA_U16 },
4874 	[RTA_NH_ID]		= { .type = NLA_U32 },
4875 };
4876 
4877 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
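/* rtm_to_fib6_config() translates an RTM_NEWROUTE/RTM_DELROUTE request
 * into a fib6_config. As an illustrative mapping, a command such as
 *
 *	ip -6 route add 2001:db8::/64 via fe80::1 dev eth0 metric 1024
 *
 * would typically arrive with rtm_dst_len = 64 and RTA_DST, RTA_GATEWAY,
 * RTA_OIF and RTA_PRIORITY attributes, which land in fc_dst/fc_dst_len,
 * fc_gateway (plus RTF_GATEWAY), fc_ifindex and fc_metric respectively.
 */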
4878 			      struct fib6_config *cfg,
4879 			      struct netlink_ext_ack *extack)
4880 {
4881 	struct rtmsg *rtm;
4882 	struct nlattr *tb[RTA_MAX+1];
4883 	unsigned int pref;
4884 	int err;
4885 
4886 	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4887 				     rtm_ipv6_policy, extack);
4888 	if (err < 0)
4889 		goto errout;
4890 
4891 	err = -EINVAL;
4892 	rtm = nlmsg_data(nlh);
4893 
4894 	*cfg = (struct fib6_config){
4895 		.fc_table = rtm->rtm_table,
4896 		.fc_dst_len = rtm->rtm_dst_len,
4897 		.fc_src_len = rtm->rtm_src_len,
4898 		.fc_flags = RTF_UP,
4899 		.fc_protocol = rtm->rtm_protocol,
4900 		.fc_type = rtm->rtm_type,
4901 
4902 		.fc_nlinfo.portid = NETLINK_CB(skb).portid,
4903 		.fc_nlinfo.nlh = nlh,
4904 		.fc_nlinfo.nl_net = sock_net(skb->sk),
4905 	};
4906 
4907 	if (rtm->rtm_type == RTN_UNREACHABLE ||
4908 	    rtm->rtm_type == RTN_BLACKHOLE ||
4909 	    rtm->rtm_type == RTN_PROHIBIT ||
4910 	    rtm->rtm_type == RTN_THROW)
4911 		cfg->fc_flags |= RTF_REJECT;
4912 
4913 	if (rtm->rtm_type == RTN_LOCAL)
4914 		cfg->fc_flags |= RTF_LOCAL;
4915 
4916 	if (rtm->rtm_flags & RTM_F_CLONED)
4917 		cfg->fc_flags |= RTF_CACHE;
4918 
4919 	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4920 
4921 	if (tb[RTA_NH_ID]) {
4922 		if (tb[RTA_GATEWAY]   || tb[RTA_OIF] ||
4923 		    tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
4924 			NL_SET_ERR_MSG(extack,
4925 				       "Nexthop specification and nexthop id are mutually exclusive");
4926 			goto errout;
4927 		}
4928 		cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
4929 	}
4930 
4931 	if (tb[RTA_GATEWAY]) {
4932 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4933 		cfg->fc_flags |= RTF_GATEWAY;
4934 	}
4935 	if (tb[RTA_VIA]) {
4936 		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4937 		goto errout;
4938 	}
4939 
4940 	if (tb[RTA_DST]) {
4941 		int plen = (rtm->rtm_dst_len + 7) >> 3;
4942 
4943 		if (nla_len(tb[RTA_DST]) < plen)
4944 			goto errout;
4945 
4946 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4947 	}
4948 
4949 	if (tb[RTA_SRC]) {
4950 		int plen = (rtm->rtm_src_len + 7) >> 3;
4951 
4952 		if (nla_len(tb[RTA_SRC]) < plen)
4953 			goto errout;
4954 
4955 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4956 	}
4957 
4958 	if (tb[RTA_PREFSRC])
4959 		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4960 
4961 	if (tb[RTA_OIF])
4962 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4963 
4964 	if (tb[RTA_PRIORITY])
4965 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4966 
4967 	if (tb[RTA_METRICS]) {
4968 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4969 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4970 	}
4971 
4972 	if (tb[RTA_TABLE])
4973 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4974 
4975 	if (tb[RTA_MULTIPATH]) {
4976 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4977 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4978 
4979 		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4980 						     cfg->fc_mp_len, extack);
4981 		if (err < 0)
4982 			goto errout;
4983 	}
4984 
4985 	if (tb[RTA_PREF]) {
4986 		pref = nla_get_u8(tb[RTA_PREF]);
4987 		if (pref != ICMPV6_ROUTER_PREF_LOW &&
4988 		    pref != ICMPV6_ROUTER_PREF_HIGH)
4989 			pref = ICMPV6_ROUTER_PREF_MEDIUM;
4990 		cfg->fc_flags |= RTF_PREF(pref);
4991 	}
4992 
4993 	if (tb[RTA_ENCAP])
4994 		cfg->fc_encap = tb[RTA_ENCAP];
4995 
4996 	if (tb[RTA_ENCAP_TYPE]) {
4997 		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4998 
4999 		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5000 		if (err < 0)
5001 			goto errout;
5002 	}
5003 
5004 	if (tb[RTA_EXPIRES]) {
5005 		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5006 
5007 		if (addrconf_finite_timeout(timeout)) {
5008 			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5009 			cfg->fc_flags |= RTF_EXPIRES;
5010 		}
5011 	}
5012 
5013 	err = 0;
5014 errout:
5015 	return err;
5016 }
5017 
5018 struct rt6_nh {
5019 	struct fib6_info *fib6_info;
5020 	struct fib6_config r_cfg;
5021 	struct list_head next;
5022 };
5023 
5024 static int ip6_route_info_append(struct net *net,
5025 				 struct list_head *rt6_nh_list,
5026 				 struct fib6_info *rt,
5027 				 struct fib6_config *r_cfg)
5028 {
5029 	struct rt6_nh *nh;
5030 	int err = -EEXIST;
5031 
5032 	list_for_each_entry(nh, rt6_nh_list, next) {
5033 		/* check if fib6_info already exists */
5034 		if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5035 			return err;
5036 	}
5037 
5038 	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5039 	if (!nh)
5040 		return -ENOMEM;
5041 	nh->fib6_info = rt;
5042 	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5043 	list_add_tail(&nh->next, rt6_nh_list);
5044 
5045 	return 0;
5046 }
5047 
5048 static void ip6_route_mpath_notify(struct fib6_info *rt,
5049 				   struct fib6_info *rt_last,
5050 				   struct nl_info *info,
5051 				   __u16 nlflags)
5052 {
5053 	/* If this is an APPEND route, then rt points to the first route
5054 	 * inserted and rt_last points to the last route inserted. Userspace
5055 	 * wants a consistent dump of the route which starts at the first
5056 	 * nexthop. Since sibling routes are always added at the end of
5057 	 * the list, find the first sibling of the last route appended.
5058 	 */
5059 	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5060 		rt = list_first_entry(&rt_last->fib6_siblings,
5061 				      struct fib6_info,
5062 				      fib6_siblings);
5063 	}
5064 
5065 	if (rt)
5066 		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5067 }
5068 
5069 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5070 {
5071 	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5072 	bool should_notify = false;
5073 	struct fib6_info *leaf;
5074 	struct fib6_node *fn;
5075 
5076 	rcu_read_lock();
5077 	fn = rcu_dereference(rt->fib6_node);
5078 	if (!fn)
5079 		goto out;
5080 
5081 	leaf = rcu_dereference(fn->leaf);
5082 	if (!leaf)
5083 		goto out;
5084 
5085 	if (rt == leaf ||
5086 	    (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5087 	     rt6_qualify_for_ecmp(leaf)))
5088 		should_notify = true;
5089 out:
5090 	rcu_read_unlock();
5091 
5092 	return should_notify;
5093 }
5094 
5095 static int ip6_route_multipath_add(struct fib6_config *cfg,
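/* ip6_route_multipath_add() handles RTM_NEWROUTE requests that carry an
 * RTA_MULTIPATH attribute, e.g. (illustrative):
 *
 *	ip -6 route add 2001:db8::/64 \
 *		nexthop via fe80::1 dev eth0 \
 *		nexthop via fe80::2 dev eth1
 *
 * Each rtnexthop is turned into its own fib6_info and inserted in turn;
 * the inserted entries become siblings of the first one, and a single
 * notification covering all nexthops is sent once the loop completes.
 */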
5096 				   struct netlink_ext_ack *extack)
5097 {
5098 	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5099 	struct nl_info *info = &cfg->fc_nlinfo;
5100 	struct fib6_config r_cfg;
5101 	struct rtnexthop *rtnh;
5102 	struct fib6_info *rt;
5103 	struct rt6_nh *err_nh;
5104 	struct rt6_nh *nh, *nh_safe;
5105 	__u16 nlflags;
5106 	int remaining;
5107 	int attrlen;
5108 	int err = 1;
5109 	int nhn = 0;
5110 	int replace = (cfg->fc_nlinfo.nlh &&
5111 		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5112 	LIST_HEAD(rt6_nh_list);
5113 
5114 	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5115 	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5116 		nlflags |= NLM_F_APPEND;
5117 
5118 	remaining = cfg->fc_mp_len;
5119 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5120 
5121 	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
5122 	 * fib6_info structs per nexthop
5123 	 */
5124 	while (rtnh_ok(rtnh, remaining)) {
5125 		memcpy(&r_cfg, cfg, sizeof(*cfg));
5126 		if (rtnh->rtnh_ifindex)
5127 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5128 
5129 		attrlen = rtnh_attrlen(rtnh);
5130 		if (attrlen > 0) {
5131 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5132 
5133 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5134 			if (nla) {
5135 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
5136 				r_cfg.fc_flags |= RTF_GATEWAY;
5137 			}
5138 			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5139 			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5140 			if (nla)
5141 				r_cfg.fc_encap_type = nla_get_u16(nla);
5142 		}
5143 
5144 		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5145 		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5146 		if (IS_ERR(rt)) {
5147 			err = PTR_ERR(rt);
5148 			rt = NULL;
5149 			goto cleanup;
5150 		}
5151 		if (!rt6_qualify_for_ecmp(rt)) {
5152 			err = -EINVAL;
5153 			NL_SET_ERR_MSG(extack,
5154 				       "Device only routes can not be added for IPv6 using the multipath API.");
5155 			fib6_info_release(rt);
5156 			goto cleanup;
5157 		}
5158 
5159 		rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5160 
5161 		err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5162 					    rt, &r_cfg);
5163 		if (err) {
5164 			fib6_info_release(rt);
5165 			goto cleanup;
5166 		}
5167 
5168 		rtnh = rtnh_next(rtnh, &remaining);
5169 	}
5170 
5171 	if (list_empty(&rt6_nh_list)) {
5172 		NL_SET_ERR_MSG(extack,
5173 			       "Invalid nexthop configuration - no valid nexthops");
5174 		return -EINVAL;
5175 	}
5176 
5177 	/* For add and replace, send one notification with all nexthops.
5178 	 * Skip the notification in fib6_add_rt2node and send one with
5179 	 * the full route when done.
5180 	 */
5181 	info->skip_notify = 1;
5182 
5183 	/* For add and replace, send one notification with all nexthops. For
5184 	 * append, send one notification with all appended nexthops.
5185 	 */
5186 	info->skip_notify_kernel = 1;
5187 
5188 	err_nh = NULL;
5189 	list_for_each_entry(nh, &rt6_nh_list, next) {
5190 		err = __ip6_ins_rt(nh->fib6_info, info, extack);
5191 		fib6_info_release(nh->fib6_info);
5192 
5193 		if (!err) {
5194 			/* save reference to last route successfully inserted */
5195 			rt_last = nh->fib6_info;
5196 
5197 			/* save reference to first route for notification */
5198 			if (!rt_notif)
5199 				rt_notif = nh->fib6_info;
5200 		}
5201 
5202 		/* nh->fib6_info is used or freed at this point, reset to NULL*/
5203 		/* nh->fib6_info is used or freed at this point, reset to NULL */
5204 		if (err) {
5205 			if (replace && nhn)
5206 				NL_SET_ERR_MSG_MOD(extack,
5207 						   "multipath route replace failed (check consistency of installed routes)");
5208 			err_nh = nh;
5209 			goto add_errout;
5210 		}
5211 
5212 		/* Because each route is added like a single route, we remove
5213 		 * these flags after the first nexthop: if there is a collision,
5214 		 * we have already failed to add the first nexthop
5215 		 * (fib6_add_rt2node() has rejected it); when replacing, the old
5216 		 * nexthops have been replaced by the first new one, and the rest
5217 		 * should be appended to it.
5218 		 */
5219 		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5220 						     NLM_F_REPLACE);
5221 		cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5222 		nhn++;
5223 	}
5224 
5225 	/* An in-kernel notification should only be sent in case the new
5226 	 * multipath route is added as the first route in the node, or if
5227 	 * it was appended to it. We pass 'rt_notif' since it is the first
5228 	 * sibling and might allow us to skip some checks in the replace case.
5229 	 */
5230 	if (ip6_route_mpath_should_notify(rt_notif)) {
5231 		enum fib_event_type fib_event;
5232 
5233 		if (rt_notif->fib6_nsiblings != nhn - 1)
5234 			fib_event = FIB_EVENT_ENTRY_APPEND;
5235 		else
5236 			fib_event = FIB_EVENT_ENTRY_REPLACE;
5237 
5238 		err = call_fib6_multipath_entry_notifiers(info->nl_net,
5239 							  fib_event, rt_notif,
5240 							  nhn - 1, extack);
5241 		if (err) {
5242 			/* Delete all the siblings that were just added */
5243 			err_nh = NULL;
5244 			goto add_errout;
5245 		}
5246 	}
5247 
5248 	/* success ... tell user about new route */
5249 	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5250 	goto cleanup;
5251 
5252 add_errout:
5253 	/* send notification for routes that were added so that
5254 	 * the delete notifications sent by ip6_route_del are
5255 	 * coherent
5256 	 */
5257 	if (rt_notif)
5258 		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5259 
5260 	/* Delete routes that were already added */
5261 	list_for_each_entry(nh, &rt6_nh_list, next) {
5262 		if (err_nh == nh)
5263 			break;
5264 		ip6_route_del(&nh->r_cfg, extack);
5265 	}
5266 
5267 cleanup:
5268 	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5269 		if (nh->fib6_info)
5270 			fib6_info_release(nh->fib6_info);
5271 		list_del(&nh->next);
5272 		kfree(nh);
5273 	}
5274 
5275 	return err;
5276 }
5277 
5278 static int ip6_route_multipath_del(struct fib6_config *cfg,
5279 				   struct netlink_ext_ack *extack)
5280 {
5281 	struct fib6_config r_cfg;
5282 	struct rtnexthop *rtnh;
5283 	int remaining;
5284 	int attrlen;
5285 	int err = 1, last_err = 0;
5286 
5287 	remaining = cfg->fc_mp_len;
5288 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5289 
5290 	/* Parse a Multipath Entry */
5291 	while (rtnh_ok(rtnh, remaining)) {
5292 		memcpy(&r_cfg, cfg, sizeof(*cfg));
5293 		if (rtnh->rtnh_ifindex)
5294 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5295 
5296 		attrlen = rtnh_attrlen(rtnh);
5297 		if (attrlen > 0) {
5298 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5299 
5300 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5301 			if (nla) {
5302 				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
5303 				r_cfg.fc_flags |= RTF_GATEWAY;
5304 			}
5305 		}
5306 		err = ip6_route_del(&r_cfg, extack);
5307 		if (err)
5308 			last_err = err;
5309 
5310 		rtnh = rtnh_next(rtnh, &remaining);
5311 	}
5312 
5313 	return last_err;
5314 }
5315 
5316 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5317 			      struct netlink_ext_ack *extack)
5318 {
5319 	struct fib6_config cfg;
5320 	int err;
5321 
5322 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5323 	if (err < 0)
5324 		return err;
5325 
5326 	if (cfg.fc_nh_id &&
5327 	    !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5328 		NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5329 		return -EINVAL;
5330 	}
5331 
5332 	if (cfg.fc_mp)
5333 		return ip6_route_multipath_del(&cfg, extack);
5334 	else {
5335 		cfg.fc_delete_all_nh = 1;
5336 		return ip6_route_del(&cfg, extack);
5337 	}
5338 }
5339 
5340 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5341 			      struct netlink_ext_ack *extack)
5342 {
5343 	struct fib6_config cfg;
5344 	int err;
5345 
5346 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5347 	if (err < 0)
5348 		return err;
5349 
5350 	if (cfg.fc_metric == 0)
5351 		cfg.fc_metric = IP6_RT_PRIO_USER;
5352 
5353 	if (cfg.fc_mp)
5354 		return ip6_route_multipath_add(&cfg, extack);
5355 	else
5356 		return ip6_route_add(&cfg, GFP_KERNEL, extack);
5357 }
5358 
5359 /* add the overhead of this fib6_nh to nexthop_len */
5360 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5361 {
5362 	int *nexthop_len = arg;
5363 
5364 	*nexthop_len += nla_total_size(0)	 /* RTA_MULTIPATH */
5365 		     + NLA_ALIGN(sizeof(struct rtnexthop))
5366 		     + nla_total_size(16); /* RTA_GATEWAY */
5367 
5368 	if (nh->fib_nh_lws) {
5369 		/* RTA_ENCAP_TYPE */
5370 		*nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5371 		/* RTA_ENCAP */
5372 		*nexthop_len += nla_total_size(2);
5373 	}
5374 
5375 	return 0;
5376 }
5377 
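/* rt6_nlmsg_size() is a worst-case estimate of the attribute space a
 * route notification may need; callers use it to size the skb (see the
 * nlmsg_new() call in __ip6_del_rt_siblings() above). Overestimating
 * only wastes a little memory, while underestimating would make
 * rt6_fill_node() fail with -EMSGSIZE.
 */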
5378 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5379 {
5380 	int nexthop_len;
5381 
5382 	if (f6i->nh) {
5383 		nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5384 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5385 					 &nexthop_len);
5386 	} else {
5387 		struct fib6_nh *nh = f6i->fib6_nh;
5388 
5389 		nexthop_len = 0;
5390 		if (f6i->fib6_nsiblings) {
5391 			nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
5392 				    + NLA_ALIGN(sizeof(struct rtnexthop))
5393 				    + nla_total_size(16) /* RTA_GATEWAY */
5394 				    + lwtunnel_get_encap_size(nh->fib_nh_lws);
5395 
5396 			nexthop_len *= f6i->fib6_nsiblings;
5397 		}
5398 		nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5399 	}
5400 
5401 	return NLMSG_ALIGN(sizeof(struct rtmsg))
5402 	       + nla_total_size(16) /* RTA_SRC */
5403 	       + nla_total_size(16) /* RTA_DST */
5404 	       + nla_total_size(16) /* RTA_GATEWAY */
5405 	       + nla_total_size(16) /* RTA_PREFSRC */
5406 	       + nla_total_size(4) /* RTA_TABLE */
5407 	       + nla_total_size(4) /* RTA_IIF */
5408 	       + nla_total_size(4) /* RTA_OIF */
5409 	       + nla_total_size(4) /* RTA_PRIORITY */
5410 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5411 	       + nla_total_size(sizeof(struct rta_cacheinfo))
5412 	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5413 	       + nla_total_size(1) /* RTA_PREF */
5414 	       + nexthop_len;
5415 }
5416 
5417 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5418 				 unsigned char *flags)
5419 {
5420 	if (nexthop_is_multipath(nh)) {
5421 		struct nlattr *mp;
5422 
5423 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5424 		if (!mp)
5425 			goto nla_put_failure;
5426 
5427 		if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5428 			goto nla_put_failure;
5429 
5430 		nla_nest_end(skb, mp);
5431 	} else {
5432 		struct fib6_nh *fib6_nh;
5433 
5434 		fib6_nh = nexthop_fib6_nh(nh);
5435 		if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5436 				     flags, false) < 0)
5437 			goto nla_put_failure;
5438 	}
5439 
5440 	return 0;
5441 
5442 nla_put_failure:
5443 	return -EMSGSIZE;
5444 }
5445 
5446 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5447 			 struct fib6_info *rt, struct dst_entry *dst,
5448 			 struct in6_addr *dest, struct in6_addr *src,
5449 			 int iif, int type, u32 portid, u32 seq,
5450 			 unsigned int flags)
5451 {
5452 	struct rt6_info *rt6 = (struct rt6_info *)dst;
5453 	struct rt6key *rt6_dst, *rt6_src;
5454 	u32 *pmetrics, table, rt6_flags;
5455 	unsigned char nh_flags = 0;
5456 	struct nlmsghdr *nlh;
5457 	struct rtmsg *rtm;
5458 	long expires = 0;
5459 
5460 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5461 	if (!nlh)
5462 		return -EMSGSIZE;
5463 
5464 	if (rt6) {
5465 		rt6_dst = &rt6->rt6i_dst;
5466 		rt6_src = &rt6->rt6i_src;
5467 		rt6_flags = rt6->rt6i_flags;
5468 	} else {
5469 		rt6_dst = &rt->fib6_dst;
5470 		rt6_src = &rt->fib6_src;
5471 		rt6_flags = rt->fib6_flags;
5472 	}
5473 
5474 	rtm = nlmsg_data(nlh);
5475 	rtm->rtm_family = AF_INET6;
5476 	rtm->rtm_dst_len = rt6_dst->plen;
5477 	rtm->rtm_src_len = rt6_src->plen;
5478 	rtm->rtm_tos = 0;
5479 	if (rt->fib6_table)
5480 		table = rt->fib6_table->tb6_id;
5481 	else
5482 		table = RT6_TABLE_UNSPEC;
5483 	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5484 	if (nla_put_u32(skb, RTA_TABLE, table))
5485 		goto nla_put_failure;
5486 
5487 	rtm->rtm_type = rt->fib6_type;
5488 	rtm->rtm_flags = 0;
5489 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5490 	rtm->rtm_protocol = rt->fib6_protocol;
5491 
5492 	if (rt6_flags & RTF_CACHE)
5493 		rtm->rtm_flags |= RTM_F_CLONED;
5494 
5495 	if (dest) {
5496 		if (nla_put_in6_addr(skb, RTA_DST, dest))
5497 			goto nla_put_failure;
5498 		rtm->rtm_dst_len = 128;
5499 	} else if (rtm->rtm_dst_len)
5500 		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5501 			goto nla_put_failure;
5502 #ifdef CONFIG_IPV6_SUBTREES
5503 	if (src) {
5504 		if (nla_put_in6_addr(skb, RTA_SRC, src))
5505 			goto nla_put_failure;
5506 		rtm->rtm_src_len = 128;
5507 	} else if (rtm->rtm_src_len &&
5508 		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5509 		goto nla_put_failure;
5510 #endif
5511 	if (iif) {
5512 #ifdef CONFIG_IPV6_MROUTE
5513 		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5514 			int err = ip6mr_get_route(net, skb, rtm, portid);
5515 
5516 			if (err == 0)
5517 				return 0;
5518 			if (err < 0)
5519 				goto nla_put_failure;
5520 		} else
5521 #endif
5522 			if (nla_put_u32(skb, RTA_IIF, iif))
5523 				goto nla_put_failure;
5524 	} else if (dest) {
5525 		struct in6_addr saddr_buf;
5526 		if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5527 		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5528 			goto nla_put_failure;
5529 	}
5530 
5531 	if (rt->fib6_prefsrc.plen) {
5532 		struct in6_addr saddr_buf;
5533 		saddr_buf = rt->fib6_prefsrc.addr;
5534 		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5535 			goto nla_put_failure;
5536 	}
5537 
5538 	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5539 	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5540 		goto nla_put_failure;
5541 
5542 	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5543 		goto nla_put_failure;
5544 
5545 	/* For multipath routes, walk the siblings list and add
5546 	 * each as a nexthop within RTA_MULTIPATH.
5547 	 */
5548 	if (rt6) {
5549 		if (rt6_flags & RTF_GATEWAY &&
5550 		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5551 			goto nla_put_failure;
5552 
5553 		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5554 			goto nla_put_failure;
5555 	} else if (rt->fib6_nsiblings) {
5556 		struct fib6_info *sibling, *next_sibling;
5557 		struct nlattr *mp;
5558 
5559 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5560 		if (!mp)
5561 			goto nla_put_failure;
5562 
5563 		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5564 				    rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
5565 			goto nla_put_failure;
5566 
5567 		list_for_each_entry_safe(sibling, next_sibling,
5568 					 &rt->fib6_siblings, fib6_siblings) {
5569 			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5570 					    sibling->fib6_nh->fib_nh_weight,
5571 					    AF_INET6) < 0)
5572 				goto nla_put_failure;
5573 		}
5574 
5575 		nla_nest_end(skb, mp);
5576 	} else if (rt->nh) {
5577 		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5578 			goto nla_put_failure;
5579 
5580 		if (nexthop_is_blackhole(rt->nh))
5581 			rtm->rtm_type = RTN_BLACKHOLE;
5582 
5583 		if (net->ipv4.sysctl_nexthop_compat_mode &&
5584 		    rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5585 			goto nla_put_failure;
5586 
5587 		rtm->rtm_flags |= nh_flags;
5588 	} else {
5589 		if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5590 				     &nh_flags, false) < 0)
5591 			goto nla_put_failure;
5592 
5593 		rtm->rtm_flags |= nh_flags;
5594 	}
5595 
5596 	if (rt6_flags & RTF_EXPIRES) {
5597 		expires = dst ? dst->expires : rt->expires;
5598 		expires -= jiffies;
5599 	}
5600 
5601 	if (!dst) {
5602 		if (rt->offload)
5603 			rtm->rtm_flags |= RTM_F_OFFLOAD;
5604 		if (rt->trap)
5605 			rtm->rtm_flags |= RTM_F_TRAP;
5606 	}
5607 
5608 	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5609 		goto nla_put_failure;
5610 
5611 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5612 		goto nla_put_failure;
5613 
5614 
5615 	nlmsg_end(skb, nlh);
5616 	return 0;
5617 
5618 nla_put_failure:
5619 	nlmsg_cancel(skb, nlh);
5620 	return -EMSGSIZE;
5621 }
5622 
5623 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5624 {
5625 	const struct net_device *dev = arg;
5626 
5627 	if (nh->fib_nh_dev == dev)
5628 		return 1;
5629 
5630 	return 0;
5631 }
5632 
5633 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5634 			       const struct net_device *dev)
5635 {
5636 	if (f6i->nh) {
5637 		struct net_device *_dev = (struct net_device *)dev;
5638 
5639 		return !!nexthop_for_each_fib6_nh(f6i->nh,
5640 						  fib6_info_nh_uses_dev,
5641 						  _dev);
5642 	}
5643 
5644 	if (f6i->fib6_nh->fib_nh_dev == dev)
5645 		return true;
5646 
5647 	if (f6i->fib6_nsiblings) {
5648 		struct fib6_info *sibling, *next_sibling;
5649 
5650 		list_for_each_entry_safe(sibling, next_sibling,
5651 					 &f6i->fib6_siblings, fib6_siblings) {
5652 			if (sibling->fib6_nh->fib_nh_dev == dev)
5653 				return true;
5654 		}
5655 	}
5656 
5657 	return false;
5658 }
5659 
5660 struct fib6_nh_exception_dump_walker {
5661 	struct rt6_rtnl_dump_arg *dump;
5662 	struct fib6_info *rt;
5663 	unsigned int flags;
5664 	unsigned int skip;
5665 	unsigned int count;
5666 };
5667 
5668 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5669 {
5670 	struct fib6_nh_exception_dump_walker *w = arg;
5671 	struct rt6_rtnl_dump_arg *dump = w->dump;
5672 	struct rt6_exception_bucket *bucket;
5673 	struct rt6_exception *rt6_ex;
5674 	int i, err;
5675 
5676 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5677 	if (!bucket)
5678 		return 0;
5679 
5680 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5681 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5682 			if (w->skip) {
5683 				w->skip--;
5684 				continue;
5685 			}
5686 
5687 			/* Expiration of entries doesn't bump sernum, insertion
5688 			 * does. Removal is triggered by insertion, so we can
5689 			 * rely on the fact that if entries change between two
5690 			 * partial dumps, this node is scanned again completely,
5691 			 * see rt6_insert_exception() and fib6_dump_table().
5692 			 *
5693 			 * Count expired entries we go through as handled
5694 			 * entries that we'll skip next time, in case of partial
5695 			 * node dump. Otherwise, if entries expire meanwhile,
5696 			 * we'll skip the wrong amount.
5697 			 */
5698 			if (rt6_check_expired(rt6_ex->rt6i)) {
5699 				w->count++;
5700 				continue;
5701 			}
5702 
5703 			err = rt6_fill_node(dump->net, dump->skb, w->rt,
5704 					    &rt6_ex->rt6i->dst, NULL, NULL, 0,
5705 					    RTM_NEWROUTE,
5706 					    NETLINK_CB(dump->cb->skb).portid,
5707 					    dump->cb->nlh->nlmsg_seq, w->flags);
5708 			if (err)
5709 				return err;
5710 
5711 			w->count++;
5712 		}
5713 		bucket++;
5714 	}
5715 
5716 	return 0;
5717 }
5718 
5719 /* Return -1 if done with node, number of handled routes on partial dump */
5720 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5721 {
5722 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5723 	struct fib_dump_filter *filter = &arg->filter;
5724 	unsigned int flags = NLM_F_MULTI;
5725 	struct net *net = arg->net;
5726 	int count = 0;
5727 
5728 	if (rt == net->ipv6.fib6_null_entry)
5729 		return -1;
5730 
5731 	if ((filter->flags & RTM_F_PREFIX) &&
5732 	    !(rt->fib6_flags & RTF_PREFIX_RT)) {
5733 		/* success since this is not a prefix route */
5734 		return -1;
5735 	}
5736 	if (filter->filter_set &&
5737 	    ((filter->rt_type  && rt->fib6_type != filter->rt_type) ||
5738 	     (filter->dev      && !fib6_info_uses_dev(rt, filter->dev)) ||
5739 	     (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5740 		return -1;
5741 	}
5742 
5743 	if (filter->filter_set ||
5744 	    !filter->dump_routes || !filter->dump_exceptions) {
5745 		flags |= NLM_F_DUMP_FILTERED;
5746 	}
5747 
5748 	if (filter->dump_routes) {
5749 		if (skip) {
5750 			skip--;
5751 		} else {
5752 			if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5753 					  0, RTM_NEWROUTE,
5754 					  NETLINK_CB(arg->cb->skb).portid,
5755 					  arg->cb->nlh->nlmsg_seq, flags)) {
5756 				return 0;
5757 			}
5758 			count++;
5759 		}
5760 	}
5761 
5762 	if (filter->dump_exceptions) {
5763 		struct fib6_nh_exception_dump_walker w = { .dump = arg,
5764 							   .rt = rt,
5765 							   .flags = flags,
5766 							   .skip = skip,
5767 							   .count = 0 };
5768 		int err;
5769 
5770 		rcu_read_lock();
5771 		if (rt->nh) {
5772 			err = nexthop_for_each_fib6_nh(rt->nh,
5773 						       rt6_nh_dump_exceptions,
5774 						       &w);
5775 		} else {
5776 			err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5777 		}
5778 		rcu_read_unlock();
5779 
5780 		if (err)
5781 			return count + w.count;
5782 	}
5783 
5784 	return -1;
5785 }
5786 
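/* Validate an RTM_GETROUTE request.  Legacy requests are parsed with the
 * lenient (deprecated) policy.  When strict checking is enabled on the
 * requesting socket, reject anything but zero or fully specified (128 bit)
 * src/dst prefix lengths, the RTM_F_FIB_MATCH flag, and the attributes
 * that inet6_rtm_getroute() below actually consumes.
 */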
5787 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5788 					const struct nlmsghdr *nlh,
5789 					struct nlattr **tb,
5790 					struct netlink_ext_ack *extack)
5791 {
5792 	struct rtmsg *rtm;
5793 	int i, err;
5794 
5795 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5796 		NL_SET_ERR_MSG_MOD(extack,
5797 				   "Invalid header for get route request");
5798 		return -EINVAL;
5799 	}
5800 
5801 	if (!netlink_strict_get_check(skb))
5802 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5803 					      rtm_ipv6_policy, extack);
5804 
5805 	rtm = nlmsg_data(nlh);
5806 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5807 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5808 	    rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5809 	    rtm->rtm_type) {
5810 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5811 		return -EINVAL;
5812 	}
5813 	if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5814 		NL_SET_ERR_MSG_MOD(extack,
5815 				   "Invalid flags for get route request");
5816 		return -EINVAL;
5817 	}
5818 
5819 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5820 					    rtm_ipv6_policy, extack);
5821 	if (err)
5822 		return err;
5823 
5824 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5825 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5826 		NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5827 		return -EINVAL;
5828 	}
5829 
5830 	for (i = 0; i <= RTA_MAX; i++) {
5831 		if (!tb[i])
5832 			continue;
5833 
5834 		switch (i) {
5835 		case RTA_SRC:
5836 		case RTA_DST:
5837 		case RTA_IIF:
5838 		case RTA_OIF:
5839 		case RTA_MARK:
5840 		case RTA_UID:
5841 		case RTA_SPORT:
5842 		case RTA_DPORT:
5843 		case RTA_IP_PROTO:
5844 			break;
5845 		default:
5846 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
5847 			return -EINVAL;
5848 		}
5849 	}
5850 
5851 	return 0;
5852 }
5853 
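/* RTM_GETROUTE handler: build a flow from the request attributes, resolve
 * it through the input path (when RTA_IIF is given) or the output path,
 * and answer with a single RTM_NEWROUTE message.  With RTM_F_FIB_MATCH the
 * matching FIB entry is reported instead of the resolved dst.
 *
 * Illustrative userspace sketch, not part of this file (socket setup and
 * error handling are elided; nl_fd and the address are assumptions made
 * for the example):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtmsg rtm;
 *		struct rtattr rta;
 *		struct in6_addr dst;
 *	} req = {
 *		.nlh.nlmsg_len   = sizeof(req),
 *		.nlh.nlmsg_type  = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.rtm.rtm_family  = AF_INET6,
 *		.rtm.rtm_dst_len = 128,
 *		.rta.rta_type    = RTA_DST,
 *		.rta.rta_len     = RTA_LENGTH(sizeof(struct in6_addr)),
 *	};
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &req.dst);
 *	send(nl_fd, &req, sizeof(req), 0);
 *
 * which is roughly what "ip -6 route get 2001:db8::1" sends.
 */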
5854 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5855 			      struct netlink_ext_ack *extack)
5856 {
5857 	struct net *net = sock_net(in_skb->sk);
5858 	struct nlattr *tb[RTA_MAX+1];
5859 	int err, iif = 0, oif = 0;
5860 	struct fib6_info *from;
5861 	struct dst_entry *dst;
5862 	struct rt6_info *rt;
5863 	struct sk_buff *skb;
5864 	struct rtmsg *rtm;
5865 	struct flowi6 fl6 = {};
5866 	bool fibmatch;
5867 
5868 	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
5869 	if (err < 0)
5870 		goto errout;
5871 
5872 	err = -EINVAL;
5873 	rtm = nlmsg_data(nlh);
5874 	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
5875 	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
5876 
5877 	if (tb[RTA_SRC]) {
5878 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
5879 			goto errout;
5880 
5881 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
5882 	}
5883 
5884 	if (tb[RTA_DST]) {
5885 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
5886 			goto errout;
5887 
5888 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
5889 	}
5890 
5891 	if (tb[RTA_IIF])
5892 		iif = nla_get_u32(tb[RTA_IIF]);
5893 
5894 	if (tb[RTA_OIF])
5895 		oif = nla_get_u32(tb[RTA_OIF]);
5896 
5897 	if (tb[RTA_MARK])
5898 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
5899 
5900 	if (tb[RTA_UID])
5901 		fl6.flowi6_uid = make_kuid(current_user_ns(),
5902 					   nla_get_u32(tb[RTA_UID]));
5903 	else
5904 		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
5905 
5906 	if (tb[RTA_SPORT])
5907 		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
5908 
5909 	if (tb[RTA_DPORT])
5910 		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
5911 
5912 	if (tb[RTA_IP_PROTO]) {
5913 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
5914 						  &fl6.flowi6_proto, AF_INET6,
5915 						  extack);
5916 		if (err)
5917 			goto errout;
5918 	}
5919 
5920 	if (iif) {
5921 		struct net_device *dev;
5922 		int flags = 0;
5923 
5924 		rcu_read_lock();
5925 
5926 		dev = dev_get_by_index_rcu(net, iif);
5927 		if (!dev) {
5928 			rcu_read_unlock();
5929 			err = -ENODEV;
5930 			goto errout;
5931 		}
5932 
5933 		fl6.flowi6_iif = iif;
5934 
5935 		if (!ipv6_addr_any(&fl6.saddr))
5936 			flags |= RT6_LOOKUP_F_HAS_SADDR;
5937 
5938 		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
5939 
5940 		rcu_read_unlock();
5941 	} else {
5942 		fl6.flowi6_oif = oif;
5943 
5944 		dst = ip6_route_output(net, NULL, &fl6);
5945 	}
5946 
5947 
5948 	rt = container_of(dst, struct rt6_info, dst);
5949 	if (rt->dst.error) {
5950 		err = rt->dst.error;
5951 		ip6_rt_put(rt);
5952 		goto errout;
5953 	}
5954 
5955 	if (rt == net->ipv6.ip6_null_entry) {
5956 		err = rt->dst.error;
5957 		ip6_rt_put(rt);
5958 		goto errout;
5959 	}
5960 
5961 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
5962 	if (!skb) {
5963 		ip6_rt_put(rt);
5964 		err = -ENOBUFS;
5965 		goto errout;
5966 	}
5967 
5968 	skb_dst_set(skb, &rt->dst);
5969 
5970 	rcu_read_lock();
5971 	from = rcu_dereference(rt->from);
5972 	if (from) {
5973 		if (fibmatch)
5974 			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
5975 					    iif, RTM_NEWROUTE,
5976 					    NETLINK_CB(in_skb).portid,
5977 					    nlh->nlmsg_seq, 0);
5978 		else
5979 			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
5980 					    &fl6.saddr, iif, RTM_NEWROUTE,
5981 					    NETLINK_CB(in_skb).portid,
5982 					    nlh->nlmsg_seq, 0);
5983 	} else {
5984 		err = -ENETUNREACH;
5985 	}
5986 	rcu_read_unlock();
5987 
5988 	if (err < 0) {
5989 		kfree_skb(skb);
5990 		goto errout;
5991 	}
5992 
5993 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
5994 errout:
5995 	return err;
5996 }
5997 
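/* Notify RTNLGRP_IPV6_ROUTE listeners of @event (RTM_NEWROUTE/RTM_DELROUTE)
 * for @rt.  The skb is sized via rt6_nlmsg_size(), so -EMSGSIZE from
 * rt6_fill_node() means a sizing bug rather than a transient failure.
 */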
5998 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
5999 		     unsigned int nlm_flags)
6000 {
6001 	struct sk_buff *skb;
6002 	struct net *net = info->nl_net;
6003 	u32 seq;
6004 	int err;
6005 
6006 	err = -ENOBUFS;
6007 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6008 
6009 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6010 	if (!skb)
6011 		goto errout;
6012 
6013 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6014 			    event, info->portid, seq, nlm_flags);
6015 	if (err < 0) {
6016 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6017 		WARN_ON(err == -EMSGSIZE);
6018 		kfree_skb(skb);
6019 		goto errout;
6020 	}
6021 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6022 		    info->nlh, gfp_any());
6023 	return;
6024 errout:
6025 	if (err < 0)
6026 		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6027 }
6028 
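/* Like inet6_rt_notify() for an in-place replace: send RTM_NEWROUTE with
 * NLM_F_REPLACE to userspace and also run the FIB_EVENT_ENTRY_REPLACE
 * in-kernel notifiers (the latter is temporary, see the comment below).
 */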
6029 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6030 		    struct nl_info *info)
6031 {
6032 	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6033 	struct sk_buff *skb;
6034 	int err = -ENOBUFS;
6035 
6036 	/* call_fib6_entry_notifiers will be removed when the in-kernel
6037 	 * notifier is implemented and supported for nexthop objects
6038 	 */
6039 	call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
6040 
6041 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6042 	if (!skb)
6043 		goto errout;
6044 
6045 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6046 			    RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6047 	if (err < 0) {
6048 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6049 		WARN_ON(err == -EMSGSIZE);
6050 		kfree_skb(skb);
6051 		goto errout;
6052 	}
6053 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6054 		    info->nlh, gfp_any());
6055 	return;
6056 errout:
6057 	if (err < 0)
6058 		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6059 }
6060 
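/* Netdevice notifier: the special null/prohibit/blackhole dst entries are
 * backed by the loopback device, so take idev references when it registers
 * and drop them again when it unregisters.
 */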
6061 static int ip6_route_dev_notify(struct notifier_block *this,
6062 				unsigned long event, void *ptr)
6063 {
6064 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6065 	struct net *net = dev_net(dev);
6066 
6067 	if (!(dev->flags & IFF_LOOPBACK))
6068 		return NOTIFY_OK;
6069 
6070 	if (event == NETDEV_REGISTER) {
6071 		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6072 		net->ipv6.ip6_null_entry->dst.dev = dev;
6073 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6074 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6075 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6076 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6077 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6078 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6079 #endif
6080 	} else if (event == NETDEV_UNREGISTER &&
6081 		    dev->reg_state != NETREG_UNREGISTERED) {
6082 		/* NETDEV_UNREGISTER can be fired multiple times by
6083 		 * netdev_wait_allrefs(). Make sure we only do this cleanup once.
6084 		 */
6085 		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6086 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6087 		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6088 		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6089 #endif
6090 	}
6091 
6092 	return NOTIFY_OK;
6093 }
6094 
6095 /*
6096  *	/proc
6097  */
6098 
6099 #ifdef CONFIG_PROC_FS
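/* Backs /proc/net/rt6_stats (see ip6_route_net_init_late()): seven
 * hex-formatted counters describing fib node, route and cache usage in
 * this namespace.
 */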
6100 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6101 {
6102 	struct net *net = (struct net *)seq->private;
6103 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6104 		   net->ipv6.rt6_stats->fib_nodes,
6105 		   net->ipv6.rt6_stats->fib_route_nodes,
6106 		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6107 		   net->ipv6.rt6_stats->fib_rt_entries,
6108 		   net->ipv6.rt6_stats->fib_rt_cache,
6109 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6110 		   net->ipv6.rt6_stats->fib_discarded_routes);
6111 
6112 	return 0;
6113 }
6114 #endif	/* CONFIG_PROC_FS */
6115 
6116 #ifdef CONFIG_SYSCTL
6117 
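/* Handler for the write-only "flush" entry under /proc/sys/net/ipv6/route/:
 * a write updates flush_delay and kicks fib6_run_gc() to flush cached
 * routes.
 */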
6118 static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6119 			      void *buffer, size_t *lenp, loff_t *ppos)
6120 {
6121 	struct net *net;
6122 	int delay;
6123 	int ret;
6124 	if (!write)
6125 		return -EINVAL;
6126 
6127 	net = (struct net *)ctl->extra1;
6128 	delay = net->ipv6.sysctl.flush_delay;
6129 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6130 	if (ret)
6131 		return ret;
6132 
6133 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6134 	return 0;
6135 }
6136 
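/* Template for the per-netns tables under /proc/sys/net/ipv6/route/.
 * ipv6_route_sysctl_init() duplicates it for each namespace and repoints
 * the .data members by index, so the assignments there must stay in sync
 * with the entry order here.
 */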
6137 static struct ctl_table ipv6_route_table_template[] = {
6138 	{
6139 		.procname	=	"flush",
6140 		.data		=	&init_net.ipv6.sysctl.flush_delay,
6141 		.maxlen		=	sizeof(int),
6142 		.mode		=	0200,
6143 		.proc_handler	=	ipv6_sysctl_rtcache_flush
6144 	},
6145 	{
6146 		.procname	=	"gc_thresh",
6147 		.data		=	&ip6_dst_ops_template.gc_thresh,
6148 		.maxlen		=	sizeof(int),
6149 		.mode		=	0644,
6150 		.proc_handler	=	proc_dointvec,
6151 	},
6152 	{
6153 		.procname	=	"max_size",
6154 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
6155 		.maxlen		=	sizeof(int),
6156 		.mode		=	0644,
6157 		.proc_handler	=	proc_dointvec,
6158 	},
6159 	{
6160 		.procname	=	"gc_min_interval",
6161 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6162 		.maxlen		=	sizeof(int),
6163 		.mode		=	0644,
6164 		.proc_handler	=	proc_dointvec_jiffies,
6165 	},
6166 	{
6167 		.procname	=	"gc_timeout",
6168 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6169 		.maxlen		=	sizeof(int),
6170 		.mode		=	0644,
6171 		.proc_handler	=	proc_dointvec_jiffies,
6172 	},
6173 	{
6174 		.procname	=	"gc_interval",
6175 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
6176 		.maxlen		=	sizeof(int),
6177 		.mode		=	0644,
6178 		.proc_handler	=	proc_dointvec_jiffies,
6179 	},
6180 	{
6181 		.procname	=	"gc_elasticity",
6182 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6183 		.maxlen		=	sizeof(int),
6184 		.mode		=	0644,
6185 		.proc_handler	=	proc_dointvec,
6186 	},
6187 	{
6188 		.procname	=	"mtu_expires",
6189 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6190 		.maxlen		=	sizeof(int),
6191 		.mode		=	0644,
6192 		.proc_handler	=	proc_dointvec_jiffies,
6193 	},
6194 	{
6195 		.procname	=	"min_adv_mss",
6196 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
6197 		.maxlen		=	sizeof(int),
6198 		.mode		=	0644,
6199 		.proc_handler	=	proc_dointvec,
6200 	},
6201 	{
6202 		.procname	=	"gc_min_interval_ms",
6203 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6204 		.maxlen		=	sizeof(int),
6205 		.mode		=	0644,
6206 		.proc_handler	=	proc_dointvec_ms_jiffies,
6207 	},
6208 	{
6209 		.procname	=	"skip_notify_on_dev_down",
6210 		.data		=	&init_net.ipv6.sysctl.skip_notify_on_dev_down,
6211 		.maxlen		=	sizeof(int),
6212 		.mode		=	0644,
6213 		.proc_handler	=	proc_dointvec_minmax,
6214 		.extra1		=	SYSCTL_ZERO,
6215 		.extra2		=	SYSCTL_ONE,
6216 	},
6217 	{ }
6218 };
6219 
6220 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6221 {
6222 	struct ctl_table *table;
6223 
6224 	table = kmemdup(ipv6_route_table_template,
6225 			sizeof(ipv6_route_table_template),
6226 			GFP_KERNEL);
6227 
6228 	if (table) {
6229 		table[0].data = &net->ipv6.sysctl.flush_delay;
6230 		table[0].extra1 = net;
6231 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6232 		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
6233 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6234 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6235 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6236 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6237 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6238 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6239 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6240 		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6241 
6242 		/* Don't export sysctls to unprivileged users */
6243 		if (net->user_ns != &init_user_ns)
6244 			table[0].procname = NULL;
6245 	}
6246 
6247 	return table;
6248 }
6249 #endif
6250 
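/* Per-netns init: clone the dst_ops template, allocate the fib6/ip6 null
 * entries and (with CONFIG_IPV6_MULTIPLE_TABLES) the prohibit and blackhole
 * entries, then seed the GC and routing-cache sysctl defaults.  Failures
 * unwind through the labels at the bottom in reverse order of allocation.
 */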
6251 static int __net_init ip6_route_net_init(struct net *net)
6252 {
6253 	int ret = -ENOMEM;
6254 
6255 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6256 	       sizeof(net->ipv6.ip6_dst_ops));
6257 
6258 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6259 		goto out_ip6_dst_ops;
6260 
6261 	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6262 	if (!net->ipv6.fib6_null_entry)
6263 		goto out_ip6_dst_entries;
6264 	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6265 	       sizeof(*net->ipv6.fib6_null_entry));
6266 
6267 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6268 					   sizeof(*net->ipv6.ip6_null_entry),
6269 					   GFP_KERNEL);
6270 	if (!net->ipv6.ip6_null_entry)
6271 		goto out_fib6_null_entry;
6272 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6273 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6274 			 ip6_template_metrics, true);
6275 	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);
6276 
6277 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6278 	net->ipv6.fib6_has_custom_rules = false;
6279 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6280 					       sizeof(*net->ipv6.ip6_prohibit_entry),
6281 					       GFP_KERNEL);
6282 	if (!net->ipv6.ip6_prohibit_entry)
6283 		goto out_ip6_null_entry;
6284 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6285 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6286 			 ip6_template_metrics, true);
6287 	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
6288 
6289 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6290 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
6291 					       GFP_KERNEL);
6292 	if (!net->ipv6.ip6_blk_hole_entry)
6293 		goto out_ip6_prohibit_entry;
6294 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6295 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6296 			 ip6_template_metrics, true);
6297 	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
6298 #ifdef CONFIG_IPV6_SUBTREES
6299 	net->ipv6.fib6_routes_require_src = 0;
6300 #endif
6301 #endif
6302 
6303 	net->ipv6.sysctl.flush_delay = 0;
6304 	net->ipv6.sysctl.ip6_rt_max_size = 4096;
6305 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6306 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6307 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6308 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6309 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6310 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6311 	net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6312 
6313 	net->ipv6.ip6_rt_gc_expire = 30*HZ;
6314 
6315 	ret = 0;
6316 out:
6317 	return ret;
6318 
6319 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6320 out_ip6_prohibit_entry:
6321 	kfree(net->ipv6.ip6_prohibit_entry);
6322 out_ip6_null_entry:
6323 	kfree(net->ipv6.ip6_null_entry);
6324 #endif
6325 out_fib6_null_entry:
6326 	kfree(net->ipv6.fib6_null_entry);
6327 out_ip6_dst_entries:
6328 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6329 out_ip6_dst_ops:
6330 	goto out;
6331 }
6332 
6333 static void __net_exit ip6_route_net_exit(struct net *net)
6334 {
6335 	kfree(net->ipv6.fib6_null_entry);
6336 	kfree(net->ipv6.ip6_null_entry);
6337 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6338 	kfree(net->ipv6.ip6_prohibit_entry);
6339 	kfree(net->ipv6.ip6_blk_hole_entry);
6340 #endif
6341 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6342 }
6343 
6344 static int __net_init ip6_route_net_init_late(struct net *net)
6345 {
6346 #ifdef CONFIG_PROC_FS
6347 	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
6348 			sizeof(struct ipv6_route_iter));
6349 	proc_create_net_single("rt6_stats", 0444, net->proc_net,
6350 			rt6_stats_seq_show, NULL);
6351 #endif
6352 	return 0;
6353 }
6354 
6355 static void __net_exit ip6_route_net_exit_late(struct net *net)
6356 {
6357 #ifdef CONFIG_PROC_FS
6358 	remove_proc_entry("ipv6_route", net->proc_net);
6359 	remove_proc_entry("rt6_stats", net->proc_net);
6360 #endif
6361 }
6362 
6363 static struct pernet_operations ip6_route_net_ops = {
6364 	.init = ip6_route_net_init,
6365 	.exit = ip6_route_net_exit,
6366 };
6367 
6368 static int __net_init ipv6_inetpeer_init(struct net *net)
6369 {
6370 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6371 
6372 	if (!bp)
6373 		return -ENOMEM;
6374 	inet_peer_base_init(bp);
6375 	net->ipv6.peers = bp;
6376 	return 0;
6377 }
6378 
6379 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6380 {
6381 	struct inet_peer_base *bp = net->ipv6.peers;
6382 
6383 	net->ipv6.peers = NULL;
6384 	inetpeer_invalidate_tree(bp);
6385 	kfree(bp);
6386 }
6387 
6388 static struct pernet_operations ipv6_inetpeer_ops = {
6389 	.init	=	ipv6_inetpeer_init,
6390 	.exit	=	ipv6_inetpeer_exit,
6391 };
6392 
6393 static struct pernet_operations ip6_route_net_late_ops = {
6394 	.init = ip6_route_net_init_late,
6395 	.exit = ip6_route_net_exit_late,
6396 };
6397 
6398 static struct notifier_block ip6_route_dev_notifier = {
6399 	.notifier_call = ip6_route_dev_notify,
6400 	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6401 };
6402 
6403 void __init ip6_route_init_special_entries(void)
6404 {
6405 	/* The loopback device is registered before this code runs, so the
6406 	 * loopback reference in rt6_info is not taken automatically; do it
6407 	 * manually for init_net. */
6408 	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6409 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6410 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6411 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6412 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6413 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6414 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6415 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6416 #endif
6417 }
6418 
6419 #if IS_BUILTIN(CONFIG_IPV6)
6420 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6421 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6422 
6423 static const struct bpf_iter_reg ipv6_route_reg_info = {
6424 	.target			= "ipv6_route",
6425 	.seq_ops		= &ipv6_route_seq_ops,
6426 	.init_seq_private	= bpf_iter_init_seq_net,
6427 	.fini_seq_private	= bpf_iter_fini_seq_net,
6428 	.seq_priv_size		= sizeof(struct ipv6_route_iter),
6429 	.ctx_arg_info_size	= 1,
6430 	.ctx_arg_info		= {
6431 		{ offsetof(struct bpf_iter__ipv6_route, rt),
6432 		  PTR_TO_BTF_ID_OR_NULL },
6433 	},
6434 };
6435 
6436 static int __init bpf_iter_register(void)
6437 {
6438 	return bpf_iter_reg_target(&ipv6_route_reg_info);
6439 }
6440 
6441 static void bpf_iter_unregister(void)
6442 {
6443 	bpf_iter_unreg_target(&ipv6_route_reg_info);
6444 }
6445 #endif
6446 #endif
6447 
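/* Subsystem init: create the rt6_info slab, register the pernet ops,
 * fib6/xfrm6/fib6_rules, the RTM_{NEW,DEL,GET}ROUTE rtnetlink handlers,
 * the netdevice notifier and (for builtin IPv6 with BPF and procfs) the
 * ipv6_route BPF iterator, then set up the per-cpu uncached lists.
 * Failures unwind through the goto labels below.
 */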
6448 int __init ip6_route_init(void)
6449 {
6450 	int ret;
6451 	int cpu;
6452 
6453 	ret = -ENOMEM;
6454 	ip6_dst_ops_template.kmem_cachep =
6455 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6456 				  SLAB_HWCACHE_ALIGN, NULL);
6457 	if (!ip6_dst_ops_template.kmem_cachep)
6458 		goto out;
6459 
6460 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
6461 	if (ret)
6462 		goto out_kmem_cache;
6463 
6464 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6465 	if (ret)
6466 		goto out_dst_entries;
6467 
6468 	ret = register_pernet_subsys(&ip6_route_net_ops);
6469 	if (ret)
6470 		goto out_register_inetpeer;
6471 
6472 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6473 
6474 	ret = fib6_init();
6475 	if (ret)
6476 		goto out_register_subsys;
6477 
6478 	ret = xfrm6_init();
6479 	if (ret)
6480 		goto out_fib6_init;
6481 
6482 	ret = fib6_rules_init();
6483 	if (ret)
6484 		goto xfrm6_init;
6485 
6486 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
6487 	if (ret)
6488 		goto fib6_rules_init;
6489 
6490 	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
6491 				   inet6_rtm_newroute, NULL, 0);
6492 	if (ret < 0)
6493 		goto out_register_late_subsys;
6494 
6495 	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
6496 				   inet6_rtm_delroute, NULL, 0);
6497 	if (ret < 0)
6498 		goto out_register_late_subsys;
6499 
6500 	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
6501 				   inet6_rtm_getroute, NULL,
6502 				   RTNL_FLAG_DOIT_UNLOCKED);
6503 	if (ret < 0)
6504 		goto out_register_late_subsys;
6505 
6506 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6507 	if (ret)
6508 		goto out_register_late_subsys;
6509 
6510 #if IS_BUILTIN(CONFIG_IPV6)
6511 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6512 	ret = bpf_iter_register();
6513 	if (ret)
6514 		goto out_register_late_subsys;
6515 #endif
6516 #endif
6517 
6518 	for_each_possible_cpu(cpu) {
6519 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6520 
6521 		INIT_LIST_HEAD(&ul->head);
6522 		spin_lock_init(&ul->lock);
6523 	}
6524 
6525 out:
6526 	return ret;
6527 
6528 out_register_late_subsys:
6529 	rtnl_unregister_all(PF_INET6);
6530 	unregister_pernet_subsys(&ip6_route_net_late_ops);
6531 fib6_rules_init:
6532 	fib6_rules_cleanup();
6533 xfrm6_init:
6534 	xfrm6_fini();
6535 out_fib6_init:
6536 	fib6_gc_cleanup();
6537 out_register_subsys:
6538 	unregister_pernet_subsys(&ip6_route_net_ops);
6539 out_register_inetpeer:
6540 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
6541 out_dst_entries:
6542 	dst_entries_destroy(&ip6_dst_blackhole_ops);
6543 out_kmem_cache:
6544 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6545 	goto out;
6546 }
6547 
6548 void ip6_route_cleanup(void)
6549 {
6550 #if IS_BUILTIN(CONFIG_IPV6)
6551 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6552 	bpf_iter_unregister();
6553 #endif
6554 #endif
6555 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
6556 	unregister_pernet_subsys(&ip6_route_net_late_ops);
6557 	fib6_rules_cleanup();
6558 	xfrm6_fini();
6559 	fib6_gc_cleanup();
6560 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
6561 	unregister_pernet_subsys(&ip6_route_net_ops);
6562 	dst_entries_destroy(&ip6_dst_blackhole_ops);
6563 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6564 }
6565