xref: /openbmc/linux/net/ipv6/addrconf.c (revision 425b9c7f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 Address [auto]configuration
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
9  */
10 
11 /*
12  *	Changes:
13  *
14  *	Janos Farkas			:	delete timer on ifdown
15  *	<chexum@bankinf.banki.hu>
16  *	Andi Kleen			:	kill double kfree on module
17  *						unload.
18  *	Maciej W. Rozycki		:	FDDI support
19  *	sekiya@USAGI			:	Don't send too many RS
20  *						packets.
21  *	yoshfuji@USAGI			:       Fixed interval between DAD
22  *						packets.
23  *	YOSHIFUJI Hideaki @USAGI	:	improved accuracy of
24  *						address validation timer.
25  *	YOSHIFUJI Hideaki @USAGI	:	Privacy Extensions (RFC3041)
26  *						support.
27  *	Yuji SEKIYA @USAGI		:	Don't assign the same IPv6
28  *						address twice on an interface.
29  *	YOSHIFUJI Hideaki @USAGI	:	ARCnet support
30  *	YOSHIFUJI Hideaki @USAGI	:	convert /proc/net/if_inet6 to
31  *						seq_file.
32  *	YOSHIFUJI Hideaki @USAGI	:	improved source address
33  *						selection; consider scope,
34  *						status etc.
35  */
36 
37 #define pr_fmt(fmt) "IPv6: " fmt
38 
39 #include <linux/errno.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/sched/signal.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/net.h>
46 #include <linux/inet.h>
47 #include <linux/in6.h>
48 #include <linux/netdevice.h>
49 #include <linux/if_addr.h>
50 #include <linux/if_arp.h>
51 #include <linux/if_arcnet.h>
52 #include <linux/if_infiniband.h>
53 #include <linux/route.h>
54 #include <linux/inetdevice.h>
55 #include <linux/init.h>
56 #include <linux/slab.h>
57 #ifdef CONFIG_SYSCTL
58 #include <linux/sysctl.h>
59 #endif
60 #include <linux/capability.h>
61 #include <linux/delay.h>
62 #include <linux/notifier.h>
63 #include <linux/string.h>
64 #include <linux/hash.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/sock.h>
68 #include <net/snmp.h>
69 
70 #include <net/6lowpan.h>
71 #include <net/firewire.h>
72 #include <net/ipv6.h>
73 #include <net/protocol.h>
74 #include <net/ndisc.h>
75 #include <net/ip6_route.h>
76 #include <net/addrconf.h>
77 #include <net/tcp.h>
78 #include <net/ip.h>
79 #include <net/netlink.h>
80 #include <net/pkt_sched.h>
81 #include <net/l3mdev.h>
82 #include <linux/if_tunnel.h>
83 #include <linux/rtnetlink.h>
84 #include <linux/netconf.h>
85 #include <linux/random.h>
86 #include <linux/uaccess.h>
87 #include <asm/unaligned.h>
88 
89 #include <linux/proc_fs.h>
90 #include <linux/seq_file.h>
91 #include <linux/export.h>
92 #include <linux/ioam6.h>
93 
94 #define	INFINITY_LIFE_TIME	0xFFFFFFFF
95 
96 #define IPV6_MAX_STRLEN \
97 	sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
98 
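/*
 * Convert a jiffies timestamp (stored relative to INITIAL_JIFFIES) into
 * hundredths of a second, the unit used for the ifa_cacheinfo
 * cstamp/tstamp fields reported over netlink.  For example, with
 * HZ == 1000 a delta of 2500 jiffies becomes 250 (2.5 seconds).
 */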
99 static inline u32 cstamp_delta(unsigned long cstamp)
100 {
101 	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
102 }
103 
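/*
 * The two helpers below implement the randomized exponential backoff of
 * RFC 3315 section 14, used for the router solicitation retransmission
 * interval: the first retransmission time is IRT jittered by +/-10%,
 * each subsequent one is multiplied by roughly 2 (1.9 .. 2.1) and, once
 * it would exceed MRT, it is re-randomized to MRT +/-10% instead.
 * E.g. with IRT = 4s and MRT = 3600s the interval grows roughly
 * 4, 8, 16, ... and then settles around 3600.
 */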
104 static inline s32 rfc3315_s14_backoff_init(s32 irt)
105 {
106 	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
107 	u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
108 	do_div(tmp, 1000000);
109 	return (s32)tmp;
110 }
111 
112 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
113 {
114 	/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
115 	u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
116 	do_div(tmp, 1000000);
117 	if ((s32)tmp > mrt) {
118 		/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
119 		tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
120 		do_div(tmp, 1000000);
121 	}
122 	return (s32)tmp;
123 }
124 
125 #ifdef CONFIG_SYSCTL
126 static int addrconf_sysctl_register(struct inet6_dev *idev);
127 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
128 #else
129 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
130 {
131 	return 0;
132 }
133 
134 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
135 {
136 }
137 #endif
138 
139 static void ipv6_gen_rnd_iid(struct in6_addr *addr);
140 
141 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
142 static int ipv6_count_addresses(const struct inet6_dev *idev);
143 static int ipv6_generate_stable_address(struct in6_addr *addr,
144 					u8 dad_count,
145 					const struct inet6_dev *idev);
146 
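/* Size of the per-netns inet6_addr_lst address hash table: 1 << 8 = 256 buckets. */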
147 #define IN6_ADDR_HSIZE_SHIFT	8
148 #define IN6_ADDR_HSIZE		(1 << IN6_ADDR_HSIZE_SHIFT)
149 
150 static void addrconf_verify(struct net *net);
151 static void addrconf_verify_rtnl(struct net *net);
152 
153 static struct workqueue_struct *addrconf_wq;
154 
155 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
156 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
157 
158 static void addrconf_type_change(struct net_device *dev,
159 				 unsigned long event);
160 static int addrconf_ifdown(struct net_device *dev, bool unregister);
161 
162 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
163 						  int plen,
164 						  const struct net_device *dev,
165 						  u32 flags, u32 noflags,
166 						  bool no_gw);
167 
168 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
169 static void addrconf_dad_work(struct work_struct *w);
170 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
171 				   bool send_na);
172 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
173 static void addrconf_rs_timer(struct timer_list *t);
174 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
175 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
176 
177 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
178 				struct prefix_info *pinfo);
179 
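/*
 * ipv6_devconf below backs /proc/sys/net/ipv6/conf/all in the initial
 * namespace, while ipv6_devconf_dflt further down serves as conf/default
 * and is the template copied into every newly created inet6_dev
 * (see ipv6_add_dev()).
 */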
180 static struct ipv6_devconf ipv6_devconf __read_mostly = {
181 	.forwarding		= 0,
182 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
183 	.mtu6			= IPV6_MIN_MTU,
184 	.accept_ra		= 1,
185 	.accept_redirects	= 1,
186 	.autoconf		= 1,
187 	.force_mld_version	= 0,
188 	.mldv1_unsolicited_report_interval = 10 * HZ,
189 	.mldv2_unsolicited_report_interval = HZ,
190 	.dad_transmits		= 1,
191 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
192 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
193 	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
194 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
195 	.use_tempaddr		= 0,
196 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
197 	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
198 	.regen_max_retry	= REGEN_MAX_RETRY,
199 	.max_desync_factor	= MAX_DESYNC_FACTOR,
200 	.max_addresses		= IPV6_MAX_ADDRESSES,
201 	.accept_ra_defrtr	= 1,
202 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
203 	.accept_ra_from_local	= 0,
204 	.accept_ra_min_hop_limit= 1,
205 	.accept_ra_pinfo	= 1,
206 #ifdef CONFIG_IPV6_ROUTER_PREF
207 	.accept_ra_rtr_pref	= 1,
208 	.rtr_probe_interval	= 60 * HZ,
209 #ifdef CONFIG_IPV6_ROUTE_INFO
210 	.accept_ra_rt_info_min_plen = 0,
211 	.accept_ra_rt_info_max_plen = 0,
212 #endif
213 #endif
214 	.proxy_ndp		= 0,
215 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
216 	.disable_ipv6		= 0,
217 	.accept_dad		= 0,
218 	.suppress_frag_ndisc	= 1,
219 	.accept_ra_mtu		= 1,
220 	.stable_secret		= {
221 		.initialized = false,
222 	},
223 	.use_oif_addrs_only	= 0,
224 	.ignore_routes_with_linkdown = 0,
225 	.keep_addr_on_down	= 0,
226 	.seg6_enabled		= 0,
227 #ifdef CONFIG_IPV6_SEG6_HMAC
228 	.seg6_require_hmac	= 0,
229 #endif
230 	.enhanced_dad           = 1,
231 	.addr_gen_mode		= IN6_ADDR_GEN_MODE_EUI64,
232 	.disable_policy		= 0,
233 	.rpl_seg_enabled	= 0,
234 	.ioam6_enabled		= 0,
235 	.ioam6_id               = IOAM6_DEFAULT_IF_ID,
236 	.ioam6_id_wide		= IOAM6_DEFAULT_IF_ID_WIDE,
237 	.ndisc_evict_nocarrier	= 1,
238 };
239 
240 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
241 	.forwarding		= 0,
242 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
243 	.mtu6			= IPV6_MIN_MTU,
244 	.accept_ra		= 1,
245 	.accept_redirects	= 1,
246 	.autoconf		= 1,
247 	.force_mld_version	= 0,
248 	.mldv1_unsolicited_report_interval = 10 * HZ,
249 	.mldv2_unsolicited_report_interval = HZ,
250 	.dad_transmits		= 1,
251 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
252 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
253 	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
254 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
255 	.use_tempaddr		= 0,
256 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
257 	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
258 	.regen_max_retry	= REGEN_MAX_RETRY,
259 	.max_desync_factor	= MAX_DESYNC_FACTOR,
260 	.max_addresses		= IPV6_MAX_ADDRESSES,
261 	.accept_ra_defrtr	= 1,
262 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
263 	.accept_ra_from_local	= 0,
264 	.accept_ra_min_hop_limit= 1,
265 	.accept_ra_pinfo	= 1,
266 #ifdef CONFIG_IPV6_ROUTER_PREF
267 	.accept_ra_rtr_pref	= 1,
268 	.rtr_probe_interval	= 60 * HZ,
269 #ifdef CONFIG_IPV6_ROUTE_INFO
270 	.accept_ra_rt_info_min_plen = 0,
271 	.accept_ra_rt_info_max_plen = 0,
272 #endif
273 #endif
274 	.proxy_ndp		= 0,
275 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
276 	.disable_ipv6		= 0,
277 	.accept_dad		= 1,
278 	.suppress_frag_ndisc	= 1,
279 	.accept_ra_mtu		= 1,
280 	.stable_secret		= {
281 		.initialized = false,
282 	},
283 	.use_oif_addrs_only	= 0,
284 	.ignore_routes_with_linkdown = 0,
285 	.keep_addr_on_down	= 0,
286 	.seg6_enabled		= 0,
287 #ifdef CONFIG_IPV6_SEG6_HMAC
288 	.seg6_require_hmac	= 0,
289 #endif
290 	.enhanced_dad           = 1,
291 	.addr_gen_mode		= IN6_ADDR_GEN_MODE_EUI64,
292 	.disable_policy		= 0,
293 	.rpl_seg_enabled	= 0,
294 	.ioam6_enabled		= 0,
295 	.ioam6_id               = IOAM6_DEFAULT_IF_ID,
296 	.ioam6_id_wide		= IOAM6_DEFAULT_IF_ID_WIDE,
297 	.ndisc_evict_nocarrier	= 1,
298 };
299 
300 /* Check if the link is ready: operationally up with a real (non-noop) qdisc attached */
301 static inline bool addrconf_link_ready(const struct net_device *dev)
302 {
303 	return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
304 }
305 
306 static void addrconf_del_rs_timer(struct inet6_dev *idev)
307 {
308 	if (del_timer(&idev->rs_timer))
309 		__in6_dev_put(idev);
310 }
311 
312 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
313 {
314 	if (cancel_delayed_work(&ifp->dad_work))
315 		__in6_ifa_put(ifp);
316 }
317 
318 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
319 				  unsigned long when)
320 {
321 	if (!timer_pending(&idev->rs_timer))
322 		in6_dev_hold(idev);
323 	mod_timer(&idev->rs_timer, jiffies + when);
324 }
325 
326 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
327 				   unsigned long delay)
328 {
329 	in6_ifa_hold(ifp);
330 	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
331 		in6_ifa_put(ifp);
332 }
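/*
 * The helpers above keep at most one reference on the inet6_dev (for the
 * RS timer) or inet6_ifaddr (for the DAD work) per pending timer/work
 * item: a reference is taken when the item goes from idle to pending,
 * and released again when a pending item is cancelled or re-armed.
 */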
333 
334 static int snmp6_alloc_dev(struct inet6_dev *idev)
335 {
336 	int i;
337 
338 	idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT);
339 	if (!idev->stats.ipv6)
340 		goto err_ip;
341 
342 	for_each_possible_cpu(i) {
343 		struct ipstats_mib *addrconf_stats;
344 		addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
345 		u64_stats_init(&addrconf_stats->syncp);
346 	}
347 
348 
349 	idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
350 					GFP_KERNEL);
351 	if (!idev->stats.icmpv6dev)
352 		goto err_icmp;
353 	idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
354 					   GFP_KERNEL_ACCOUNT);
355 	if (!idev->stats.icmpv6msgdev)
356 		goto err_icmpmsg;
357 
358 	return 0;
359 
360 err_icmpmsg:
361 	kfree(idev->stats.icmpv6dev);
362 err_icmp:
363 	free_percpu(idev->stats.ipv6);
364 err_ip:
365 	return -ENOMEM;
366 }
367 
368 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
369 {
370 	struct inet6_dev *ndev;
371 	int err = -ENOMEM;
372 
373 	ASSERT_RTNL();
374 
375 	if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
376 		return ERR_PTR(-EINVAL);
377 
378 	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT);
379 	if (!ndev)
380 		return ERR_PTR(err);
381 
382 	rwlock_init(&ndev->lock);
383 	ndev->dev = dev;
384 	INIT_LIST_HEAD(&ndev->addr_list);
385 	timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
386 	memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
387 
388 	if (ndev->cnf.stable_secret.initialized)
389 		ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
390 
391 	ndev->cnf.mtu6 = dev->mtu;
392 	ndev->ra_mtu = 0;
393 	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
394 	if (!ndev->nd_parms) {
395 		kfree(ndev);
396 		return ERR_PTR(err);
397 	}
398 	if (ndev->cnf.forwarding)
399 		dev_disable_lro(dev);
400 	/* We hold a reference to the device. */
401 	dev_hold_track(dev, &ndev->dev_tracker, GFP_KERNEL);
402 
403 	if (snmp6_alloc_dev(ndev) < 0) {
404 		netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
405 			   __func__);
406 		neigh_parms_release(&nd_tbl, ndev->nd_parms);
407 		dev_put_track(dev, &ndev->dev_tracker);
408 		kfree(ndev);
409 		return ERR_PTR(err);
410 	}
411 
412 	if (dev != blackhole_netdev) {
413 		if (snmp6_register_dev(ndev) < 0) {
414 			netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
415 				   __func__, dev->name);
416 			goto err_release;
417 		}
418 	}
419 	/* One reference from device. */
420 	refcount_set(&ndev->refcnt, 1);
421 
422 	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
423 		ndev->cnf.accept_dad = -1;
424 
425 #if IS_ENABLED(CONFIG_IPV6_SIT)
426 	if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
427 		pr_info("%s: Disabled Multicast RS\n", dev->name);
428 		ndev->cnf.rtr_solicits = 0;
429 	}
430 #endif
431 
432 	INIT_LIST_HEAD(&ndev->tempaddr_list);
433 	ndev->desync_factor = U32_MAX;
434 	if ((dev->flags&IFF_LOOPBACK) ||
435 	    dev->type == ARPHRD_TUNNEL ||
436 	    dev->type == ARPHRD_TUNNEL6 ||
437 	    dev->type == ARPHRD_SIT ||
438 	    dev->type == ARPHRD_NONE) {
439 		ndev->cnf.use_tempaddr = -1;
440 	}
441 
442 	ndev->token = in6addr_any;
443 
444 	if (netif_running(dev) && addrconf_link_ready(dev))
445 		ndev->if_flags |= IF_READY;
446 
447 	ipv6_mc_init_dev(ndev);
448 	ndev->tstamp = jiffies;
449 	if (dev != blackhole_netdev) {
450 		err = addrconf_sysctl_register(ndev);
451 		if (err) {
452 			ipv6_mc_destroy_dev(ndev);
453 			snmp6_unregister_dev(ndev);
454 			goto err_release;
455 		}
456 	}
457 	/* protected by rtnl_lock */
458 	rcu_assign_pointer(dev->ip6_ptr, ndev);
459 
460 	if (dev != blackhole_netdev) {
461 		/* Join interface-local all-node multicast group */
462 		ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
463 
464 		/* Join all-node multicast group */
465 		ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
466 
467 		/* Join all-router multicast group if forwarding is set */
468 		if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
469 			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
470 	}
471 	return ndev;
472 
473 err_release:
474 	neigh_parms_release(&nd_tbl, ndev->nd_parms);
475 	ndev->dead = 1;
476 	in6_dev_finish_destroy(ndev);
477 	return ERR_PTR(err);
478 }
479 
480 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
481 {
482 	struct inet6_dev *idev;
483 
484 	ASSERT_RTNL();
485 
486 	idev = __in6_dev_get(dev);
487 	if (!idev) {
488 		idev = ipv6_add_dev(dev);
489 		if (IS_ERR(idev))
490 			return idev;
491 	}
492 
493 	if (dev->flags&IFF_UP)
494 		ipv6_mc_up(idev);
495 	return idev;
496 }
497 
498 static int inet6_netconf_msgsize_devconf(int type)
499 {
500 	int size =  NLMSG_ALIGN(sizeof(struct netconfmsg))
501 		    + nla_total_size(4);	/* NETCONFA_IFINDEX */
502 	bool all = false;
503 
504 	if (type == NETCONFA_ALL)
505 		all = true;
506 
507 	if (all || type == NETCONFA_FORWARDING)
508 		size += nla_total_size(4);
509 #ifdef CONFIG_IPV6_MROUTE
510 	if (all || type == NETCONFA_MC_FORWARDING)
511 		size += nla_total_size(4);
512 #endif
513 	if (all || type == NETCONFA_PROXY_NEIGH)
514 		size += nla_total_size(4);
515 
516 	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
517 		size += nla_total_size(4);
518 
519 	return size;
520 }
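/*
 * Worst case (NETCONFA_ALL) the payload is the netconfmsg header plus
 * one 32-bit attribute each for NETCONFA_IFINDEX, FORWARDING,
 * MC_FORWARDING (with CONFIG_IPV6_MROUTE), PROXY_NEIGH and
 * IGNORE_ROUTES_WITH_LINKDOWN, i.e. at most 5 * nla_total_size(4) bytes
 * of attributes.
 */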
521 
522 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
523 				      struct ipv6_devconf *devconf, u32 portid,
524 				      u32 seq, int event, unsigned int flags,
525 				      int type)
526 {
527 	struct nlmsghdr  *nlh;
528 	struct netconfmsg *ncm;
529 	bool all = false;
530 
531 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
532 			flags);
533 	if (!nlh)
534 		return -EMSGSIZE;
535 
536 	if (type == NETCONFA_ALL)
537 		all = true;
538 
539 	ncm = nlmsg_data(nlh);
540 	ncm->ncm_family = AF_INET6;
541 
542 	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
543 		goto nla_put_failure;
544 
545 	if (!devconf)
546 		goto out;
547 
548 	if ((all || type == NETCONFA_FORWARDING) &&
549 	    nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
550 		goto nla_put_failure;
551 #ifdef CONFIG_IPV6_MROUTE
552 	if ((all || type == NETCONFA_MC_FORWARDING) &&
553 	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
554 			atomic_read(&devconf->mc_forwarding)) < 0)
555 		goto nla_put_failure;
556 #endif
557 	if ((all || type == NETCONFA_PROXY_NEIGH) &&
558 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
559 		goto nla_put_failure;
560 
561 	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
562 	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
563 			devconf->ignore_routes_with_linkdown) < 0)
564 		goto nla_put_failure;
565 
566 out:
567 	nlmsg_end(skb, nlh);
568 	return 0;
569 
570 nla_put_failure:
571 	nlmsg_cancel(skb, nlh);
572 	return -EMSGSIZE;
573 }
574 
575 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
576 				  int ifindex, struct ipv6_devconf *devconf)
577 {
578 	struct sk_buff *skb;
579 	int err = -ENOBUFS;
580 
581 	skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
582 	if (!skb)
583 		goto errout;
584 
585 	err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
586 					 event, 0, type);
587 	if (err < 0) {
588 		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
589 		WARN_ON(err == -EMSGSIZE);
590 		kfree_skb(skb);
591 		goto errout;
592 	}
593 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
594 	return;
595 errout:
596 	rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
597 }
598 
599 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
600 	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
601 	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
602 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
603 	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
604 };
605 
606 static int inet6_netconf_valid_get_req(struct sk_buff *skb,
607 				       const struct nlmsghdr *nlh,
608 				       struct nlattr **tb,
609 				       struct netlink_ext_ack *extack)
610 {
611 	int i, err;
612 
613 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
614 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
615 		return -EINVAL;
616 	}
617 
618 	if (!netlink_strict_get_check(skb))
619 		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
620 					      tb, NETCONFA_MAX,
621 					      devconf_ipv6_policy, extack);
622 
623 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
624 					    tb, NETCONFA_MAX,
625 					    devconf_ipv6_policy, extack);
626 	if (err)
627 		return err;
628 
629 	for (i = 0; i <= NETCONFA_MAX; i++) {
630 		if (!tb[i])
631 			continue;
632 
633 		switch (i) {
634 		case NETCONFA_IFINDEX:
635 			break;
636 		default:
637 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
638 			return -EINVAL;
639 		}
640 	}
641 
642 	return 0;
643 }
644 
645 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
646 				     struct nlmsghdr *nlh,
647 				     struct netlink_ext_ack *extack)
648 {
649 	struct net *net = sock_net(in_skb->sk);
650 	struct nlattr *tb[NETCONFA_MAX+1];
651 	struct inet6_dev *in6_dev = NULL;
652 	struct net_device *dev = NULL;
653 	struct sk_buff *skb;
654 	struct ipv6_devconf *devconf;
655 	int ifindex;
656 	int err;
657 
658 	err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
659 	if (err < 0)
660 		return err;
661 
662 	if (!tb[NETCONFA_IFINDEX])
663 		return -EINVAL;
664 
665 	err = -EINVAL;
666 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
667 	switch (ifindex) {
668 	case NETCONFA_IFINDEX_ALL:
669 		devconf = net->ipv6.devconf_all;
670 		break;
671 	case NETCONFA_IFINDEX_DEFAULT:
672 		devconf = net->ipv6.devconf_dflt;
673 		break;
674 	default:
675 		dev = dev_get_by_index(net, ifindex);
676 		if (!dev)
677 			return -EINVAL;
678 		in6_dev = in6_dev_get(dev);
679 		if (!in6_dev)
680 			goto errout;
681 		devconf = &in6_dev->cnf;
682 		break;
683 	}
684 
685 	err = -ENOBUFS;
686 	skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
687 	if (!skb)
688 		goto errout;
689 
690 	err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
691 					 NETLINK_CB(in_skb).portid,
692 					 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
693 					 NETCONFA_ALL);
694 	if (err < 0) {
695 		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
696 		WARN_ON(err == -EMSGSIZE);
697 		kfree_skb(skb);
698 		goto errout;
699 	}
700 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
701 errout:
702 	if (in6_dev)
703 		in6_dev_put(in6_dev);
704 	dev_put(dev);
705 	return err;
706 }
707 
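/*
 * Dump side of RTM_GETNETCONF: walk every net_device hash bucket and
 * then emit two pseudo-entries, NETCONFA_IFINDEX_ALL and
 * NETCONFA_IFINDEX_DEFAULT, using bucket indices NETDEV_HASHENTRIES and
 * NETDEV_HASHENTRIES + 1 in cb->args[0] so an interrupted dump resumes
 * at the right position.
 */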
708 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
709 				      struct netlink_callback *cb)
710 {
711 	const struct nlmsghdr *nlh = cb->nlh;
712 	struct net *net = sock_net(skb->sk);
713 	int h, s_h;
714 	int idx, s_idx;
715 	struct net_device *dev;
716 	struct inet6_dev *idev;
717 	struct hlist_head *head;
718 
719 	if (cb->strict_check) {
720 		struct netlink_ext_ack *extack = cb->extack;
721 		struct netconfmsg *ncm;
722 
723 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
724 			NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
725 			return -EINVAL;
726 		}
727 
728 		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
729 			NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
730 			return -EINVAL;
731 		}
732 	}
733 
734 	s_h = cb->args[0];
735 	s_idx = idx = cb->args[1];
736 
737 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
738 		idx = 0;
739 		head = &net->dev_index_head[h];
740 		rcu_read_lock();
741 		cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
742 			  net->dev_base_seq;
743 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
744 			if (idx < s_idx)
745 				goto cont;
746 			idev = __in6_dev_get(dev);
747 			if (!idev)
748 				goto cont;
749 
750 			if (inet6_netconf_fill_devconf(skb, dev->ifindex,
751 						       &idev->cnf,
752 						       NETLINK_CB(cb->skb).portid,
753 						       nlh->nlmsg_seq,
754 						       RTM_NEWNETCONF,
755 						       NLM_F_MULTI,
756 						       NETCONFA_ALL) < 0) {
757 				rcu_read_unlock();
758 				goto done;
759 			}
760 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
761 cont:
762 			idx++;
763 		}
764 		rcu_read_unlock();
765 	}
766 	if (h == NETDEV_HASHENTRIES) {
767 		if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
768 					       net->ipv6.devconf_all,
769 					       NETLINK_CB(cb->skb).portid,
770 					       nlh->nlmsg_seq,
771 					       RTM_NEWNETCONF, NLM_F_MULTI,
772 					       NETCONFA_ALL) < 0)
773 			goto done;
774 		else
775 			h++;
776 	}
777 	if (h == NETDEV_HASHENTRIES + 1) {
778 		if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
779 					       net->ipv6.devconf_dflt,
780 					       NETLINK_CB(cb->skb).portid,
781 					       nlh->nlmsg_seq,
782 					       RTM_NEWNETCONF, NLM_F_MULTI,
783 					       NETCONFA_ALL) < 0)
784 			goto done;
785 		else
786 			h++;
787 	}
788 done:
789 	cb->args[0] = h;
790 	cb->args[1] = idx;
791 
792 	return skb->len;
793 }
794 
795 #ifdef CONFIG_SYSCTL
796 static void dev_forward_change(struct inet6_dev *idev)
797 {
798 	struct net_device *dev;
799 	struct inet6_ifaddr *ifa;
800 	LIST_HEAD(tmp_addr_list);
801 
802 	if (!idev)
803 		return;
804 	dev = idev->dev;
805 	if (idev->cnf.forwarding)
806 		dev_disable_lro(dev);
807 	if (dev->flags & IFF_MULTICAST) {
808 		if (idev->cnf.forwarding) {
809 			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
810 			ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
811 			ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
812 		} else {
813 			ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
814 			ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
815 			ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
816 		}
817 	}
818 
819 	read_lock_bh(&idev->lock);
820 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
821 		if (ifa->flags&IFA_F_TENTATIVE)
822 			continue;
823 		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
824 	}
825 	read_unlock_bh(&idev->lock);
826 
827 	while (!list_empty(&tmp_addr_list)) {
828 		ifa = list_first_entry(&tmp_addr_list,
829 				       struct inet6_ifaddr, if_list_aux);
830 		list_del(&ifa->if_list_aux);
831 		if (idev->cnf.forwarding)
832 			addrconf_join_anycast(ifa);
833 		else
834 			addrconf_leave_anycast(ifa);
835 	}
836 
837 	inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
838 				     NETCONFA_FORWARDING,
839 				     dev->ifindex, &idev->cnf);
840 }
841 
842 
843 static void addrconf_forward_change(struct net *net, __s32 newf)
844 {
845 	struct net_device *dev;
846 	struct inet6_dev *idev;
847 
848 	for_each_netdev(net, dev) {
849 		idev = __in6_dev_get(dev);
850 		if (idev) {
851 			int changed = (!idev->cnf.forwarding) ^ (!newf);
852 			idev->cnf.forwarding = newf;
853 			if (changed)
854 				dev_forward_change(idev);
855 		}
856 	}
857 }
858 
859 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
860 {
861 	struct net *net;
862 	int old;
863 
864 	if (!rtnl_trylock())
865 		return restart_syscall();
866 
867 	net = (struct net *)table->extra2;
868 	old = *p;
869 	*p = newf;
870 
871 	if (p == &net->ipv6.devconf_dflt->forwarding) {
872 		if ((!newf) ^ (!old))
873 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
874 						     NETCONFA_FORWARDING,
875 						     NETCONFA_IFINDEX_DEFAULT,
876 						     net->ipv6.devconf_dflt);
877 		rtnl_unlock();
878 		return 0;
879 	}
880 
881 	if (p == &net->ipv6.devconf_all->forwarding) {
882 		int old_dflt = net->ipv6.devconf_dflt->forwarding;
883 
884 		net->ipv6.devconf_dflt->forwarding = newf;
885 		if ((!newf) ^ (!old_dflt))
886 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
887 						     NETCONFA_FORWARDING,
888 						     NETCONFA_IFINDEX_DEFAULT,
889 						     net->ipv6.devconf_dflt);
890 
891 		addrconf_forward_change(net, newf);
892 		if ((!newf) ^ (!old))
893 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
894 						     NETCONFA_FORWARDING,
895 						     NETCONFA_IFINDEX_ALL,
896 						     net->ipv6.devconf_all);
897 	} else if ((!newf) ^ (!old))
898 		dev_forward_change((struct inet6_dev *)table->extra1);
899 	rtnl_unlock();
900 
901 	if (newf)
902 		rt6_purge_dflt_routers(net);
903 	return 1;
904 }
905 
906 static void addrconf_linkdown_change(struct net *net, __s32 newf)
907 {
908 	struct net_device *dev;
909 	struct inet6_dev *idev;
910 
911 	for_each_netdev(net, dev) {
912 		idev = __in6_dev_get(dev);
913 		if (idev) {
914 			int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
915 
916 			idev->cnf.ignore_routes_with_linkdown = newf;
917 			if (changed)
918 				inet6_netconf_notify_devconf(dev_net(dev),
919 							     RTM_NEWNETCONF,
920 							     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
921 							     dev->ifindex,
922 							     &idev->cnf);
923 		}
924 	}
925 }
926 
927 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
928 {
929 	struct net *net;
930 	int old;
931 
932 	if (!rtnl_trylock())
933 		return restart_syscall();
934 
935 	net = (struct net *)table->extra2;
936 	old = *p;
937 	*p = newf;
938 
939 	if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
940 		if ((!newf) ^ (!old))
941 			inet6_netconf_notify_devconf(net,
942 						     RTM_NEWNETCONF,
943 						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
944 						     NETCONFA_IFINDEX_DEFAULT,
945 						     net->ipv6.devconf_dflt);
946 		rtnl_unlock();
947 		return 0;
948 	}
949 
950 	if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
951 		net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
952 		addrconf_linkdown_change(net, newf);
953 		if ((!newf) ^ (!old))
954 			inet6_netconf_notify_devconf(net,
955 						     RTM_NEWNETCONF,
956 						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
957 						     NETCONFA_IFINDEX_ALL,
958 						     net->ipv6.devconf_all);
959 	}
960 	rtnl_unlock();
961 
962 	return 1;
963 }
964 
965 #endif
966 
967 /* Nobody refers to this ifaddr, destroy it */
968 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
969 {
970 	WARN_ON(!hlist_unhashed(&ifp->addr_lst));
971 
972 #ifdef NET_REFCNT_DEBUG
973 	pr_debug("%s\n", __func__);
974 #endif
975 
976 	in6_dev_put(ifp->idev);
977 
978 	if (cancel_delayed_work(&ifp->dad_work))
979 		pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
980 			  ifp);
981 
982 	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
983 		pr_warn("Freeing alive inet6 address %p\n", ifp);
984 		return;
985 	}
986 
987 	kfree_rcu(ifp, rcu);
988 }
989 
990 static void
991 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
992 {
993 	struct list_head *p;
994 	int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
995 
996 	/*
997 	 * Each device address list is sorted in order of scope -
998 	 * global before linklocal.
999 	 */
1000 	list_for_each(p, &idev->addr_list) {
1001 		struct inet6_ifaddr *ifa
1002 			= list_entry(p, struct inet6_ifaddr, if_list);
1003 		if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
1004 			break;
1005 	}
1006 
1007 	list_add_tail_rcu(&ifp->if_list, p);
1008 }
1009 
1010 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1011 {
1012 	u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
1013 
1014 	return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1015 }
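/*
 * net_hash_mix() salts the hash per network namespace so that the
 * bucket distribution differs between namespaces; the result is folded
 * down to IN6_ADDR_HSIZE_SHIFT bits to index inet6_addr_lst[].
 */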
1016 
1017 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1018 			       struct net_device *dev, unsigned int hash)
1019 {
1020 	struct inet6_ifaddr *ifp;
1021 
1022 	hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1023 		if (ipv6_addr_equal(&ifp->addr, addr)) {
1024 			if (!dev || ifp->idev->dev == dev)
1025 				return true;
1026 		}
1027 	}
1028 	return false;
1029 }
1030 
1031 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1032 {
1033 	struct net *net = dev_net(dev);
1034 	unsigned int hash = inet6_addr_hash(net, &ifa->addr);
1035 	int err = 0;
1036 
1037 	spin_lock(&net->ipv6.addrconf_hash_lock);
1038 
1039 	/* Ignore adding duplicate addresses on an interface */
1040 	if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) {
1041 		netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1042 		err = -EEXIST;
1043 	} else {
1044 		hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]);
1045 	}
1046 
1047 	spin_unlock(&net->ipv6.addrconf_hash_lock);
1048 
1049 	return err;
1050 }
1051 
1052 /* On success it returns ifp with an increased reference count. */
1053 
1054 static struct inet6_ifaddr *
1055 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1056 	      bool can_block, struct netlink_ext_ack *extack)
1057 {
1058 	gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1059 	int addr_type = ipv6_addr_type(cfg->pfx);
1060 	struct net *net = dev_net(idev->dev);
1061 	struct inet6_ifaddr *ifa = NULL;
1062 	struct fib6_info *f6i = NULL;
1063 	int err = 0;
1064 
1065 	if (addr_type == IPV6_ADDR_ANY ||
1066 	    (addr_type & IPV6_ADDR_MULTICAST &&
1067 	     !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1068 	    (!(idev->dev->flags & IFF_LOOPBACK) &&
1069 	     !netif_is_l3_master(idev->dev) &&
1070 	     addr_type & IPV6_ADDR_LOOPBACK))
1071 		return ERR_PTR(-EADDRNOTAVAIL);
1072 
1073 	if (idev->dead) {
1074 		err = -ENODEV;			/*XXX*/
1075 		goto out;
1076 	}
1077 
1078 	if (idev->cnf.disable_ipv6) {
1079 		err = -EACCES;
1080 		goto out;
1081 	}
1082 
1083 	/* validator notifier needs to be blocking;
1084 	 * do not call in atomic context
1085 	 */
1086 	if (can_block) {
1087 		struct in6_validator_info i6vi = {
1088 			.i6vi_addr = *cfg->pfx,
1089 			.i6vi_dev = idev,
1090 			.extack = extack,
1091 		};
1092 
1093 		err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1094 		err = notifier_to_errno(err);
1095 		if (err < 0)
1096 			goto out;
1097 	}
1098 
1099 	ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT);
1100 	if (!ifa) {
1101 		err = -ENOBUFS;
1102 		goto out;
1103 	}
1104 
1105 	f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags);
1106 	if (IS_ERR(f6i)) {
1107 		err = PTR_ERR(f6i);
1108 		f6i = NULL;
1109 		goto out;
1110 	}
1111 
1112 	if (net->ipv6.devconf_all->disable_policy ||
1113 	    idev->cnf.disable_policy)
1114 		f6i->dst_nopolicy = true;
1115 
1116 	neigh_parms_data_state_setall(idev->nd_parms);
1117 
1118 	ifa->addr = *cfg->pfx;
1119 	if (cfg->peer_pfx)
1120 		ifa->peer_addr = *cfg->peer_pfx;
1121 
1122 	spin_lock_init(&ifa->lock);
1123 	INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1124 	INIT_HLIST_NODE(&ifa->addr_lst);
1125 	ifa->scope = cfg->scope;
1126 	ifa->prefix_len = cfg->plen;
1127 	ifa->rt_priority = cfg->rt_priority;
1128 	ifa->flags = cfg->ifa_flags;
1129 	ifa->ifa_proto = cfg->ifa_proto;
1130 	/* No need to add the TENTATIVE flag for addresses with NODAD */
1131 	if (!(cfg->ifa_flags & IFA_F_NODAD))
1132 		ifa->flags |= IFA_F_TENTATIVE;
1133 	ifa->valid_lft = cfg->valid_lft;
1134 	ifa->prefered_lft = cfg->preferred_lft;
1135 	ifa->cstamp = ifa->tstamp = jiffies;
1136 	ifa->tokenized = false;
1137 
1138 	ifa->rt = f6i;
1139 
1140 	ifa->idev = idev;
1141 	in6_dev_hold(idev);
1142 
1143 	/* For caller */
1144 	refcount_set(&ifa->refcnt, 1);
1145 
1146 	rcu_read_lock_bh();
1147 
1148 	err = ipv6_add_addr_hash(idev->dev, ifa);
1149 	if (err < 0) {
1150 		rcu_read_unlock_bh();
1151 		goto out;
1152 	}
1153 
1154 	write_lock(&idev->lock);
1155 
1156 	/* Add to inet6_dev unicast addr list. */
1157 	ipv6_link_dev_addr(idev, ifa);
1158 
1159 	if (ifa->flags&IFA_F_TEMPORARY) {
1160 		list_add(&ifa->tmp_list, &idev->tempaddr_list);
1161 		in6_ifa_hold(ifa);
1162 	}
1163 
1164 	in6_ifa_hold(ifa);
1165 	write_unlock(&idev->lock);
1166 
1167 	rcu_read_unlock_bh();
1168 
1169 	inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1170 out:
1171 	if (unlikely(err < 0)) {
1172 		fib6_info_release(f6i);
1173 
1174 		if (ifa) {
1175 			if (ifa->idev)
1176 				in6_dev_put(ifa->idev);
1177 			kfree(ifa);
1178 		}
1179 		ifa = ERR_PTR(err);
1180 	}
1181 
1182 	return ifa;
1183 }
1184 
1185 enum cleanup_prefix_rt_t {
1186 	CLEANUP_PREFIX_RT_NOP,    /* no cleanup action for prefix route */
1187 	CLEANUP_PREFIX_RT_DEL,    /* delete the prefix route */
1188 	CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1189 };
1190 
1191 /*
1192  * Check whether the prefix for ifp would still need a prefix route
1193  * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1194  * constants.
1195  *
1196  * 1) we don't purge the prefix if the address was not permanent;
1197  *    the prefix is managed by its own lifetime.
1198  * 2) we also don't purge if the address was IFA_F_NOPREFIXROUTE.
1199  * 3) if there are no addresses left, delete the prefix route.
1200  * 4) if there are still other permanent address(es),
1201  *    the corresponding prefix route is still permanent.
1202  * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1203  *    don't purge the prefix; assume user space is managing it.
1204  * 6) otherwise, update prefix lifetime to the
1205  *    longest valid lifetime among the corresponding
1206  *    addresses on the device.
1207  *    Note: subsequent RA will update lifetime.
1208  **/
1209 static enum cleanup_prefix_rt_t
1210 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1211 {
1212 	struct inet6_ifaddr *ifa;
1213 	struct inet6_dev *idev = ifp->idev;
1214 	unsigned long lifetime;
1215 	enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1216 
1217 	*expires = jiffies;
1218 
1219 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
1220 		if (ifa == ifp)
1221 			continue;
1222 		if (ifa->prefix_len != ifp->prefix_len ||
1223 		    !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1224 				       ifp->prefix_len))
1225 			continue;
1226 		if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1227 			return CLEANUP_PREFIX_RT_NOP;
1228 
1229 		action = CLEANUP_PREFIX_RT_EXPIRE;
1230 
1231 		spin_lock(&ifa->lock);
1232 
1233 		lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1234 		/*
1235 		 * Note: Because this address is
1236 		 * not permanent, lifetime <
1237 		 * LONG_MAX / HZ here.
1238 		 */
1239 		if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1240 			*expires = ifa->tstamp + lifetime * HZ;
1241 		spin_unlock(&ifa->lock);
1242 	}
1243 
1244 	return action;
1245 }
1246 
1247 static void
1248 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1249 		     bool del_rt, bool del_peer)
1250 {
1251 	struct fib6_info *f6i;
1252 
1253 	f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1254 					ifp->prefix_len,
1255 					ifp->idev->dev, 0, RTF_DEFAULT, true);
1256 	if (f6i) {
1257 		if (del_rt)
1258 			ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1259 		else {
1260 			if (!(f6i->fib6_flags & RTF_EXPIRES))
1261 				fib6_set_expires(f6i, expires);
1262 			fib6_info_release(f6i);
1263 		}
1264 	}
1265 }
1266 
1267 
1268 /* This function expects a referenced ifp and releases that reference before returning. */
1269 
1270 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1271 {
1272 	enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1273 	struct net *net = dev_net(ifp->idev->dev);
1274 	unsigned long expires;
1275 	int state;
1276 
1277 	ASSERT_RTNL();
1278 
1279 	spin_lock_bh(&ifp->lock);
1280 	state = ifp->state;
1281 	ifp->state = INET6_IFADDR_STATE_DEAD;
1282 	spin_unlock_bh(&ifp->lock);
1283 
1284 	if (state == INET6_IFADDR_STATE_DEAD)
1285 		goto out;
1286 
1287 	spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1288 	hlist_del_init_rcu(&ifp->addr_lst);
1289 	spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1290 
1291 	write_lock_bh(&ifp->idev->lock);
1292 
1293 	if (ifp->flags&IFA_F_TEMPORARY) {
1294 		list_del(&ifp->tmp_list);
1295 		if (ifp->ifpub) {
1296 			in6_ifa_put(ifp->ifpub);
1297 			ifp->ifpub = NULL;
1298 		}
1299 		__in6_ifa_put(ifp);
1300 	}
1301 
1302 	if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1303 		action = check_cleanup_prefix_route(ifp, &expires);
1304 
1305 	list_del_rcu(&ifp->if_list);
1306 	__in6_ifa_put(ifp);
1307 
1308 	write_unlock_bh(&ifp->idev->lock);
1309 
1310 	addrconf_del_dad_work(ifp);
1311 
1312 	ipv6_ifa_notify(RTM_DELADDR, ifp);
1313 
1314 	inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1315 
1316 	if (action != CLEANUP_PREFIX_RT_NOP) {
1317 		cleanup_prefix_route(ifp, expires,
1318 			action == CLEANUP_PREFIX_RT_DEL, false);
1319 	}
1320 
1321 	/* clean up prefsrc entries */
1322 	rt6_remove_prefsrc(ifp);
1323 out:
1324 	in6_ifa_put(ifp);
1325 }
1326 
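/*
 * Generate a temporary (privacy) address for the public address ifp,
 * along the lines of RFC 3041/4941: the prefix (first 64 bits) of ifp is
 * kept and the interface identifier is replaced by a random one, while
 * the valid/preferred lifetimes are clamped to the temp_valid_lft and
 * temp_prefered_lft sysctls (minus the per-device desync factor).
 */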
1327 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
1328 {
1329 	struct inet6_dev *idev = ifp->idev;
1330 	unsigned long tmp_tstamp, age;
1331 	unsigned long regen_advance;
1332 	unsigned long now = jiffies;
1333 	s32 cnf_temp_preferred_lft;
1334 	struct inet6_ifaddr *ift;
1335 	struct ifa6_config cfg;
1336 	long max_desync_factor;
1337 	struct in6_addr addr;
1338 	int ret = 0;
1339 
1340 	write_lock_bh(&idev->lock);
1341 
1342 retry:
1343 	in6_dev_hold(idev);
1344 	if (idev->cnf.use_tempaddr <= 0) {
1345 		write_unlock_bh(&idev->lock);
1346 		pr_info("%s: use_tempaddr is disabled\n", __func__);
1347 		in6_dev_put(idev);
1348 		ret = -1;
1349 		goto out;
1350 	}
1351 	spin_lock_bh(&ifp->lock);
1352 	if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1353 		idev->cnf.use_tempaddr = -1;	/*XXX*/
1354 		spin_unlock_bh(&ifp->lock);
1355 		write_unlock_bh(&idev->lock);
1356 		pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1357 			__func__);
1358 		in6_dev_put(idev);
1359 		ret = -1;
1360 		goto out;
1361 	}
1362 	in6_ifa_hold(ifp);
1363 	memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1364 	ipv6_gen_rnd_iid(&addr);
1365 
1366 	age = (now - ifp->tstamp) / HZ;
1367 
1368 	regen_advance = idev->cnf.regen_max_retry *
1369 			idev->cnf.dad_transmits *
1370 			max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1371 
1372 	/* recalculate max_desync_factor each time and update
1373 	 * idev->desync_factor if it's larger
1374 	 */
1375 	cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1376 	max_desync_factor = min_t(__u32,
1377 				  idev->cnf.max_desync_factor,
1378 				  cnf_temp_preferred_lft - regen_advance);
1379 
1380 	if (unlikely(idev->desync_factor > max_desync_factor)) {
1381 		if (max_desync_factor > 0) {
1382 			get_random_bytes(&idev->desync_factor,
1383 					 sizeof(idev->desync_factor));
1384 			idev->desync_factor %= max_desync_factor;
1385 		} else {
1386 			idev->desync_factor = 0;
1387 		}
1388 	}
1389 
1390 	memset(&cfg, 0, sizeof(cfg));
1391 	cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1392 			      idev->cnf.temp_valid_lft + age);
1393 	cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1394 	cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
1395 
1396 	cfg.plen = ifp->prefix_len;
1397 	tmp_tstamp = ifp->tstamp;
1398 	spin_unlock_bh(&ifp->lock);
1399 
1400 	write_unlock_bh(&idev->lock);
1401 
1402 	/* A temporary address is created only if this calculated Preferred
1403 	 * Lifetime is greater than REGEN_ADVANCE time units.  In particular,
1404 	 * an implementation must not create a temporary address with a zero
1405 	 * Preferred Lifetime.
1406 	 * Use age calculation as in addrconf_verify to avoid unnecessary
1407 	 * temporary addresses being generated.
1408 	 */
1409 	age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1410 	if (cfg.preferred_lft <= regen_advance + age) {
1411 		in6_ifa_put(ifp);
1412 		in6_dev_put(idev);
1413 		ret = -1;
1414 		goto out;
1415 	}
1416 
1417 	cfg.ifa_flags = IFA_F_TEMPORARY;
1418 	/* set in addrconf_prefix_rcv() */
1419 	if (ifp->flags & IFA_F_OPTIMISTIC)
1420 		cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1421 
1422 	cfg.pfx = &addr;
1423 	cfg.scope = ipv6_addr_scope(cfg.pfx);
1424 
1425 	ift = ipv6_add_addr(idev, &cfg, block, NULL);
1426 	if (IS_ERR(ift)) {
1427 		in6_ifa_put(ifp);
1428 		in6_dev_put(idev);
1429 		pr_info("%s: retry temporary address regeneration\n", __func__);
1430 		write_lock_bh(&idev->lock);
1431 		goto retry;
1432 	}
1433 
1434 	spin_lock_bh(&ift->lock);
1435 	ift->ifpub = ifp;
1436 	ift->cstamp = now;
1437 	ift->tstamp = tmp_tstamp;
1438 	spin_unlock_bh(&ift->lock);
1439 
1440 	addrconf_dad_start(ift);
1441 	in6_ifa_put(ift);
1442 	in6_dev_put(idev);
1443 out:
1444 	return ret;
1445 }
1446 
1447 /*
1448  *	Choose an appropriate source address (RFC3484)
1449  */
1450 enum {
1451 	IPV6_SADDR_RULE_INIT = 0,
1452 	IPV6_SADDR_RULE_LOCAL,
1453 	IPV6_SADDR_RULE_SCOPE,
1454 	IPV6_SADDR_RULE_PREFERRED,
1455 #ifdef CONFIG_IPV6_MIP6
1456 	IPV6_SADDR_RULE_HOA,
1457 #endif
1458 	IPV6_SADDR_RULE_OIF,
1459 	IPV6_SADDR_RULE_LABEL,
1460 	IPV6_SADDR_RULE_PRIVACY,
1461 	IPV6_SADDR_RULE_ORCHID,
1462 	IPV6_SADDR_RULE_PREFIX,
1463 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1464 	IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1465 #endif
1466 	IPV6_SADDR_RULE_MAX
1467 };
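/*
 * The rules above are evaluated in order and roughly follow the source
 * address selection rules of RFC 3484 / RFC 6724 section 5; rule 0 is
 * internal bookkeeping, and the trailing NOT_OPTIMISTIC rule (only with
 * CONFIG_IPV6_OPTIMISTIC_DAD) additionally de-prioritizes optimistic
 * addresses.
 */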
1468 
1469 struct ipv6_saddr_score {
1470 	int			rule;
1471 	int			addr_type;
1472 	struct inet6_ifaddr	*ifa;
1473 	DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1474 	int			scopedist;
1475 	int			matchlen;
1476 };
1477 
1478 struct ipv6_saddr_dst {
1479 	const struct in6_addr *addr;
1480 	int ifindex;
1481 	int scope;
1482 	int label;
1483 	unsigned int prefs;
1484 };
1485 
1486 static inline int ipv6_saddr_preferred(int type)
1487 {
1488 	if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1489 		return 1;
1490 	return 0;
1491 }
1492 
1493 static bool ipv6_use_optimistic_addr(struct net *net,
1494 				     struct inet6_dev *idev)
1495 {
1496 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1497 	if (!idev)
1498 		return false;
1499 	if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1500 		return false;
1501 	if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1502 		return false;
1503 
1504 	return true;
1505 #else
1506 	return false;
1507 #endif
1508 }
1509 
1510 static bool ipv6_allow_optimistic_dad(struct net *net,
1511 				      struct inet6_dev *idev)
1512 {
1513 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1514 	if (!idev)
1515 		return false;
1516 	if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1517 		return false;
1518 
1519 	return true;
1520 #else
1521 	return false;
1522 #endif
1523 }
1524 
1525 static int ipv6_get_saddr_eval(struct net *net,
1526 			       struct ipv6_saddr_score *score,
1527 			       struct ipv6_saddr_dst *dst,
1528 			       int i)
1529 {
1530 	int ret;
1531 
1532 	if (i <= score->rule) {
1533 		switch (i) {
1534 		case IPV6_SADDR_RULE_SCOPE:
1535 			ret = score->scopedist;
1536 			break;
1537 		case IPV6_SADDR_RULE_PREFIX:
1538 			ret = score->matchlen;
1539 			break;
1540 		default:
1541 			ret = !!test_bit(i, score->scorebits);
1542 		}
1543 		goto out;
1544 	}
1545 
1546 	switch (i) {
1547 	case IPV6_SADDR_RULE_INIT:
1548 		/* Rule 0: remember if hiscore is not ready yet */
1549 		ret = !!score->ifa;
1550 		break;
1551 	case IPV6_SADDR_RULE_LOCAL:
1552 		/* Rule 1: Prefer same address */
1553 		ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1554 		break;
1555 	case IPV6_SADDR_RULE_SCOPE:
1556 		/* Rule 2: Prefer appropriate scope
1557 		 *
1558 		 *      ret
1559 		 *       ^
1560 		 *    -1 |  d 15
1561 		 *    ---+--+-+---> scope
1562 		 *       |
1563 		 *       |             d is scope of the destination.
1564 		 *  B-d  |  \
1565 		 *       |   \      <- smaller scope is better
1566 		 *  B-15 |    \        if scope is enough for destination.
1567 		 *       |             ret = B - scope (-1 <= d <= scope <= 15).
1568 		 * d-C-1 | /
1569 		 *       |/         <- greater is better
1570 		 *   -C  /             if scope is not enough for destination.
1571 		 *      /|             ret = scope - C (-1 <= scope < d <= 15).
1572 		 *
1573 		 * d - C - 1 < B - 15 (for all -1 <= d <= 15).
1574 		 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1575 		 * Assume B = 0 and we get C > 29.
1576 		 */
1577 		ret = __ipv6_addr_src_scope(score->addr_type);
1578 		if (ret >= dst->scope)
1579 			ret = -ret;
1580 		else
1581 			ret -= 128;	/* 30 is enough */
1582 		score->scopedist = ret;
1583 		break;
1584 	case IPV6_SADDR_RULE_PREFERRED:
1585 	    {
1586 		/* Rule 3: Avoid deprecated and optimistic addresses */
1587 		u8 avoid = IFA_F_DEPRECATED;
1588 
1589 		if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1590 			avoid |= IFA_F_OPTIMISTIC;
1591 		ret = ipv6_saddr_preferred(score->addr_type) ||
1592 		      !(score->ifa->flags & avoid);
1593 		break;
1594 	    }
1595 #ifdef CONFIG_IPV6_MIP6
1596 	case IPV6_SADDR_RULE_HOA:
1597 	    {
1598 		/* Rule 4: Prefer home address */
1599 		int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1600 		ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1601 		break;
1602 	    }
1603 #endif
1604 	case IPV6_SADDR_RULE_OIF:
1605 		/* Rule 5: Prefer outgoing interface */
1606 		ret = (!dst->ifindex ||
1607 		       dst->ifindex == score->ifa->idev->dev->ifindex);
1608 		break;
1609 	case IPV6_SADDR_RULE_LABEL:
1610 		/* Rule 6: Prefer matching label */
1611 		ret = ipv6_addr_label(net,
1612 				      &score->ifa->addr, score->addr_type,
1613 				      score->ifa->idev->dev->ifindex) == dst->label;
1614 		break;
1615 	case IPV6_SADDR_RULE_PRIVACY:
1616 	    {
1617 		/* Rule 7: Prefer public address
1618 		 * Note: prefer temporary address if use_tempaddr >= 2
1619 		 */
1620 		int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1621 				!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1622 				score->ifa->idev->cnf.use_tempaddr >= 2;
1623 		ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1624 		break;
1625 	    }
1626 	case IPV6_SADDR_RULE_ORCHID:
1627 		/* Rule 8-: Prefer ORCHID vs ORCHID or
1628 		 *	    non-ORCHID vs non-ORCHID
1629 		 */
1630 		ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1631 			ipv6_addr_orchid(dst->addr));
1632 		break;
1633 	case IPV6_SADDR_RULE_PREFIX:
1634 		/* Rule 8: Use longest matching prefix */
1635 		ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1636 		if (ret > score->ifa->prefix_len)
1637 			ret = score->ifa->prefix_len;
1638 		score->matchlen = ret;
1639 		break;
1640 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1641 	case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1642 		/* Optimistic addresses still have lower precedence than other
1643 		 * preferred addresses.
1644 		 */
1645 		ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1646 		break;
1647 #endif
1648 	default:
1649 		ret = 0;
1650 	}
1651 
1652 	if (ret)
1653 		__set_bit(i, score->scorebits);
1654 	score->rule = i;
1655 out:
1656 	return ret;
1657 }
1658 
1659 static int __ipv6_dev_get_saddr(struct net *net,
1660 				struct ipv6_saddr_dst *dst,
1661 				struct inet6_dev *idev,
1662 				struct ipv6_saddr_score *scores,
1663 				int hiscore_idx)
1664 {
1665 	struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1666 
1667 	list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1668 		int i;
1669 
1670 		/*
1671 		 * - Tentative Address (RFC2462 section 5.4)
1672 		 *  - A tentative address is not considered
1673 		 *    "assigned to an interface" in the traditional
1674 		 *    sense, unless it is also flagged as optimistic.
1675 		 * - Candidate Source Address (section 4)
1676 		 *  - In any case, anycast addresses, multicast
1677 		 *    addresses, and the unspecified address MUST
1678 		 *    NOT be included in a candidate set.
1679 		 */
1680 		if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1681 		    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1682 			continue;
1683 
1684 		score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1685 
1686 		if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1687 			     score->addr_type & IPV6_ADDR_MULTICAST)) {
1688 			net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1689 					    idev->dev->name);
1690 			continue;
1691 		}
1692 
1693 		score->rule = -1;
1694 		bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1695 
1696 		for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1697 			int minihiscore, miniscore;
1698 
1699 			minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1700 			miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1701 
1702 			if (minihiscore > miniscore) {
1703 				if (i == IPV6_SADDR_RULE_SCOPE &&
1704 				    score->scopedist > 0) {
1705 					/*
1706 					 * special case:
1707 					 * each remaining entry
1708 					 * has too small (not enough)
1709 					 * scope, because ifa entries
1710 					 * are sorted by their scope
1711 					 * values.
1712 					 */
1713 					goto out;
1714 				}
1715 				break;
1716 			} else if (minihiscore < miniscore) {
1717 				swap(hiscore, score);
1718 				hiscore_idx = 1 - hiscore_idx;
1719 
1720 				/* restore our iterator */
1721 				score->ifa = hiscore->ifa;
1722 
1723 				break;
1724 			}
1725 		}
1726 	}
1727 out:
1728 	return hiscore_idx;
1729 }
1730 
1731 static int ipv6_get_saddr_master(struct net *net,
1732 				 const struct net_device *dst_dev,
1733 				 const struct net_device *master,
1734 				 struct ipv6_saddr_dst *dst,
1735 				 struct ipv6_saddr_score *scores,
1736 				 int hiscore_idx)
1737 {
1738 	struct inet6_dev *idev;
1739 
1740 	idev = __in6_dev_get(dst_dev);
1741 	if (idev)
1742 		hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1743 						   scores, hiscore_idx);
1744 
1745 	idev = __in6_dev_get(master);
1746 	if (idev)
1747 		hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1748 						   scores, hiscore_idx);
1749 
1750 	return hiscore_idx;
1751 }
1752 
1753 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1754 		       const struct in6_addr *daddr, unsigned int prefs,
1755 		       struct in6_addr *saddr)
1756 {
1757 	struct ipv6_saddr_score scores[2], *hiscore;
1758 	struct ipv6_saddr_dst dst;
1759 	struct inet6_dev *idev;
1760 	struct net_device *dev;
1761 	int dst_type;
1762 	bool use_oif_addr = false;
1763 	int hiscore_idx = 0;
1764 	int ret = 0;
1765 
1766 	dst_type = __ipv6_addr_type(daddr);
1767 	dst.addr = daddr;
1768 	dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1769 	dst.scope = __ipv6_addr_src_scope(dst_type);
1770 	dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1771 	dst.prefs = prefs;
1772 
1773 	scores[hiscore_idx].rule = -1;
1774 	scores[hiscore_idx].ifa = NULL;
1775 
1776 	rcu_read_lock();
1777 
1778 	/* Candidate Source Address (section 4)
1779 	 *  - For multicast and link-local destination addresses,
1780 	 *    the set of candidate source addresses MUST only
1781 	 *    include addresses assigned to interfaces
1782 	 *    belonging to the same link as the outgoing
1783 	 *    interface.
1784 	 * (- For site-local destination addresses, the
1785 	 *    set of candidate source addresses MUST only
1786 	 *    include addresses assigned to interfaces
1787 	 *    belonging to the same site as the outgoing
1788 	 *    interface.)
1789 	 *  - "It is RECOMMENDED that the candidate source addresses
1790 	 *    be the set of unicast addresses assigned to the
1791 	 *    interface that will be used to send to the destination
1792 	 *    (the 'outgoing' interface)." (RFC 6724)
1793 	 */
1794 	if (dst_dev) {
1795 		idev = __in6_dev_get(dst_dev);
1796 		if ((dst_type & IPV6_ADDR_MULTICAST) ||
1797 		    dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1798 		    (idev && idev->cnf.use_oif_addrs_only)) {
1799 			use_oif_addr = true;
1800 		}
1801 	}
1802 
1803 	if (use_oif_addr) {
1804 		if (idev)
1805 			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1806 	} else {
1807 		const struct net_device *master;
1808 		int master_idx = 0;
1809 
1810 		/* if dst_dev exists and is enslaved to an L3 device, then
1811 		 * prefer addresses from dst_dev and then the master over
1812 		 * any other enslaved devices in the L3 domain.
1813 		 */
1814 		master = l3mdev_master_dev_rcu(dst_dev);
1815 		if (master) {
1816 			master_idx = master->ifindex;
1817 
1818 			hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1819 							    master, &dst,
1820 							    scores, hiscore_idx);
1821 
1822 			if (scores[hiscore_idx].ifa)
1823 				goto out;
1824 		}
1825 
1826 		for_each_netdev_rcu(net, dev) {
1827 			/* only consider addresses on devices in the
1828 			 * same L3 domain
1829 			 */
1830 			if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1831 				continue;
1832 			idev = __in6_dev_get(dev);
1833 			if (!idev)
1834 				continue;
1835 			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1836 		}
1837 	}
1838 
1839 out:
1840 	hiscore = &scores[hiscore_idx];
1841 	if (!hiscore->ifa)
1842 		ret = -EADDRNOTAVAIL;
1843 	else
1844 		*saddr = hiscore->ifa->addr;
1845 
1846 	rcu_read_unlock();
1847 	return ret;
1848 }
1849 EXPORT_SYMBOL(ipv6_dev_get_saddr);
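/*
 * Illustrative use (a sketch, not lifted from a specific caller):
 *
 *	struct in6_addr saddr;
 *
 *	if (!ipv6_dev_get_saddr(net, dst_dev, &fl6->daddr, 0, &saddr))
 *		fl6->saddr = saddr;
 *
 * A zero return means *saddr was filled in; -EADDRNOTAVAIL means no
 * suitable source address exists in the relevant L3 domain.
 */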
1850 
1851 static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1852 			      u32 banned_flags)
1853 {
1854 	struct inet6_ifaddr *ifp;
1855 	int err = -EADDRNOTAVAIL;
1856 
1857 	list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1858 		if (ifp->scope > IFA_LINK)
1859 			break;
1860 		if (ifp->scope == IFA_LINK &&
1861 		    !(ifp->flags & banned_flags)) {
1862 			*addr = ifp->addr;
1863 			err = 0;
1864 			break;
1865 		}
1866 	}
1867 	return err;
1868 }
1869 
1870 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1871 		    u32 banned_flags)
1872 {
1873 	struct inet6_dev *idev;
1874 	int err = -EADDRNOTAVAIL;
1875 
1876 	rcu_read_lock();
1877 	idev = __in6_dev_get(dev);
1878 	if (idev) {
1879 		read_lock_bh(&idev->lock);
1880 		err = __ipv6_get_lladdr(idev, addr, banned_flags);
1881 		read_unlock_bh(&idev->lock);
1882 	}
1883 	rcu_read_unlock();
1884 	return err;
1885 }
1886 
1887 static int ipv6_count_addresses(const struct inet6_dev *idev)
1888 {
1889 	const struct inet6_ifaddr *ifp;
1890 	int cnt = 0;
1891 
1892 	rcu_read_lock();
1893 	list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1894 		cnt++;
1895 	rcu_read_unlock();
1896 	return cnt;
1897 }
1898 
1899 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1900 		  const struct net_device *dev, int strict)
1901 {
1902 	return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1903 				       strict, IFA_F_TENTATIVE);
1904 }
1905 EXPORT_SYMBOL(ipv6_chk_addr);
1906 
1907 /* The device argument is used to find the L3 domain of interest. If
1908  * skip_dev_check is set, the ifp device is not checked against the
1909  * passed-in dev argument. So the two cases for address checks are:
1910  *   1. does the address exist in the L3 domain that dev is part of
1911  *      (skip_dev_check = true), or
1912  *
1913  *   2. does the address exist on the specific device
1914  *      (skip_dev_check = false)
1915  */
1916 static struct net_device *
1917 __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1918 			  const struct net_device *dev, bool skip_dev_check,
1919 			  int strict, u32 banned_flags)
1920 {
1921 	unsigned int hash = inet6_addr_hash(net, addr);
1922 	struct net_device *l3mdev, *ndev;
1923 	struct inet6_ifaddr *ifp;
1924 	u32 ifp_flags;
1925 
1926 	rcu_read_lock();
1927 
1928 	l3mdev = l3mdev_master_dev_rcu(dev);
1929 	if (skip_dev_check)
1930 		dev = NULL;
1931 
1932 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1933 		ndev = ifp->idev->dev;
1934 
1935 		if (l3mdev_master_dev_rcu(ndev) != l3mdev)
1936 			continue;
1937 
1938 		/* Decouple optimistic from tentative for evaluation here.
1939 		 * Ban optimistic addresses explicitly, when required.
1940 		 */
1941 		ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1942 			    ? (ifp->flags&~IFA_F_TENTATIVE)
1943 			    : ifp->flags;
1944 		if (ipv6_addr_equal(&ifp->addr, addr) &&
1945 		    !(ifp_flags&banned_flags) &&
1946 		    (!dev || ndev == dev ||
1947 		     !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1948 			rcu_read_unlock();
1949 			return ndev;
1950 		}
1951 	}
1952 
1953 	rcu_read_unlock();
1954 	return NULL;
1955 }
1956 
1957 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1958 			    const struct net_device *dev, bool skip_dev_check,
1959 			    int strict, u32 banned_flags)
1960 {
1961 	return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
1962 					 strict, banned_flags) ? 1 : 0;
1963 }
1964 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1965 
1966 
1967 /* Compares an address/prefix_len with addresses on device @dev.
1968  * If one is found it returns true.
1969  */
1970 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1971 	const unsigned int prefix_len, struct net_device *dev)
1972 {
1973 	const struct inet6_ifaddr *ifa;
1974 	const struct inet6_dev *idev;
1975 	bool ret = false;
1976 
1977 	rcu_read_lock();
1978 	idev = __in6_dev_get(dev);
1979 	if (idev) {
1980 		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1981 			ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1982 			if (ret)
1983 				break;
1984 		}
1985 	}
1986 	rcu_read_unlock();
1987 
1988 	return ret;
1989 }
1990 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1991 
1992 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1993 {
1994 	const struct inet6_ifaddr *ifa;
1995 	const struct inet6_dev *idev;
1996 	int	onlink;
1997 
1998 	onlink = 0;
1999 	rcu_read_lock();
2000 	idev = __in6_dev_get(dev);
2001 	if (idev) {
2002 		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
2003 			onlink = ipv6_prefix_equal(addr, &ifa->addr,
2004 						   ifa->prefix_len);
2005 			if (onlink)
2006 				break;
2007 		}
2008 	}
2009 	rcu_read_unlock();
2010 	return onlink;
2011 }
2012 EXPORT_SYMBOL(ipv6_chk_prefix);
2013 
2014 /**
2015  * ipv6_dev_find - find the first device with a given source address.
2016  * @net: the net namespace
2017  * @addr: the source address
2018  * @dev: used to find the L3 domain of interest
2019  *
2020  * The caller should be protected by RCU, or RTNL.
2021  */
2022 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2023 				 struct net_device *dev)
2024 {
2025 	return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2026 					 IFA_F_TENTATIVE);
2027 }
2028 EXPORT_SYMBOL(ipv6_dev_find);
2029 
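/* Look up @addr in the global address hash table.  When @dev is given and
 * the address is link/host scoped or @strict is set, only a match on that
 * device is accepted.  Returns the ifaddr with a reference held, or NULL.
 */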
2030 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2031 				     struct net_device *dev, int strict)
2032 {
2033 	unsigned int hash = inet6_addr_hash(net, addr);
2034 	struct inet6_ifaddr *ifp, *result = NULL;
2035 
2036 	rcu_read_lock();
2037 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
2038 		if (ipv6_addr_equal(&ifp->addr, addr)) {
2039 			if (!dev || ifp->idev->dev == dev ||
2040 			    !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2041 				result = ifp;
2042 				in6_ifa_hold(ifp);
2043 				break;
2044 			}
2045 		}
2046 	}
2047 	rcu_read_unlock();
2048 
2049 	return result;
2050 }
2051 
2052 /* Takes a referenced address; stops DAD and destroys or releases the ifaddr */
2053 
2054 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2055 {
2056 	if (dad_failed)
2057 		ifp->flags |= IFA_F_DADFAILED;
2058 
2059 	if (ifp->flags&IFA_F_TEMPORARY) {
2060 		struct inet6_ifaddr *ifpub;
2061 		spin_lock_bh(&ifp->lock);
2062 		ifpub = ifp->ifpub;
2063 		if (ifpub) {
2064 			in6_ifa_hold(ifpub);
2065 			spin_unlock_bh(&ifp->lock);
2066 			ipv6_create_tempaddr(ifpub, true);
2067 			in6_ifa_put(ifpub);
2068 		} else {
2069 			spin_unlock_bh(&ifp->lock);
2070 		}
2071 		ipv6_del_addr(ifp);
2072 	} else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2073 		spin_lock_bh(&ifp->lock);
2074 		addrconf_del_dad_work(ifp);
2075 		ifp->flags |= IFA_F_TENTATIVE;
2076 		if (dad_failed)
2077 			ifp->flags &= ~IFA_F_OPTIMISTIC;
2078 		spin_unlock_bh(&ifp->lock);
2079 		if (dad_failed)
2080 			ipv6_ifa_notify(0, ifp);
2081 		in6_ifa_put(ifp);
2082 	} else {
2083 		ipv6_del_addr(ifp);
2084 	}
2085 }
2086 
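/* Move @ifp from the DAD state to POSTDAD.  Returns 0 if the transition
 * was made, -ENOENT if DAD was not in progress.
 */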
2087 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2088 {
2089 	int err = -ENOENT;
2090 
2091 	spin_lock_bh(&ifp->lock);
2092 	if (ifp->state == INET6_IFADDR_STATE_DAD) {
2093 		ifp->state = INET6_IFADDR_STATE_POSTDAD;
2094 		err = 0;
2095 	}
2096 	spin_unlock_bh(&ifp->lock);
2097 
2098 	return err;
2099 }
2100 
2101 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2102 {
2103 	struct inet6_dev *idev = ifp->idev;
2104 	struct net *net = dev_net(idev->dev);
2105 
2106 	if (addrconf_dad_end(ifp)) {
2107 		in6_ifa_put(ifp);
2108 		return;
2109 	}
2110 
2111 	net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2112 			     ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2113 
2114 	spin_lock_bh(&ifp->lock);
2115 
2116 	if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2117 		struct in6_addr new_addr;
2118 		struct inet6_ifaddr *ifp2;
2119 		int retries = ifp->stable_privacy_retry + 1;
2120 		struct ifa6_config cfg = {
2121 			.pfx = &new_addr,
2122 			.plen = ifp->prefix_len,
2123 			.ifa_flags = ifp->flags,
2124 			.valid_lft = ifp->valid_lft,
2125 			.preferred_lft = ifp->prefered_lft,
2126 			.scope = ifp->scope,
2127 		};
2128 
2129 		if (retries > net->ipv6.sysctl.idgen_retries) {
2130 			net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2131 					     ifp->idev->dev->name);
2132 			goto errdad;
2133 		}
2134 
2135 		new_addr = ifp->addr;
2136 		if (ipv6_generate_stable_address(&new_addr, retries,
2137 						 idev))
2138 			goto errdad;
2139 
2140 		spin_unlock_bh(&ifp->lock);
2141 
2142 		if (idev->cnf.max_addresses &&
2143 		    ipv6_count_addresses(idev) >=
2144 		    idev->cnf.max_addresses)
2145 			goto lock_errdad;
2146 
2147 		net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2148 				     ifp->idev->dev->name);
2149 
2150 		ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2151 		if (IS_ERR(ifp2))
2152 			goto lock_errdad;
2153 
2154 		spin_lock_bh(&ifp2->lock);
2155 		ifp2->stable_privacy_retry = retries;
2156 		ifp2->state = INET6_IFADDR_STATE_PREDAD;
2157 		spin_unlock_bh(&ifp2->lock);
2158 
2159 		addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2160 		in6_ifa_put(ifp2);
2161 lock_errdad:
2162 		spin_lock_bh(&ifp->lock);
2163 	}
2164 
2165 errdad:
2166 	/* transition from _POSTDAD to _ERRDAD */
2167 	ifp->state = INET6_IFADDR_STATE_ERRDAD;
2168 	spin_unlock_bh(&ifp->lock);
2169 
2170 	addrconf_mod_dad_work(ifp, 0);
2171 	in6_ifa_put(ifp);
2172 }
2173 
2174 /* Join the solicited-node multicast group for @addr.
2175  * Caller must hold RTNL. */
2176 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2177 {
2178 	struct in6_addr maddr;
2179 
2180 	if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2181 		return;
2182 
2183 	addrconf_addr_solict_mult(addr, &maddr);
2184 	ipv6_dev_mc_inc(dev, &maddr);
2185 }
2186 
2187 /* caller must hold RTNL */
2188 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2189 {
2190 	struct in6_addr maddr;
2191 
2192 	if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2193 		return;
2194 
2195 	addrconf_addr_solict_mult(addr, &maddr);
2196 	__ipv6_dev_mc_dec(idev, &maddr);
2197 }
2198 
2199 /* caller must hold RTNL */
2200 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2201 {
2202 	struct in6_addr addr;
2203 
2204 	if (ifp->prefix_len >= 127) /* RFC 6164 */
2205 		return;
2206 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2207 	if (ipv6_addr_any(&addr))
2208 		return;
2209 	__ipv6_dev_ac_inc(ifp->idev, &addr);
2210 }
2211 
2212 /* caller must hold RTNL */
2213 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2214 {
2215 	struct in6_addr addr;
2216 
2217 	if (ifp->prefix_len >= 127) /* RFC 6164 */
2218 		return;
2219 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2220 	if (ipv6_addr_any(&addr))
2221 		return;
2222 	__ipv6_dev_ac_dec(ifp->idev, &addr);
2223 }
2224 
2225 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2226 {
2227 	switch (dev->addr_len) {
2228 	case ETH_ALEN:
2229 		memcpy(eui, dev->dev_addr, 3);
2230 		eui[3] = 0xFF;
2231 		eui[4] = 0xFE;
2232 		memcpy(eui + 5, dev->dev_addr + 3, 3);
2233 		break;
2234 	case EUI64_ADDR_LEN:
2235 		memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2236 		eui[0] ^= 2;
2237 		break;
2238 	default:
2239 		return -1;
2240 	}
2241 
2242 	return 0;
2243 }
2244 
2245 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2246 {
2247 	const union fwnet_hwaddr *ha;
2248 
2249 	if (dev->addr_len != FWNET_ALEN)
2250 		return -1;
2251 
2252 	ha = (const union fwnet_hwaddr *)dev->dev_addr;
2253 
2254 	memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2255 	eui[0] ^= 2;
2256 	return 0;
2257 }
2258 
2259 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2260 {
2261 	/* XXX: inherit EUI-64 from other interface -- yoshfuji */
2262 	if (dev->addr_len != ARCNET_ALEN)
2263 		return -1;
2264 	memset(eui, 0, 7);
2265 	eui[7] = *(u8 *)dev->dev_addr;
2266 	return 0;
2267 }
2268 
2269 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2270 {
2271 	if (dev->addr_len != INFINIBAND_ALEN)
2272 		return -1;
2273 	memcpy(eui, dev->dev_addr + 12, 8);
2274 	eui[0] |= 2;
2275 	return 0;
2276 }
2277 
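/* Build an ISATAP interface identifier (RFC 5214): the first octet is 0x02
 * when the embedded IPv4 address is globally unique and 0x00 otherwise,
 * followed by 00:5E:FE and the IPv4 address itself.
 */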
2278 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2279 {
2280 	if (addr == 0)
2281 		return -1;
2282 	eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2283 		  ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2284 		  ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2285 		  ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2286 		  ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2287 		  ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2288 	eui[1] = 0;
2289 	eui[2] = 0x5E;
2290 	eui[3] = 0xFE;
2291 	memcpy(eui + 4, &addr, 4);
2292 	return 0;
2293 }
2294 
2295 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2296 {
2297 	if (dev->priv_flags & IFF_ISATAP)
2298 		return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2299 	return -1;
2300 }
2301 
2302 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2303 {
2304 	return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2305 }
2306 
2307 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2308 {
2309 	memcpy(eui, dev->perm_addr, 3);
2310 	memcpy(eui + 5, dev->perm_addr + 3, 3);
2311 	eui[3] = 0xFF;
2312 	eui[4] = 0xFE;
2313 	eui[0] ^= 2;
2314 	return 0;
2315 }
2316 
2317 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2318 {
2319 	switch (dev->type) {
2320 	case ARPHRD_ETHER:
2321 	case ARPHRD_FDDI:
2322 		return addrconf_ifid_eui48(eui, dev);
2323 	case ARPHRD_ARCNET:
2324 		return addrconf_ifid_arcnet(eui, dev);
2325 	case ARPHRD_INFINIBAND:
2326 		return addrconf_ifid_infiniband(eui, dev);
2327 	case ARPHRD_SIT:
2328 		return addrconf_ifid_sit(eui, dev);
2329 	case ARPHRD_IPGRE:
2330 	case ARPHRD_TUNNEL:
2331 		return addrconf_ifid_gre(eui, dev);
2332 	case ARPHRD_6LOWPAN:
2333 		return addrconf_ifid_6lowpan(eui, dev);
2334 	case ARPHRD_IEEE1394:
2335 		return addrconf_ifid_ieee1394(eui, dev);
2336 	case ARPHRD_TUNNEL6:
2337 	case ARPHRD_IP6GRE:
2338 	case ARPHRD_RAWIP:
2339 		return addrconf_ifid_ip6tnl(eui, dev);
2340 	}
2341 	return -1;
2342 }
2343 
2344 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2345 {
2346 	int err = -1;
2347 	struct inet6_ifaddr *ifp;
2348 
2349 	read_lock_bh(&idev->lock);
2350 	list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2351 		if (ifp->scope > IFA_LINK)
2352 			break;
2353 		if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2354 			memcpy(eui, ifp->addr.s6_addr+8, 8);
2355 			err = 0;
2356 			break;
2357 		}
2358 	}
2359 	read_unlock_bh(&idev->lock);
2360 	return err;
2361 }
2362 
2363 /* Generation of a randomized Interface Identifier
2364  * draft-ietf-6man-rfc4941bis, Section 3.3.1
2365  */
2366 
2367 static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2368 {
2369 regen:
2370 	get_random_bytes(&addr->s6_addr[8], 8);
2371 
2372 	/* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2373 	 * check if generated address is not inappropriate:
2374 	 *
2375 	 * - Reserved IPv6 Interface Identifiers
2376 	 * - XXX: already assigned to an address on the device
2377 	 */
2378 
2379 	/* Subnet-router anycast: 0000:0000:0000:0000 */
2380 	if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2381 		goto regen;
2382 
2383 	/* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2384 	 * Proxy Mobile IPv6:   0200:5EFF:FE00:5213
2385 	 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2386 	 */
2387 	if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2388 	    (ntohl(addr->s6_addr32[3]) & 0xff000000) == 0xfe000000)
2389 		goto regen;
2390 
2391 	/* Reserved subnet anycast addresses */
2392 	if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2393 	    ntohl(addr->s6_addr32[3]) >= 0xffffff80)
2394 		goto regen;
2395 }
2396 
2397 /*
2398  *	Add prefix route.
2399  */
2400 
2401 static void
2402 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2403 		      struct net_device *dev, unsigned long expires,
2404 		      u32 flags, gfp_t gfp_flags)
2405 {
2406 	struct fib6_config cfg = {
2407 		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2408 		.fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2409 		.fc_ifindex = dev->ifindex,
2410 		.fc_expires = expires,
2411 		.fc_dst_len = plen,
2412 		.fc_flags = RTF_UP | flags,
2413 		.fc_nlinfo.nl_net = dev_net(dev),
2414 		.fc_protocol = RTPROT_KERNEL,
2415 		.fc_type = RTN_UNICAST,
2416 	};
2417 
2418 	cfg.fc_dst = *pfx;
2419 
2420 	/* Prevent useless cloning on PtP SIT.
2421 	   This is done here on the assumption that the whole
2422 	   class of non-broadcast devices does not need cloning.
2423 	 */
2424 #if IS_ENABLED(CONFIG_IPV6_SIT)
2425 	if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2426 		cfg.fc_flags |= RTF_NONEXTHOP;
2427 #endif
2428 
2429 	ip6_route_add(&cfg, gfp_flags, NULL);
2430 }
2431 
2432 
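/* Look up the prefix route for @pfx/@plen on @dev in the device's FIB
 * table, requiring all of @flags, none of @noflags and (optionally) no
 * gateway.  A reference is taken on the returned route.
 */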
2433 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2434 						  int plen,
2435 						  const struct net_device *dev,
2436 						  u32 flags, u32 noflags,
2437 						  bool no_gw)
2438 {
2439 	struct fib6_node *fn;
2440 	struct fib6_info *rt = NULL;
2441 	struct fib6_table *table;
2442 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2443 
2444 	table = fib6_get_table(dev_net(dev), tb_id);
2445 	if (!table)
2446 		return NULL;
2447 
2448 	rcu_read_lock();
2449 	fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2450 	if (!fn)
2451 		goto out;
2452 
2453 	for_each_fib6_node_rt_rcu(fn) {
2454 		/* prefix routes only use builtin fib6_nh */
2455 		if (rt->nh)
2456 			continue;
2457 
2458 		if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2459 			continue;
2460 		if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2461 			continue;
2462 		if ((rt->fib6_flags & flags) != flags)
2463 			continue;
2464 		if ((rt->fib6_flags & noflags) != 0)
2465 			continue;
2466 		if (!fib6_info_hold_safe(rt))
2467 			continue;
2468 		break;
2469 	}
2470 out:
2471 	rcu_read_unlock();
2472 	return rt;
2473 }
2474 
2475 
2476 /* Create "default" multicast route to the interface */
2477 
2478 static void addrconf_add_mroute(struct net_device *dev)
2479 {
2480 	struct fib6_config cfg = {
2481 		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2482 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
2483 		.fc_ifindex = dev->ifindex,
2484 		.fc_dst_len = 8,
2485 		.fc_flags = RTF_UP,
2486 		.fc_type = RTN_MULTICAST,
2487 		.fc_nlinfo.nl_net = dev_net(dev),
2488 		.fc_protocol = RTPROT_KERNEL,
2489 	};
2490 
2491 	ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2492 
2493 	ip6_route_add(&cfg, GFP_KERNEL, NULL);
2494 }
2495 
2496 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2497 {
2498 	struct inet6_dev *idev;
2499 
2500 	ASSERT_RTNL();
2501 
2502 	idev = ipv6_find_idev(dev);
2503 	if (IS_ERR(idev))
2504 		return idev;
2505 
2506 	if (idev->cnf.disable_ipv6)
2507 		return ERR_PTR(-EACCES);
2508 
2509 	/* Add default multicast route */
2510 	if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2511 		addrconf_add_mroute(dev);
2512 
2513 	return idev;
2514 }
2515 
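/* Refresh the lifetimes of the temporary addresses derived from @ifp,
 * clamping them to the RFC 4941 limits, and create a new temporary
 * address when requested (or when none exists) and use_tempaddr allows it.
 */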
2516 static void manage_tempaddrs(struct inet6_dev *idev,
2517 			     struct inet6_ifaddr *ifp,
2518 			     __u32 valid_lft, __u32 prefered_lft,
2519 			     bool create, unsigned long now)
2520 {
2521 	u32 flags;
2522 	struct inet6_ifaddr *ift;
2523 
2524 	read_lock_bh(&idev->lock);
2525 	/* update all temporary addresses in the list */
2526 	list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2527 		int age, max_valid, max_prefered;
2528 
2529 		if (ifp != ift->ifpub)
2530 			continue;
2531 
2532 		/* RFC 4941 section 3.3:
2533 		 * If a received option will extend the lifetime of a public
2534 		 * address, the lifetimes of temporary addresses should
2535 		 * be extended, subject to the overall constraint that no
2536 		 * temporary addresses should ever remain "valid" or "preferred"
2537 		 * for a time longer than (TEMP_VALID_LIFETIME) or
2538 		 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2539 		 */
2540 		age = (now - ift->cstamp) / HZ;
2541 		max_valid = idev->cnf.temp_valid_lft - age;
2542 		if (max_valid < 0)
2543 			max_valid = 0;
2544 
2545 		max_prefered = idev->cnf.temp_prefered_lft -
2546 			       idev->desync_factor - age;
2547 		if (max_prefered < 0)
2548 			max_prefered = 0;
2549 
2550 		if (valid_lft > max_valid)
2551 			valid_lft = max_valid;
2552 
2553 		if (prefered_lft > max_prefered)
2554 			prefered_lft = max_prefered;
2555 
2556 		spin_lock(&ift->lock);
2557 		flags = ift->flags;
2558 		ift->valid_lft = valid_lft;
2559 		ift->prefered_lft = prefered_lft;
2560 		ift->tstamp = now;
2561 		if (prefered_lft > 0)
2562 			ift->flags &= ~IFA_F_DEPRECATED;
2563 
2564 		spin_unlock(&ift->lock);
2565 		if (!(flags&IFA_F_TENTATIVE))
2566 			ipv6_ifa_notify(0, ift);
2567 	}
2568 
2569 	if ((create || list_empty(&idev->tempaddr_list)) &&
2570 	    idev->cnf.use_tempaddr > 0) {
2571 		/* When a new public address is created as described
2572 		 * in [ADDRCONF], also create a new temporary address.
2573 		 * Also create one if temporary addressing is enabled but
2574 		 * no temporary address currently exists.
2575 		 */
2576 		read_unlock_bh(&idev->lock);
2577 		ipv6_create_tempaddr(ifp, false);
2578 	} else {
2579 		read_unlock_bh(&idev->lock);
2580 	}
2581 }
2582 
2583 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2584 {
2585 	return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2586 	       idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2587 }
2588 
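/* Handle the address part of a received Prefix Information option
 * (RFC 4862 section 5.5.3): create a new autoconfigured address if none
 * exists and the valid lifetime allows it, otherwise update the lifetimes
 * of the existing address and of its temporary addresses.
 */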
2589 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2590 				 const struct prefix_info *pinfo,
2591 				 struct inet6_dev *in6_dev,
2592 				 const struct in6_addr *addr, int addr_type,
2593 				 u32 addr_flags, bool sllao, bool tokenized,
2594 				 __u32 valid_lft, u32 prefered_lft)
2595 {
2596 	struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2597 	int create = 0, update_lft = 0;
2598 
2599 	if (!ifp && valid_lft) {
2600 		int max_addresses = in6_dev->cnf.max_addresses;
2601 		struct ifa6_config cfg = {
2602 			.pfx = addr,
2603 			.plen = pinfo->prefix_len,
2604 			.ifa_flags = addr_flags,
2605 			.valid_lft = valid_lft,
2606 			.preferred_lft = prefered_lft,
2607 			.scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2608 			.ifa_proto = IFAPROT_KERNEL_RA
2609 		};
2610 
2611 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2612 		if ((net->ipv6.devconf_all->optimistic_dad ||
2613 		     in6_dev->cnf.optimistic_dad) &&
2614 		    !net->ipv6.devconf_all->forwarding && sllao)
2615 			cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2616 #endif
2617 
2618 		/* Do not allow the creation of too many autoconfigured
2619 		 * addresses; this would be too easy a way to crash the kernel.
2620 		 */
2621 		if (!max_addresses ||
2622 		    ipv6_count_addresses(in6_dev) < max_addresses)
2623 			ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2624 
2625 		if (IS_ERR_OR_NULL(ifp))
2626 			return -1;
2627 
2628 		create = 1;
2629 		spin_lock_bh(&ifp->lock);
2630 		ifp->flags |= IFA_F_MANAGETEMPADDR;
2631 		ifp->cstamp = jiffies;
2632 		ifp->tokenized = tokenized;
2633 		spin_unlock_bh(&ifp->lock);
2634 		addrconf_dad_start(ifp);
2635 	}
2636 
2637 	if (ifp) {
2638 		u32 flags;
2639 		unsigned long now;
2640 		u32 stored_lft;
2641 
2642 		/* update lifetime (RFC2462 5.5.3 e) */
2643 		spin_lock_bh(&ifp->lock);
2644 		now = jiffies;
2645 		if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2646 			stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2647 		else
2648 			stored_lft = 0;
2649 		if (!create && stored_lft) {
2650 			const u32 minimum_lft = min_t(u32,
2651 				stored_lft, MIN_VALID_LIFETIME);
2652 			valid_lft = max(valid_lft, minimum_lft);
2653 
2654 			/* RFC4862 Section 5.5.3e:
2655 			 * "Note that the preferred lifetime of the
2656 			 *  corresponding address is always reset to
2657 			 *  the Preferred Lifetime in the received
2658 			 *  Prefix Information option, regardless of
2659 			 *  whether the valid lifetime is also reset or
2660 			 *  ignored."
2661 			 *
2662 			 * So we should always update prefered_lft here.
2663 			 */
2664 			update_lft = 1;
2665 		}
2666 
2667 		if (update_lft) {
2668 			ifp->valid_lft = valid_lft;
2669 			ifp->prefered_lft = prefered_lft;
2670 			ifp->tstamp = now;
2671 			flags = ifp->flags;
2672 			ifp->flags &= ~IFA_F_DEPRECATED;
2673 			spin_unlock_bh(&ifp->lock);
2674 
2675 			if (!(flags&IFA_F_TENTATIVE))
2676 				ipv6_ifa_notify(0, ifp);
2677 		} else
2678 			spin_unlock_bh(&ifp->lock);
2679 
2680 		manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2681 				 create, now);
2682 
2683 		in6_ifa_put(ifp);
2684 		addrconf_verify(net);
2685 	}
2686 
2687 	return 0;
2688 }
2689 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2690 
2691 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2692 {
2693 	struct prefix_info *pinfo;
2694 	__u32 valid_lft;
2695 	__u32 prefered_lft;
2696 	int addr_type, err;
2697 	u32 addr_flags = 0;
2698 	struct inet6_dev *in6_dev;
2699 	struct net *net = dev_net(dev);
2700 
2701 	pinfo = (struct prefix_info *) opt;
2702 
2703 	if (len < sizeof(struct prefix_info)) {
2704 		netdev_dbg(dev, "addrconf: prefix option too short\n");
2705 		return;
2706 	}
2707 
2708 	/*
2709 	 *	Validation checks ([ADDRCONF], page 19)
2710 	 */
2711 
2712 	addr_type = ipv6_addr_type(&pinfo->prefix);
2713 
2714 	if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2715 		return;
2716 
2717 	valid_lft = ntohl(pinfo->valid);
2718 	prefered_lft = ntohl(pinfo->prefered);
2719 
2720 	if (prefered_lft > valid_lft) {
2721 		net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2722 		return;
2723 	}
2724 
2725 	in6_dev = in6_dev_get(dev);
2726 
2727 	if (!in6_dev) {
2728 		net_dbg_ratelimited("addrconf: device %s not configured\n",
2729 				    dev->name);
2730 		return;
2731 	}
2732 
2733 	/*
2734 	 *	Two things going on here:
2735 	 *	1) Add routes for on-link prefixes
2736 	 *	2) Configure prefixes with the auto flag set
2737 	 */
2738 
2739 	if (pinfo->onlink) {
2740 		struct fib6_info *rt;
2741 		unsigned long rt_expires;
2742 
2743 		/* Avoid arithmetic overflow. Really, we could
2744 		 * save rt_expires in seconds, likely valid_lft,
2745 		 * but it would require division in fib gc, which is
2746 		 * not good.
2747 		 */
2748 		if (HZ > USER_HZ)
2749 			rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2750 		else
2751 			rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2752 
2753 		if (addrconf_finite_timeout(rt_expires))
2754 			rt_expires *= HZ;
2755 
2756 		rt = addrconf_get_prefix_route(&pinfo->prefix,
2757 					       pinfo->prefix_len,
2758 					       dev,
2759 					       RTF_ADDRCONF | RTF_PREFIX_RT,
2760 					       RTF_DEFAULT, true);
2761 
2762 		if (rt) {
2763 			/* Autoconf prefix route */
2764 			if (valid_lft == 0) {
2765 				ip6_del_rt(net, rt, false);
2766 				rt = NULL;
2767 			} else if (addrconf_finite_timeout(rt_expires)) {
2768 				/* not infinity */
2769 				fib6_set_expires(rt, jiffies + rt_expires);
2770 			} else {
2771 				fib6_clean_expires(rt);
2772 			}
2773 		} else if (valid_lft) {
2774 			clock_t expires = 0;
2775 			int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2776 			if (addrconf_finite_timeout(rt_expires)) {
2777 				/* not infinity */
2778 				flags |= RTF_EXPIRES;
2779 				expires = jiffies_to_clock_t(rt_expires);
2780 			}
2781 			addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2782 					      0, dev, expires, flags,
2783 					      GFP_ATOMIC);
2784 		}
2785 		fib6_info_release(rt);
2786 	}
2787 
2788 	/* Try to figure out our local address for this prefix */
2789 
2790 	if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2791 		struct in6_addr addr;
2792 		bool tokenized = false, dev_addr_generated = false;
2793 
2794 		if (pinfo->prefix_len == 64) {
2795 			memcpy(&addr, &pinfo->prefix, 8);
2796 
2797 			if (!ipv6_addr_any(&in6_dev->token)) {
2798 				read_lock_bh(&in6_dev->lock);
2799 				memcpy(addr.s6_addr + 8,
2800 				       in6_dev->token.s6_addr + 8, 8);
2801 				read_unlock_bh(&in6_dev->lock);
2802 				tokenized = true;
2803 			} else if (is_addr_mode_generate_stable(in6_dev) &&
2804 				   !ipv6_generate_stable_address(&addr, 0,
2805 								 in6_dev)) {
2806 				addr_flags |= IFA_F_STABLE_PRIVACY;
2807 				goto ok;
2808 			} else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2809 				   ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2810 				goto put;
2811 			} else {
2812 				dev_addr_generated = true;
2813 			}
2814 			goto ok;
2815 		}
2816 		net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2817 				    pinfo->prefix_len);
2818 		goto put;
2819 
2820 ok:
2821 		err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2822 						   &addr, addr_type,
2823 						   addr_flags, sllao,
2824 						   tokenized, valid_lft,
2825 						   prefered_lft);
2826 		if (err)
2827 			goto put;
2828 
2829 		/* Ignore errors here because the previous prefix add-addr
2830 		 * succeeded and that success will be notified.
2831 		 */
2832 		ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2833 					      addr_type, addr_flags, sllao,
2834 					      tokenized, valid_lft,
2835 					      prefered_lft,
2836 					      dev_addr_generated);
2837 	}
2838 	inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2839 put:
2840 	in6_dev_put(in6_dev);
2841 }
2842 
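/* Create a SIT tunnel whose remote endpoint is the IPv4 address embedded
 * in the v4-compatible @ireq address, then look up the new tunnel device
 * by name and bring it up.
 */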
2843 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2844 		struct in6_ifreq *ireq)
2845 {
2846 	struct ip_tunnel_parm p = { };
2847 	int err;
2848 
2849 	if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2850 		return -EADDRNOTAVAIL;
2851 
2852 	p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2853 	p.iph.version = 4;
2854 	p.iph.ihl = 5;
2855 	p.iph.protocol = IPPROTO_IPV6;
2856 	p.iph.ttl = 64;
2857 
2858 	if (!dev->netdev_ops->ndo_tunnel_ctl)
2859 		return -EOPNOTSUPP;
2860 	err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2861 	if (err)
2862 		return err;
2863 
2864 	dev = __dev_get_by_name(net, p.name);
2865 	if (!dev)
2866 		return -ENOBUFS;
2867 	return dev_open(dev, NULL);
2868 }
2869 
2870 /*
2871  *	Set destination address.
2872  *	Special case for SIT interfaces where we create a new "virtual"
2873  *	device.
2874  */
2875 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2876 {
2877 	struct net_device *dev;
2878 	struct in6_ifreq ireq;
2879 	int err = -ENODEV;
2880 
2881 	if (!IS_ENABLED(CONFIG_IPV6_SIT))
2882 		return -ENODEV;
2883 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2884 		return -EFAULT;
2885 
2886 	rtnl_lock();
2887 	dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2888 	if (dev && dev->type == ARPHRD_SIT)
2889 		err = addrconf_set_sit_dstaddr(net, dev, &ireq);
2890 	rtnl_unlock();
2891 	return err;
2892 }
2893 
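/* Join or leave the multicast group @addr on @ifindex on behalf of @sk.
 * Used for addresses configured with IFA_F_MCAUTOJOIN.
 */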
2894 static int ipv6_mc_config(struct sock *sk, bool join,
2895 			  const struct in6_addr *addr, int ifindex)
2896 {
2897 	int ret;
2898 
2899 	ASSERT_RTNL();
2900 
2901 	lock_sock(sk);
2902 	if (join)
2903 		ret = ipv6_sock_mc_join(sk, ifindex, addr);
2904 	else
2905 		ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2906 	release_sock(sk);
2907 
2908 	return ret;
2909 }
2910 
2911 /*
2912  *	Manual configuration of address on an interface
2913  */
2914 static int inet6_addr_add(struct net *net, int ifindex,
2915 			  struct ifa6_config *cfg,
2916 			  struct netlink_ext_ack *extack)
2917 {
2918 	struct inet6_ifaddr *ifp;
2919 	struct inet6_dev *idev;
2920 	struct net_device *dev;
2921 	unsigned long timeout;
2922 	clock_t expires;
2923 	u32 flags;
2924 
2925 	ASSERT_RTNL();
2926 
2927 	if (cfg->plen > 128)
2928 		return -EINVAL;
2929 
2930 	/* check the lifetime */
2931 	if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
2932 		return -EINVAL;
2933 
2934 	if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64)
2935 		return -EINVAL;
2936 
2937 	dev = __dev_get_by_index(net, ifindex);
2938 	if (!dev)
2939 		return -ENODEV;
2940 
2941 	idev = addrconf_add_dev(dev);
2942 	if (IS_ERR(idev))
2943 		return PTR_ERR(idev);
2944 
2945 	if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2946 		int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2947 					 true, cfg->pfx, ifindex);
2948 
2949 		if (ret < 0)
2950 			return ret;
2951 	}
2952 
2953 	cfg->scope = ipv6_addr_scope(cfg->pfx);
2954 
2955 	timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
2956 	if (addrconf_finite_timeout(timeout)) {
2957 		expires = jiffies_to_clock_t(timeout * HZ);
2958 		cfg->valid_lft = timeout;
2959 		flags = RTF_EXPIRES;
2960 	} else {
2961 		expires = 0;
2962 		flags = 0;
2963 		cfg->ifa_flags |= IFA_F_PERMANENT;
2964 	}
2965 
2966 	timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
2967 	if (addrconf_finite_timeout(timeout)) {
2968 		if (timeout == 0)
2969 			cfg->ifa_flags |= IFA_F_DEPRECATED;
2970 		cfg->preferred_lft = timeout;
2971 	}
2972 
2973 	ifp = ipv6_add_addr(idev, cfg, true, extack);
2974 	if (!IS_ERR(ifp)) {
2975 		if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
2976 			addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
2977 					      ifp->rt_priority, dev, expires,
2978 					      flags, GFP_KERNEL);
2979 		}
2980 
2981 		/* Send a netlink notification if DAD is enabled and
2982 		 * optimistic flag is not set
2983 		 */
2984 		if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
2985 			ipv6_ifa_notify(0, ifp);
2986 		/*
2987 		 * Note that section 3.1 of RFC 4429 indicates
2988 		 * that the Optimistic flag should not be set for
2989 		 * manually configured addresses
2990 		 */
2991 		addrconf_dad_start(ifp);
2992 		if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
2993 			manage_tempaddrs(idev, ifp, cfg->valid_lft,
2994 					 cfg->preferred_lft, true, jiffies);
2995 		in6_ifa_put(ifp);
2996 		addrconf_verify_rtnl(net);
2997 		return 0;
2998 	} else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2999 		ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
3000 			       cfg->pfx, ifindex);
3001 	}
3002 
3003 	return PTR_ERR(ifp);
3004 }
3005 
3006 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
3007 			  const struct in6_addr *pfx, unsigned int plen)
3008 {
3009 	struct inet6_ifaddr *ifp;
3010 	struct inet6_dev *idev;
3011 	struct net_device *dev;
3012 
3013 	if (plen > 128)
3014 		return -EINVAL;
3015 
3016 	dev = __dev_get_by_index(net, ifindex);
3017 	if (!dev)
3018 		return -ENODEV;
3019 
3020 	idev = __in6_dev_get(dev);
3021 	if (!idev)
3022 		return -ENXIO;
3023 
3024 	read_lock_bh(&idev->lock);
3025 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
3026 		if (ifp->prefix_len == plen &&
3027 		    ipv6_addr_equal(pfx, &ifp->addr)) {
3028 			in6_ifa_hold(ifp);
3029 			read_unlock_bh(&idev->lock);
3030 
3031 			if (!(ifp->flags & IFA_F_TEMPORARY) &&
3032 			    (ifa_flags & IFA_F_MANAGETEMPADDR))
3033 				manage_tempaddrs(idev, ifp, 0, 0, false,
3034 						 jiffies);
3035 			ipv6_del_addr(ifp);
3036 			addrconf_verify_rtnl(net);
3037 			if (ipv6_addr_is_multicast(pfx)) {
3038 				ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3039 					       false, pfx, dev->ifindex);
3040 			}
3041 			return 0;
3042 		}
3043 	}
3044 	read_unlock_bh(&idev->lock);
3045 	return -EADDRNOTAVAIL;
3046 }
3047 
3048 
3049 int addrconf_add_ifaddr(struct net *net, void __user *arg)
3050 {
3051 	struct ifa6_config cfg = {
3052 		.ifa_flags = IFA_F_PERMANENT,
3053 		.preferred_lft = INFINITY_LIFE_TIME,
3054 		.valid_lft = INFINITY_LIFE_TIME,
3055 	};
3056 	struct in6_ifreq ireq;
3057 	int err;
3058 
3059 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3060 		return -EPERM;
3061 
3062 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3063 		return -EFAULT;
3064 
3065 	cfg.pfx = &ireq.ifr6_addr;
3066 	cfg.plen = ireq.ifr6_prefixlen;
3067 
3068 	rtnl_lock();
3069 	err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
3070 	rtnl_unlock();
3071 	return err;
3072 }
3073 
3074 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3075 {
3076 	struct in6_ifreq ireq;
3077 	int err;
3078 
3079 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3080 		return -EPERM;
3081 
3082 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3083 		return -EFAULT;
3084 
3085 	rtnl_lock();
3086 	err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3087 			     ireq.ifr6_prefixlen);
3088 	rtnl_unlock();
3089 	return err;
3090 }
3091 
3092 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3093 		     int plen, int scope, u8 proto)
3094 {
3095 	struct inet6_ifaddr *ifp;
3096 	struct ifa6_config cfg = {
3097 		.pfx = addr,
3098 		.plen = plen,
3099 		.ifa_flags = IFA_F_PERMANENT,
3100 		.valid_lft = INFINITY_LIFE_TIME,
3101 		.preferred_lft = INFINITY_LIFE_TIME,
3102 		.scope = scope,
3103 		.ifa_proto = proto
3104 	};
3105 
3106 	ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3107 	if (!IS_ERR(ifp)) {
3108 		spin_lock_bh(&ifp->lock);
3109 		ifp->flags &= ~IFA_F_TENTATIVE;
3110 		spin_unlock_bh(&ifp->lock);
3111 		rt_genid_bump_ipv6(dev_net(idev->dev));
3112 		ipv6_ifa_notify(RTM_NEWADDR, ifp);
3113 		in6_ifa_put(ifp);
3114 	}
3115 }
3116 
3117 #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
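/* Derive IPv6 addresses from the device's (or, failing that, the system's)
 * IPv4 addresses: link-local fe80::<v4>/64 on point-to-point links,
 * IPv4-compatible ::<v4>/96 otherwise.
 */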
3118 static void add_v4_addrs(struct inet6_dev *idev)
3119 {
3120 	struct in6_addr addr;
3121 	struct net_device *dev;
3122 	struct net *net = dev_net(idev->dev);
3123 	int scope, plen, offset = 0;
3124 	u32 pflags = 0;
3125 
3126 	ASSERT_RTNL();
3127 
3128 	memset(&addr, 0, sizeof(struct in6_addr));
3129 	/* in case of IP6GRE the dev_addr is an IPv6 address, so use only its last 4 bytes */
3130 	if (idev->dev->addr_len == sizeof(struct in6_addr))
3131 		offset = sizeof(struct in6_addr) - 4;
3132 	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
3133 
3134 	if (idev->dev->flags&IFF_POINTOPOINT) {
3135 		if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
3136 			return;
3137 
3138 		addr.s6_addr32[0] = htonl(0xfe800000);
3139 		scope = IFA_LINK;
3140 		plen = 64;
3141 	} else {
3142 		scope = IPV6_ADDR_COMPATv4;
3143 		plen = 96;
3144 		pflags |= RTF_NONEXTHOP;
3145 	}
3146 
3147 	if (addr.s6_addr32[3]) {
3148 		add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC);
3149 		addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3150 				      GFP_KERNEL);
3151 		return;
3152 	}
3153 
3154 	for_each_netdev(net, dev) {
3155 		struct in_device *in_dev = __in_dev_get_rtnl(dev);
3156 		if (in_dev && (dev->flags & IFF_UP)) {
3157 			struct in_ifaddr *ifa;
3158 			int flag = scope;
3159 
3160 			in_dev_for_each_ifa_rtnl(ifa, in_dev) {
3161 				addr.s6_addr32[3] = ifa->ifa_local;
3162 
3163 				if (ifa->ifa_scope == RT_SCOPE_LINK)
3164 					continue;
3165 				if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3166 					if (idev->dev->flags&IFF_POINTOPOINT)
3167 						continue;
3168 					flag |= IFA_HOST;
3169 				}
3170 
3171 				add_addr(idev, &addr, plen, flag,
3172 					 IFAPROT_UNSPEC);
3173 				addrconf_prefix_route(&addr, plen, 0, idev->dev,
3174 						      0, pflags, GFP_KERNEL);
3175 			}
3176 		}
3177 	}
3178 }
3179 #endif
3180 
3181 static void init_loopback(struct net_device *dev)
3182 {
3183 	struct inet6_dev  *idev;
3184 
3185 	/* ::1 */
3186 
3187 	ASSERT_RTNL();
3188 
3189 	idev = ipv6_find_idev(dev);
3190 	if (IS_ERR(idev)) {
3191 		pr_debug("%s: add_dev failed\n", __func__);
3192 		return;
3193 	}
3194 
3195 	add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO);
3196 }
3197 
3198 void addrconf_add_linklocal(struct inet6_dev *idev,
3199 			    const struct in6_addr *addr, u32 flags)
3200 {
3201 	struct ifa6_config cfg = {
3202 		.pfx = addr,
3203 		.plen = 64,
3204 		.ifa_flags = flags | IFA_F_PERMANENT,
3205 		.valid_lft = INFINITY_LIFE_TIME,
3206 		.preferred_lft = INFINITY_LIFE_TIME,
3207 		.scope = IFA_LINK,
3208 		.ifa_proto = IFAPROT_KERNEL_LL
3209 	};
3210 	struct inet6_ifaddr *ifp;
3211 
3212 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3213 	if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3214 	     idev->cnf.optimistic_dad) &&
3215 	    !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3216 		cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3217 #endif
3218 
3219 	ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3220 	if (!IS_ERR(ifp)) {
3221 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3222 				      0, 0, GFP_ATOMIC);
3223 		addrconf_dad_start(ifp);
3224 		in6_ifa_put(ifp);
3225 	}
3226 }
3227 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3228 
3229 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3230 {
3231 	if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3232 		return true;
3233 
3234 	if (address.s6_addr32[2] == htonl(0x02005eff) &&
3235 	    ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3236 		return true;
3237 
3238 	if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3239 	    ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3240 		return true;
3241 
3242 	return false;
3243 }
3244 
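/* Generate a stable-privacy interface identifier (in the spirit of
 * RFC 7217): SHA-1 over (secret, prefix, hardware address, dad_count),
 * retried with an incremented dad_count if the result is a reserved
 * interface identifier.
 */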
3245 static int ipv6_generate_stable_address(struct in6_addr *address,
3246 					u8 dad_count,
3247 					const struct inet6_dev *idev)
3248 {
3249 	static DEFINE_SPINLOCK(lock);
3250 	static __u32 digest[SHA1_DIGEST_WORDS];
3251 	static __u32 workspace[SHA1_WORKSPACE_WORDS];
3252 
3253 	static union {
3254 		char __data[SHA1_BLOCK_SIZE];
3255 		struct {
3256 			struct in6_addr secret;
3257 			__be32 prefix[2];
3258 			unsigned char hwaddr[MAX_ADDR_LEN];
3259 			u8 dad_count;
3260 		} __packed;
3261 	} data;
3262 
3263 	struct in6_addr secret;
3264 	struct in6_addr temp;
3265 	struct net *net = dev_net(idev->dev);
3266 
3267 	BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3268 
3269 	if (idev->cnf.stable_secret.initialized)
3270 		secret = idev->cnf.stable_secret.secret;
3271 	else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3272 		secret = net->ipv6.devconf_dflt->stable_secret.secret;
3273 	else
3274 		return -1;
3275 
3276 retry:
3277 	spin_lock_bh(&lock);
3278 
3279 	sha1_init(digest);
3280 	memset(&data, 0, sizeof(data));
3281 	memset(workspace, 0, sizeof(workspace));
3282 	memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3283 	data.prefix[0] = address->s6_addr32[0];
3284 	data.prefix[1] = address->s6_addr32[1];
3285 	data.secret = secret;
3286 	data.dad_count = dad_count;
3287 
3288 	sha1_transform(digest, data.__data, workspace);
3289 
3290 	temp = *address;
3291 	temp.s6_addr32[2] = (__force __be32)digest[0];
3292 	temp.s6_addr32[3] = (__force __be32)digest[1];
3293 
3294 	spin_unlock_bh(&lock);
3295 
3296 	if (ipv6_reserved_interfaceid(temp)) {
3297 		dad_count++;
3298 		if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3299 			return -1;
3300 		goto retry;
3301 	}
3302 
3303 	*address = temp;
3304 	return 0;
3305 }
3306 
3307 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3308 {
3309 	struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3310 
3311 	if (s->initialized)
3312 		return;
3313 	s = &idev->cnf.stable_secret;
3314 	get_random_bytes(&s->secret, sizeof(s->secret));
3315 	s->initialized = true;
3316 }
3317 
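/* Generate the link-local address for @idev according to the configured
 * addr_gen_mode (stable-privacy, random, EUI-64 or none).  If no address
 * can be generated and @prefix_route is set, only the fe80::/64 prefix
 * route is installed.
 */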
3318 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3319 {
3320 	struct in6_addr addr;
3321 
3322 	/* no link local addresses on L3 master devices */
3323 	if (netif_is_l3_master(idev->dev))
3324 		return;
3325 
3326 	/* no link local addresses on devices flagged as slaves */
3327 	if (idev->dev->flags & IFF_SLAVE)
3328 		return;
3329 
3330 	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3331 
3332 	switch (idev->cnf.addr_gen_mode) {
3333 	case IN6_ADDR_GEN_MODE_RANDOM:
3334 		ipv6_gen_mode_random_init(idev);
3335 		fallthrough;
3336 	case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3337 		if (!ipv6_generate_stable_address(&addr, 0, idev))
3338 			addrconf_add_linklocal(idev, &addr,
3339 					       IFA_F_STABLE_PRIVACY);
3340 		else if (prefix_route)
3341 			addrconf_prefix_route(&addr, 64, 0, idev->dev,
3342 					      0, 0, GFP_KERNEL);
3343 		break;
3344 	case IN6_ADDR_GEN_MODE_EUI64:
3345 		/* addrconf_add_linklocal also adds a prefix_route and we
3346 		 * only need to care about prefix routes if ipv6_generate_eui64
3347 		 * couldn't generate one.
3348 		 */
3349 		if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3350 			addrconf_add_linklocal(idev, &addr, 0);
3351 		else if (prefix_route)
3352 			addrconf_prefix_route(&addr, 64, 0, idev->dev,
3353 					      0, 0, GFP_KERNEL);
3354 		break;
3355 	case IN6_ADDR_GEN_MODE_NONE:
3356 	default:
3357 		/* will not add any link local address */
3358 		break;
3359 	}
3360 }
3361 
3362 static void addrconf_dev_config(struct net_device *dev)
3363 {
3364 	struct inet6_dev *idev;
3365 
3366 	ASSERT_RTNL();
3367 
3368 	if ((dev->type != ARPHRD_ETHER) &&
3369 	    (dev->type != ARPHRD_FDDI) &&
3370 	    (dev->type != ARPHRD_ARCNET) &&
3371 	    (dev->type != ARPHRD_INFINIBAND) &&
3372 	    (dev->type != ARPHRD_IEEE1394) &&
3373 	    (dev->type != ARPHRD_TUNNEL6) &&
3374 	    (dev->type != ARPHRD_6LOWPAN) &&
3375 	    (dev->type != ARPHRD_TUNNEL) &&
3376 	    (dev->type != ARPHRD_NONE) &&
3377 	    (dev->type != ARPHRD_RAWIP)) {
3378 		/* Alas, we support only Ethernet autoconfiguration. */
3379 		idev = __in6_dev_get(dev);
3380 		if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3381 		    dev->flags & IFF_MULTICAST)
3382 			ipv6_mc_up(idev);
3383 		return;
3384 	}
3385 
3386 	idev = addrconf_add_dev(dev);
3387 	if (IS_ERR(idev))
3388 		return;
3389 
3390 	/* this device type has no EUI support */
3391 	if (dev->type == ARPHRD_NONE &&
3392 	    idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3393 		idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3394 
3395 	addrconf_addr_gen(idev, false);
3396 }
3397 
3398 #if IS_ENABLED(CONFIG_IPV6_SIT)
3399 static void addrconf_sit_config(struct net_device *dev)
3400 {
3401 	struct inet6_dev *idev;
3402 
3403 	ASSERT_RTNL();
3404 
3405 	/*
3406 	 * Configure the tunnel with one of our IPv4
3407 	 * addresses... ideally we should configure the
3408 	 * tunnel with all of our IPv4 addresses.
3409 	 */
3410 
3411 	idev = ipv6_find_idev(dev);
3412 	if (IS_ERR(idev)) {
3413 		pr_debug("%s: add_dev failed\n", __func__);
3414 		return;
3415 	}
3416 
3417 	if (dev->priv_flags & IFF_ISATAP) {
3418 		addrconf_addr_gen(idev, false);
3419 		return;
3420 	}
3421 
3422 	add_v4_addrs(idev);
3423 
3424 	if (dev->flags&IFF_POINTOPOINT)
3425 		addrconf_add_mroute(dev);
3426 }
3427 #endif
3428 
3429 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3430 static void addrconf_gre_config(struct net_device *dev)
3431 {
3432 	struct inet6_dev *idev;
3433 
3434 	ASSERT_RTNL();
3435 
3436 	idev = ipv6_find_idev(dev);
3437 	if (IS_ERR(idev)) {
3438 		pr_debug("%s: add_dev failed\n", __func__);
3439 		return;
3440 	}
3441 
3442 	if (dev->type == ARPHRD_ETHER) {
3443 		addrconf_addr_gen(idev, true);
3444 		return;
3445 	}
3446 
3447 	add_v4_addrs(idev);
3448 
3449 	if (dev->flags & IFF_POINTOPOINT)
3450 		addrconf_add_mroute(dev);
3451 }
3452 #endif
3453 
3454 static int fixup_permanent_addr(struct net *net,
3455 				struct inet6_dev *idev,
3456 				struct inet6_ifaddr *ifp)
3457 {
3458 	/* !fib6_node means the host route was removed from the
3459 	 * FIB, for example, if 'lo' device is taken down. In that
3460 	 * case regenerate the host route.
3461 	 */
3462 	if (!ifp->rt || !ifp->rt->fib6_node) {
3463 		struct fib6_info *f6i, *prev;
3464 
3465 		f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3466 					 GFP_ATOMIC);
3467 		if (IS_ERR(f6i))
3468 			return PTR_ERR(f6i);
3469 
3470 		/* ifp->rt can be accessed outside of rtnl */
3471 		spin_lock(&ifp->lock);
3472 		prev = ifp->rt;
3473 		ifp->rt = f6i;
3474 		spin_unlock(&ifp->lock);
3475 
3476 		fib6_info_release(prev);
3477 	}
3478 
3479 	if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3480 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3481 				      ifp->rt_priority, idev->dev, 0, 0,
3482 				      GFP_ATOMIC);
3483 	}
3484 
3485 	if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3486 		addrconf_dad_start(ifp);
3487 
3488 	return 0;
3489 }
3490 
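/* Restore the host routes and prefix routes of permanent addresses when
 * the device comes back up; addresses whose routes cannot be restored
 * are deleted.
 */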
3491 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3492 {
3493 	struct inet6_ifaddr *ifp, *tmp;
3494 	struct inet6_dev *idev;
3495 
3496 	idev = __in6_dev_get(dev);
3497 	if (!idev)
3498 		return;
3499 
3500 	write_lock_bh(&idev->lock);
3501 
3502 	list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3503 		if ((ifp->flags & IFA_F_PERMANENT) &&
3504 		    fixup_permanent_addr(net, idev, ifp) < 0) {
3505 			write_unlock_bh(&idev->lock);
3506 			in6_ifa_hold(ifp);
3507 			ipv6_del_addr(ifp);
3508 			write_lock_bh(&idev->lock);
3509 
3510 			net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3511 					     idev->dev->name, &ifp->addr);
3512 		}
3513 	}
3514 
3515 	write_unlock_bh(&idev->lock);
3516 }
3517 
3518 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3519 			   void *ptr)
3520 {
3521 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3522 	struct netdev_notifier_change_info *change_info;
3523 	struct netdev_notifier_changeupper_info *info;
3524 	struct inet6_dev *idev = __in6_dev_get(dev);
3525 	struct net *net = dev_net(dev);
3526 	int run_pending = 0;
3527 	int err;
3528 
3529 	switch (event) {
3530 	case NETDEV_REGISTER:
3531 		if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3532 			idev = ipv6_add_dev(dev);
3533 			if (IS_ERR(idev))
3534 				return notifier_from_errno(PTR_ERR(idev));
3535 		}
3536 		break;
3537 
3538 	case NETDEV_CHANGEMTU:
3539 		/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3540 		if (dev->mtu < IPV6_MIN_MTU) {
3541 			addrconf_ifdown(dev, dev != net->loopback_dev);
3542 			break;
3543 		}
3544 
3545 		if (idev) {
3546 			rt6_mtu_change(dev, dev->mtu);
3547 			idev->cnf.mtu6 = dev->mtu;
3548 			break;
3549 		}
3550 
3551 		/* allocate new idev */
3552 		idev = ipv6_add_dev(dev);
3553 		if (IS_ERR(idev))
3554 			break;
3555 
3556 		/* device is still not ready */
3557 		if (!(idev->if_flags & IF_READY))
3558 			break;
3559 
3560 		run_pending = 1;
3561 		fallthrough;
3562 	case NETDEV_UP:
3563 	case NETDEV_CHANGE:
3564 		if (dev->flags & IFF_SLAVE)
3565 			break;
3566 
3567 		if (idev && idev->cnf.disable_ipv6)
3568 			break;
3569 
3570 		if (event == NETDEV_UP) {
3571 			/* restore routes for permanent addresses */
3572 			addrconf_permanent_addr(net, dev);
3573 
3574 			if (!addrconf_link_ready(dev)) {
3575 				/* device is not ready yet. */
3576 				pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3577 					 dev->name);
3578 				break;
3579 			}
3580 
3581 			if (!idev && dev->mtu >= IPV6_MIN_MTU)
3582 				idev = ipv6_add_dev(dev);
3583 
3584 			if (!IS_ERR_OR_NULL(idev)) {
3585 				idev->if_flags |= IF_READY;
3586 				run_pending = 1;
3587 			}
3588 		} else if (event == NETDEV_CHANGE) {
3589 			if (!addrconf_link_ready(dev)) {
3590 				/* device is still not ready. */
3591 				rt6_sync_down_dev(dev, event);
3592 				break;
3593 			}
3594 
3595 			if (!IS_ERR_OR_NULL(idev)) {
3596 				if (idev->if_flags & IF_READY) {
3597 					/* device is already configured -
3598 					 * but resend MLD reports, we might
3599 					 * have roamed and need to update
3600 					 * multicast snooping switches
3601 					 */
3602 					ipv6_mc_up(idev);
3603 					change_info = ptr;
3604 					if (change_info->flags_changed & IFF_NOARP)
3605 						addrconf_dad_run(idev, true);
3606 					rt6_sync_up(dev, RTNH_F_LINKDOWN);
3607 					break;
3608 				}
3609 				idev->if_flags |= IF_READY;
3610 			}
3611 
3612 			pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3613 				dev->name);
3614 
3615 			run_pending = 1;
3616 		}
3617 
3618 		switch (dev->type) {
3619 #if IS_ENABLED(CONFIG_IPV6_SIT)
3620 		case ARPHRD_SIT:
3621 			addrconf_sit_config(dev);
3622 			break;
3623 #endif
3624 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3625 		case ARPHRD_IP6GRE:
3626 		case ARPHRD_IPGRE:
3627 			addrconf_gre_config(dev);
3628 			break;
3629 #endif
3630 		case ARPHRD_LOOPBACK:
3631 			init_loopback(dev);
3632 			break;
3633 
3634 		default:
3635 			addrconf_dev_config(dev);
3636 			break;
3637 		}
3638 
3639 		if (!IS_ERR_OR_NULL(idev)) {
3640 			if (run_pending)
3641 				addrconf_dad_run(idev, false);
3642 
3643 			/* Device has an address by now */
3644 			rt6_sync_up(dev, RTNH_F_DEAD);
3645 
3646 			/*
3647 			 * If the MTU changed while the interface was down,
3648 			 * the new MTU must be reflected in the idev as well
3649 			 * as in the routes when the interface comes back up.
3650 			 */
3651 			if (idev->cnf.mtu6 != dev->mtu &&
3652 			    dev->mtu >= IPV6_MIN_MTU) {
3653 				rt6_mtu_change(dev, dev->mtu);
3654 				idev->cnf.mtu6 = dev->mtu;
3655 			}
3656 			idev->tstamp = jiffies;
3657 			inet6_ifinfo_notify(RTM_NEWLINK, idev);
3658 
3659 			/*
3660 			 * If the MTU changed while the interface was down and is
3661 			 * now lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3662 			 */
3663 			if (dev->mtu < IPV6_MIN_MTU)
3664 				addrconf_ifdown(dev, dev != net->loopback_dev);
3665 		}
3666 		break;
3667 
3668 	case NETDEV_DOWN:
3669 	case NETDEV_UNREGISTER:
3670 		/*
3671 		 *	Remove all addresses from this interface.
3672 		 */
3673 		addrconf_ifdown(dev, event != NETDEV_DOWN);
3674 		break;
3675 
3676 	case NETDEV_CHANGENAME:
3677 		if (idev) {
3678 			snmp6_unregister_dev(idev);
3679 			addrconf_sysctl_unregister(idev);
3680 			err = addrconf_sysctl_register(idev);
3681 			if (err)
3682 				return notifier_from_errno(err);
3683 			err = snmp6_register_dev(idev);
3684 			if (err) {
3685 				addrconf_sysctl_unregister(idev);
3686 				return notifier_from_errno(err);
3687 			}
3688 		}
3689 		break;
3690 
3691 	case NETDEV_PRE_TYPE_CHANGE:
3692 	case NETDEV_POST_TYPE_CHANGE:
3693 		if (idev)
3694 			addrconf_type_change(dev, event);
3695 		break;
3696 
3697 	case NETDEV_CHANGEUPPER:
3698 		info = ptr;
3699 
3700 		/* flush all routes if dev is linked to or unlinked from
3701 		 * an L3 master device (e.g., VRF)
3702 		 */
3703 		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3704 			addrconf_ifdown(dev, false);
3705 	}
3706 
3707 	return NOTIFY_OK;
3708 }
3709 
3710 /*
3711  *	addrconf module should be notified of a device going up
3712  */
3713 static struct notifier_block ipv6_dev_notf = {
3714 	.notifier_call = addrconf_notify,
3715 	.priority = ADDRCONF_NOTIFY_PRIORITY,
3716 };
3717 
3718 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3719 {
3720 	struct inet6_dev *idev;
3721 	ASSERT_RTNL();
3722 
3723 	idev = __in6_dev_get(dev);
3724 
3725 	if (event == NETDEV_POST_TYPE_CHANGE)
3726 		ipv6_mc_remap(idev);
3727 	else if (event == NETDEV_PRE_TYPE_CHANGE)
3728 		ipv6_mc_unmap(idev);
3729 }
3730 
3731 static bool addr_is_local(const struct in6_addr *addr)
3732 {
3733 	return ipv6_addr_type(addr) &
3734 		(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3735 }
3736 
3737 static int addrconf_ifdown(struct net_device *dev, bool unregister)
3738 {
3739 	unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
3740 	struct net *net = dev_net(dev);
3741 	struct inet6_dev *idev;
3742 	struct inet6_ifaddr *ifa;
3743 	LIST_HEAD(tmp_addr_list);
3744 	bool keep_addr = false;
3745 	bool was_ready;
3746 	int state, i;
3747 
3748 	ASSERT_RTNL();
3749 
3750 	rt6_disable_ip(dev, event);
3751 
3752 	idev = __in6_dev_get(dev);
3753 	if (!idev)
3754 		return -ENODEV;
3755 
3756 	/*
3757 	 * Step 1: remove reference to ipv6 device from parent device.
3758 	 *	   Do not dev_put!
3759 	 */
3760 	if (unregister) {
3761 		idev->dead = 1;
3762 
3763 		/* protected by rtnl_lock */
3764 		RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3765 
3766 		/* Step 1.5: remove snmp6 entry */
3767 		snmp6_unregister_dev(idev);
3768 
3769 	}
3770 
3771 	/* combine the user config with event to determine if permanent
3772 	 * addresses are to be removed from address hash table
3773 	 */
3774 	if (!unregister && !idev->cnf.disable_ipv6) {
3775 		/* aggregate the system setting and interface setting */
3776 		int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3777 
3778 		if (!_keep_addr)
3779 			_keep_addr = idev->cnf.keep_addr_on_down;
3780 
3781 		keep_addr = (_keep_addr > 0);
3782 	}
3783 
3784 	/* Step 2: clear hash table */
3785 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3786 		struct hlist_head *h = &net->ipv6.inet6_addr_lst[i];
3787 
3788 		spin_lock_bh(&net->ipv6.addrconf_hash_lock);
3789 restart:
3790 		hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3791 			if (ifa->idev == idev) {
3792 				addrconf_del_dad_work(ifa);
3793 				/* combined flag + permanent flag decide if
3794 				 * address is retained on a down event
3795 				 */
3796 				if (!keep_addr ||
3797 				    !(ifa->flags & IFA_F_PERMANENT) ||
3798 				    addr_is_local(&ifa->addr)) {
3799 					hlist_del_init_rcu(&ifa->addr_lst);
3800 					goto restart;
3801 				}
3802 			}
3803 		}
3804 		spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
3805 	}
3806 
3807 	write_lock_bh(&idev->lock);
3808 
3809 	addrconf_del_rs_timer(idev);
3810 
3811 	/* Step 3: clear flags for stateless addrconf, repeated down
3812 	 *         detection
3813 	 */
3814 	was_ready = idev->if_flags & IF_READY;
3815 	if (!unregister)
3816 		idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3817 
3818 	/* Step 4: clear tempaddr list */
3819 	while (!list_empty(&idev->tempaddr_list)) {
3820 		ifa = list_first_entry(&idev->tempaddr_list,
3821 				       struct inet6_ifaddr, tmp_list);
3822 		list_del(&ifa->tmp_list);
3823 		write_unlock_bh(&idev->lock);
3824 		spin_lock_bh(&ifa->lock);
3825 
3826 		if (ifa->ifpub) {
3827 			in6_ifa_put(ifa->ifpub);
3828 			ifa->ifpub = NULL;
3829 		}
3830 		spin_unlock_bh(&ifa->lock);
3831 		in6_ifa_put(ifa);
3832 		write_lock_bh(&idev->lock);
3833 	}
3834 
3835 	list_for_each_entry(ifa, &idev->addr_list, if_list)
3836 		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
3837 	write_unlock_bh(&idev->lock);
3838 
3839 	while (!list_empty(&tmp_addr_list)) {
3840 		struct fib6_info *rt = NULL;
3841 		bool keep;
3842 
3843 		ifa = list_first_entry(&tmp_addr_list,
3844 				       struct inet6_ifaddr, if_list_aux);
3845 		list_del(&ifa->if_list_aux);
3846 
3847 		addrconf_del_dad_work(ifa);
3848 
3849 		keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3850 			!addr_is_local(&ifa->addr);
3851 
3852 		spin_lock_bh(&ifa->lock);
3853 
3854 		if (keep) {
3855 			/* set state to skip the notifier below */
3856 			state = INET6_IFADDR_STATE_DEAD;
3857 			ifa->state = INET6_IFADDR_STATE_PREDAD;
3858 			if (!(ifa->flags & IFA_F_NODAD))
3859 				ifa->flags |= IFA_F_TENTATIVE;
3860 
3861 			rt = ifa->rt;
3862 			ifa->rt = NULL;
3863 		} else {
3864 			state = ifa->state;
3865 			ifa->state = INET6_IFADDR_STATE_DEAD;
3866 		}
3867 
3868 		spin_unlock_bh(&ifa->lock);
3869 
3870 		if (rt)
3871 			ip6_del_rt(net, rt, false);
3872 
3873 		if (state != INET6_IFADDR_STATE_DEAD) {
3874 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
3875 			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3876 		} else {
3877 			if (idev->cnf.forwarding)
3878 				addrconf_leave_anycast(ifa);
3879 			addrconf_leave_solict(ifa->idev, &ifa->addr);
3880 		}
3881 
3882 		if (!keep) {
3883 			write_lock_bh(&idev->lock);
3884 			list_del_rcu(&ifa->if_list);
3885 			write_unlock_bh(&idev->lock);
3886 			in6_ifa_put(ifa);
3887 		}
3888 	}
3889 
3890 	/* Step 5: Discard anycast and multicast list */
3891 	if (unregister) {
3892 		ipv6_ac_destroy_dev(idev);
3893 		ipv6_mc_destroy_dev(idev);
3894 	} else if (was_ready) {
3895 		ipv6_mc_down(idev);
3896 	}
3897 
3898 	idev->tstamp = jiffies;
3899 	idev->ra_mtu = 0;
3900 
3901 	/* Last: release the inet6_dev (if the device is being unregistered) */
3902 	if (unregister) {
3903 		addrconf_sysctl_unregister(idev);
3904 		neigh_parms_release(&nd_tbl, idev->nd_parms);
3905 		neigh_ifdown(&nd_tbl, dev);
3906 		in6_dev_put(idev);
3907 	}
3908 	return 0;
3909 }
3910 
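/*
 * Router Solicitation retransmit timer: resend an RS from the link-local
 * address to the all-routers group until rtr_solicits probes have been sent
 * (or indefinitely if rtr_solicits is negative) or a router advertisement
 * arrives; retransmissions are spaced with the RFC 3315 section 14 style
 * backoff (rfc3315_s14_backoff_update()).
 */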
3911 static void addrconf_rs_timer(struct timer_list *t)
3912 {
3913 	struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3914 	struct net_device *dev = idev->dev;
3915 	struct in6_addr lladdr;
3916 
3917 	write_lock(&idev->lock);
3918 	if (idev->dead || !(idev->if_flags & IF_READY))
3919 		goto out;
3920 
3921 	if (!ipv6_accept_ra(idev))
3922 		goto out;
3923 
3924 	/* Announcement received after solicitation was sent */
3925 	if (idev->if_flags & IF_RA_RCVD)
3926 		goto out;
3927 
3928 	if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3929 		write_unlock(&idev->lock);
3930 		if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3931 			ndisc_send_rs(dev, &lladdr,
3932 				      &in6addr_linklocal_allrouters);
3933 		else
3934 			goto put;
3935 
3936 		write_lock(&idev->lock);
3937 		idev->rs_interval = rfc3315_s14_backoff_update(
3938 			idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3939 		/* The wait after the last probe can be shorter */
3940 		addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3941 					     idev->cnf.rtr_solicits) ?
3942 				      idev->cnf.rtr_solicit_delay :
3943 				      idev->rs_interval);
3944 	} else {
3945 		/*
3946 		 * Note: we no longer support the deprecated "all on-link"
3947 		 * assumption.
3948 		 */
3949 		pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3950 	}
3951 
3952 out:
3953 	write_unlock(&idev->lock);
3954 put:
3955 	in6_dev_put(idev);
3956 }
3957 
3958 /*
3959  *	Duplicate Address Detection
3960  */
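/*
 * addrconf_dad_kick() arms the first DAD probe: optimistic addresses
 * (RFC 4429) start immediately, others after a random delay of up to
 * rtr_solicit_delay. With enhanced DAD (RFC 7527) enabled, a non-zero
 * 48-bit nonce is generated and later passed along to ndisc_send_ns().
 */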
3961 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3962 {
3963 	unsigned long rand_num;
3964 	struct inet6_dev *idev = ifp->idev;
3965 	u64 nonce;
3966 
3967 	if (ifp->flags & IFA_F_OPTIMISTIC)
3968 		rand_num = 0;
3969 	else
3970 		rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3971 
3972 	nonce = 0;
3973 	if (idev->cnf.enhanced_dad ||
3974 	    dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3975 		do
3976 			get_random_bytes(&nonce, 6);
3977 		while (nonce == 0);
3978 	}
3979 	ifp->dad_nonce = nonce;
3980 	ifp->dad_probes = idev->cnf.dad_transmits;
3981 	addrconf_mod_dad_work(ifp, rand_num);
3982 }
3983 
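/*
 * addrconf_dad_begin() decides whether DAD is needed at all: it is skipped
 * (and the address completed right away) on NOARP/loopback devices, when
 * accept_dad < 1 both globally and per device, when the address is not
 * tentative, or when IFA_F_NODAD is set. Optimistic addresses get their
 * route inserted immediately so they can be used while DAD is running.
 */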
3984 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3985 {
3986 	struct inet6_dev *idev = ifp->idev;
3987 	struct net_device *dev = idev->dev;
3988 	bool bump_id, notify = false;
3989 	struct net *net;
3990 
3991 	addrconf_join_solict(dev, &ifp->addr);
3992 
3993 	prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3994 
3995 	read_lock_bh(&idev->lock);
3996 	spin_lock(&ifp->lock);
3997 	if (ifp->state == INET6_IFADDR_STATE_DEAD)
3998 		goto out;
3999 
4000 	net = dev_net(dev);
4001 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
4002 	    (net->ipv6.devconf_all->accept_dad < 1 &&
4003 	     idev->cnf.accept_dad < 1) ||
4004 	    !(ifp->flags&IFA_F_TENTATIVE) ||
4005 	    ifp->flags & IFA_F_NODAD) {
4006 		bool send_na = false;
4007 
4008 		if (ifp->flags & IFA_F_TENTATIVE &&
4009 		    !(ifp->flags & IFA_F_OPTIMISTIC))
4010 			send_na = true;
4011 		bump_id = ifp->flags & IFA_F_TENTATIVE;
4012 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4013 		spin_unlock(&ifp->lock);
4014 		read_unlock_bh(&idev->lock);
4015 
4016 		addrconf_dad_completed(ifp, bump_id, send_na);
4017 		return;
4018 	}
4019 
4020 	if (!(idev->if_flags & IF_READY)) {
4021 		spin_unlock(&ifp->lock);
4022 		read_unlock_bh(&idev->lock);
4023 		/*
4024 		 * If the device is not ready:
4025 		 * - keep it tentative if it is a permanent address.
4026 		 * - otherwise, kill it.
4027 		 */
4028 		in6_ifa_hold(ifp);
4029 		addrconf_dad_stop(ifp, 0);
4030 		return;
4031 	}
4032 
4033 	/*
4034 	 * Optimistic nodes can start receiving
4035 	 * frames right away.
4036 	 */
4037 	if (ifp->flags & IFA_F_OPTIMISTIC) {
4038 		ip6_ins_rt(net, ifp->rt);
4039 		if (ipv6_use_optimistic_addr(net, idev)) {
4040 			/* Because optimistic nodes can use this address,
4041 			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
4042 			 */
4043 			notify = true;
4044 		}
4045 	}
4046 
4047 	addrconf_dad_kick(ifp);
4048 out:
4049 	spin_unlock(&ifp->lock);
4050 	read_unlock_bh(&idev->lock);
4051 	if (notify)
4052 		ipv6_ifa_notify(RTM_NEWADDR, ifp);
4053 }
4054 
4055 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4056 {
4057 	bool begin_dad = false;
4058 
4059 	spin_lock_bh(&ifp->lock);
4060 	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4061 		ifp->state = INET6_IFADDR_STATE_PREDAD;
4062 		begin_dad = true;
4063 	}
4064 	spin_unlock_bh(&ifp->lock);
4065 
4066 	if (begin_dad)
4067 		addrconf_mod_dad_work(ifp, 0);
4068 }
4069 
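/*
 * The DAD work item implements a small state machine: PREDAD starts DAD,
 * ERRDAD aborts it (and may disable IPv6 on the device when accept_dad > 1
 * and the failing address is the EUI-64 derived link-local); otherwise one
 * probe is consumed, an NS is sent to the solicited-node multicast address,
 * and the work is rescheduled until dad_probes reaches zero.
 */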
4070 static void addrconf_dad_work(struct work_struct *w)
4071 {
4072 	struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4073 						struct inet6_ifaddr,
4074 						dad_work);
4075 	struct inet6_dev *idev = ifp->idev;
4076 	bool bump_id, disable_ipv6 = false;
4077 	struct in6_addr mcaddr;
4078 
4079 	enum {
4080 		DAD_PROCESS,
4081 		DAD_BEGIN,
4082 		DAD_ABORT,
4083 	} action = DAD_PROCESS;
4084 
4085 	rtnl_lock();
4086 
4087 	spin_lock_bh(&ifp->lock);
4088 	if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4089 		action = DAD_BEGIN;
4090 		ifp->state = INET6_IFADDR_STATE_DAD;
4091 	} else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4092 		action = DAD_ABORT;
4093 		ifp->state = INET6_IFADDR_STATE_POSTDAD;
4094 
4095 		if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
4096 		     idev->cnf.accept_dad > 1) &&
4097 		    !idev->cnf.disable_ipv6 &&
4098 		    !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4099 			struct in6_addr addr;
4100 
4101 			addr.s6_addr32[0] = htonl(0xfe800000);
4102 			addr.s6_addr32[1] = 0;
4103 
4104 			if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4105 			    ipv6_addr_equal(&ifp->addr, &addr)) {
4106 				/* DAD failed for link-local based on MAC */
4107 				idev->cnf.disable_ipv6 = 1;
4108 
4109 				pr_info("%s: IPv6 being disabled!\n",
4110 					ifp->idev->dev->name);
4111 				disable_ipv6 = true;
4112 			}
4113 		}
4114 	}
4115 	spin_unlock_bh(&ifp->lock);
4116 
4117 	if (action == DAD_BEGIN) {
4118 		addrconf_dad_begin(ifp);
4119 		goto out;
4120 	} else if (action == DAD_ABORT) {
4121 		in6_ifa_hold(ifp);
4122 		addrconf_dad_stop(ifp, 1);
4123 		if (disable_ipv6)
4124 			addrconf_ifdown(idev->dev, false);
4125 		goto out;
4126 	}
4127 
4128 	if (!ifp->dad_probes && addrconf_dad_end(ifp))
4129 		goto out;
4130 
4131 	write_lock_bh(&idev->lock);
4132 	if (idev->dead || !(idev->if_flags & IF_READY)) {
4133 		write_unlock_bh(&idev->lock);
4134 		goto out;
4135 	}
4136 
4137 	spin_lock(&ifp->lock);
4138 	if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4139 		spin_unlock(&ifp->lock);
4140 		write_unlock_bh(&idev->lock);
4141 		goto out;
4142 	}
4143 
4144 	if (ifp->dad_probes == 0) {
4145 		bool send_na = false;
4146 
4147 		/*
4148 		 * DAD was successful
4149 		 */
4150 
4151 		if (ifp->flags & IFA_F_TENTATIVE &&
4152 		    !(ifp->flags & IFA_F_OPTIMISTIC))
4153 			send_na = true;
4154 		bump_id = ifp->flags & IFA_F_TENTATIVE;
4155 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4156 		spin_unlock(&ifp->lock);
4157 		write_unlock_bh(&idev->lock);
4158 
4159 		addrconf_dad_completed(ifp, bump_id, send_na);
4160 
4161 		goto out;
4162 	}
4163 
4164 	ifp->dad_probes--;
4165 	addrconf_mod_dad_work(ifp,
4166 			      max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4167 				  HZ/100));
4168 	spin_unlock(&ifp->lock);
4169 	write_unlock_bh(&idev->lock);
4170 
4171 	/* send a neighbour solicitation for our addr */
4172 	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4173 	ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4174 		      ifp->dad_nonce);
4175 out:
4176 	in6_ifa_put(ifp);
4177 	rtnl_unlock();
4178 }
4179 
4180 /* ifp->idev must be at least read locked */
4181 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4182 {
4183 	struct inet6_ifaddr *ifpiter;
4184 	struct inet6_dev *idev = ifp->idev;
4185 
4186 	list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4187 		if (ifpiter->scope > IFA_LINK)
4188 			break;
4189 		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4190 		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4191 				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4192 		    IFA_F_PERMANENT)
4193 			return false;
4194 	}
4195 	return true;
4196 }
4197 
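/*
 * Once DAD has finished, the address is announced via RTM_NEWADDR, the MLD
 * report is re-sent with the now valid link-local source, an unsolicited NA
 * may be emitted (ndisc_notify), and router solicitations are started for
 * the first link-local address on devices that accept RAs.
 */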
4198 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4199 				   bool send_na)
4200 {
4201 	struct net_device *dev = ifp->idev->dev;
4202 	struct in6_addr lladdr;
4203 	bool send_rs, send_mld;
4204 
4205 	addrconf_del_dad_work(ifp);
4206 
4207 	/*
4208 	 *	Configure the address for reception. Now it is valid.
4209 	 */
4210 
4211 	ipv6_ifa_notify(RTM_NEWADDR, ifp);
4212 
4213 	/* If the added address is link-local and we are prepared to process
4214 	 * router advertisements, start sending router solicitations.
4215 	 */
4216 
4217 	read_lock_bh(&ifp->idev->lock);
4218 	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4219 	send_rs = send_mld &&
4220 		  ipv6_accept_ra(ifp->idev) &&
4221 		  ifp->idev->cnf.rtr_solicits != 0 &&
4222 		  (dev->flags & IFF_LOOPBACK) == 0 &&
4223 		  (dev->type != ARPHRD_TUNNEL);
4224 	read_unlock_bh(&ifp->idev->lock);
4225 
4226 	/* While DAD is in progress the MLD report's source address is
4227 	 * in6addr_any. Resend it with the proper link-local address now.
4228 	 */
4229 	if (send_mld)
4230 		ipv6_mc_dad_complete(ifp->idev);
4231 
4232 	/* send unsolicited NA if enabled */
4233 	if (send_na &&
4234 	    (ifp->idev->cnf.ndisc_notify ||
4235 	     dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4236 		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4237 			      /*router=*/ !!ifp->idev->cnf.forwarding,
4238 			      /*solicited=*/ false, /*override=*/ true,
4239 			      /*inc_opt=*/ true);
4240 	}
4241 
4242 	if (send_rs) {
4243 		/*
4244 		 *	If a host has already performed a random delay
4245 		 *	[...] as part of DAD [...] there is no need
4246 		 *	to delay again before sending the first RS
4247 		 */
4248 		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4249 			return;
4250 		ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4251 
4252 		write_lock_bh(&ifp->idev->lock);
4253 		spin_lock(&ifp->lock);
4254 		ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4255 			ifp->idev->cnf.rtr_solicit_interval);
4256 		ifp->idev->rs_probes = 1;
4257 		ifp->idev->if_flags |= IF_RS_SENT;
4258 		addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4259 		spin_unlock(&ifp->lock);
4260 		write_unlock_bh(&ifp->idev->lock);
4261 	}
4262 
4263 	if (bump_id)
4264 		rt_genid_bump_ipv6(dev_net(dev));
4265 
4266 	/* Make sure that a new temporary address will be created
4267 	 * before this temporary address becomes deprecated.
4268 	 */
4269 	if (ifp->flags & IFA_F_TEMPORARY)
4270 		addrconf_verify_rtnl(dev_net(dev));
4271 }
4272 
4273 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4274 {
4275 	struct inet6_ifaddr *ifp;
4276 
4277 	read_lock_bh(&idev->lock);
4278 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
4279 		spin_lock(&ifp->lock);
4280 		if ((ifp->flags & IFA_F_TENTATIVE &&
4281 		     ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4282 			if (restart)
4283 				ifp->state = INET6_IFADDR_STATE_PREDAD;
4284 			addrconf_dad_kick(ifp);
4285 		}
4286 		spin_unlock(&ifp->lock);
4287 	}
4288 	read_unlock_bh(&idev->lock);
4289 }
4290 
4291 #ifdef CONFIG_PROC_FS
4292 struct if6_iter_state {
4293 	struct seq_net_private p;
4294 	int bucket;
4295 	int offset;
4296 };
4297 
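/*
 * /proc/net/if_inet6 iterator: 'bucket' indexes the global address hash
 * table and 'offset' counts entries already emitted within that bucket, so
 * a seq_file restart can resume from the right position.
 */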
4298 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4299 {
4300 	struct if6_iter_state *state = seq->private;
4301 	struct net *net = seq_file_net(seq);
4302 	struct inet6_ifaddr *ifa = NULL;
4303 	int p = 0;
4304 
4305 	/* initial bucket if pos is 0 */
4306 	if (pos == 0) {
4307 		state->bucket = 0;
4308 		state->offset = 0;
4309 	}
4310 
4311 	for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4312 		hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket],
4313 					 addr_lst) {
4314 			/* sync with offset */
4315 			if (p < state->offset) {
4316 				p++;
4317 				continue;
4318 			}
4319 			return ifa;
4320 		}
4321 
4322 		/* prepare for next bucket */
4323 		state->offset = 0;
4324 		p = 0;
4325 	}
4326 	return NULL;
4327 }
4328 
4329 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4330 					 struct inet6_ifaddr *ifa)
4331 {
4332 	struct if6_iter_state *state = seq->private;
4333 	struct net *net = seq_file_net(seq);
4334 
4335 	hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4336 		state->offset++;
4337 		return ifa;
4338 	}
4339 
4340 	state->offset = 0;
4341 	while (++state->bucket < IN6_ADDR_HSIZE) {
4342 		hlist_for_each_entry_rcu(ifa,
4343 				     &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) {
4344 			return ifa;
4345 		}
4346 	}
4347 
4348 	return NULL;
4349 }
4350 
4351 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4352 	__acquires(rcu)
4353 {
4354 	rcu_read_lock();
4355 	return if6_get_first(seq, *pos);
4356 }
4357 
4358 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4359 {
4360 	struct inet6_ifaddr *ifa;
4361 
4362 	ifa = if6_get_next(seq, v);
4363 	++*pos;
4364 	return ifa;
4365 }
4366 
4367 static void if6_seq_stop(struct seq_file *seq, void *v)
4368 	__releases(rcu)
4369 {
4370 	rcu_read_unlock();
4371 }
4372 
4373 static int if6_seq_show(struct seq_file *seq, void *v)
4374 {
4375 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4376 	seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4377 		   &ifp->addr,
4378 		   ifp->idev->dev->ifindex,
4379 		   ifp->prefix_len,
4380 		   ifp->scope,
4381 		   (u8) ifp->flags,
4382 		   ifp->idev->dev->name);
4383 	return 0;
4384 }
4385 
4386 static const struct seq_operations if6_seq_ops = {
4387 	.start	= if6_seq_start,
4388 	.next	= if6_seq_next,
4389 	.show	= if6_seq_show,
4390 	.stop	= if6_seq_stop,
4391 };
4392 
4393 static int __net_init if6_proc_net_init(struct net *net)
4394 {
4395 	if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4396 			sizeof(struct if6_iter_state)))
4397 		return -ENOMEM;
4398 	return 0;
4399 }
4400 
4401 static void __net_exit if6_proc_net_exit(struct net *net)
4402 {
4403 	remove_proc_entry("if_inet6", net->proc_net);
4404 }
4405 
4406 static struct pernet_operations if6_proc_net_ops = {
4407 	.init = if6_proc_net_init,
4408 	.exit = if6_proc_net_exit,
4409 };
4410 
4411 int __init if6_proc_init(void)
4412 {
4413 	return register_pernet_subsys(&if6_proc_net_ops);
4414 }
4415 
4416 void if6_proc_exit(void)
4417 {
4418 	unregister_pernet_subsys(&if6_proc_net_ops);
4419 }
4420 #endif	/* CONFIG_PROC_FS */
4421 
4422 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4423 /* Check if address is a home address configured on any interface. */
4424 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4425 {
4426 	unsigned int hash = inet6_addr_hash(net, addr);
4427 	struct inet6_ifaddr *ifp = NULL;
4428 	int ret = 0;
4429 
4430 	rcu_read_lock();
4431 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4432 		if (ipv6_addr_equal(&ifp->addr, addr) &&
4433 		    (ifp->flags & IFA_F_HOMEADDRESS)) {
4434 			ret = 1;
4435 			break;
4436 		}
4437 	}
4438 	rcu_read_unlock();
4439 	return ret;
4440 }
4441 #endif
4442 
4443 /* RFC 6554 describes an algorithm to avoid loops in segment routing by
4444  * checking whether the segment list contains local interface addresses.
4445  *
4446  * Quote:
4447  *
4448  * To detect loops in the SRH, a router MUST determine if the SRH
4449  * includes multiple addresses assigned to any interface on that router.
4450  * If such addresses appear more than once and are separated by at least
4451  * one address not assigned to that router, the router MUST drop the packet.
4452  */
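/*
 * Example against the implementation below: with local addresses L1, L2, L3
 * and a foreign address X, the list [L1, L2, X, L3] is flagged as a loop
 * (at L3, found > 1 and a non-local segment separated the local hits),
 * whereas [L1, X, L2] is not, since only one local address was seen before
 * the separation.
 */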
4453 int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4454 			  unsigned char nsegs)
4455 {
4456 	const struct in6_addr *addr;
4457 	int i, ret = 0, found = 0;
4458 	struct inet6_ifaddr *ifp;
4459 	bool separated = false;
4460 	unsigned int hash;
4461 	bool hash_found;
4462 
4463 	rcu_read_lock();
4464 	for (i = 0; i < nsegs; i++) {
4465 		addr = &segs[i];
4466 		hash = inet6_addr_hash(net, addr);
4467 
4468 		hash_found = false;
4469 		hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4470 
4471 			if (ipv6_addr_equal(&ifp->addr, addr)) {
4472 				hash_found = true;
4473 				break;
4474 			}
4475 		}
4476 
4477 		if (hash_found) {
4478 			if (found > 1 && separated) {
4479 				ret = 1;
4480 				break;
4481 			}
4482 
4483 			separated = false;
4484 			found++;
4485 		} else {
4486 			separated = true;
4487 		}
4488 	}
4489 	rcu_read_unlock();
4490 
4491 	return ret;
4492 }
4493 
4494 /*
4495  *	Periodic address status verification
4496  */
4497 
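/*
 * addrconf_verify_rtnl() walks the whole address hash table under RTNL:
 * addresses past valid_lft are deleted, those past prefered_lft are marked
 * IFA_F_DEPRECATED, temporary addresses are regenerated shortly before they
 * would be deprecated, and the delayed work is rescheduled for the earliest
 * upcoming expiry (clamped between ADDRCONF_TIMER_FUZZ_MAX and
 * ADDR_CHECK_FREQUENCY).
 */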
4498 static void addrconf_verify_rtnl(struct net *net)
4499 {
4500 	unsigned long now, next, next_sec, next_sched;
4501 	struct inet6_ifaddr *ifp;
4502 	int i;
4503 
4504 	ASSERT_RTNL();
4505 
4506 	rcu_read_lock_bh();
4507 	now = jiffies;
4508 	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4509 
4510 	cancel_delayed_work(&net->ipv6.addr_chk_work);
4511 
4512 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4513 restart:
4514 		hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) {
4515 			unsigned long age;
4516 
4517 			/* An IFA_F_PERMANENT address can still have a finite
4518 			 * preferred lifetime (while valid_lft stays infinite),
4519 			 * so only skip it if preferred_lft is infinite too.
4520 			 */
4521 			if ((ifp->flags & IFA_F_PERMANENT) &&
4522 			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
4523 				continue;
4524 
4525 			spin_lock(&ifp->lock);
4526 			/* We try to batch several events at once. */
4527 			age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4528 
4529 			if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4530 			    age >= ifp->valid_lft) {
4531 				spin_unlock(&ifp->lock);
4532 				in6_ifa_hold(ifp);
4533 				rcu_read_unlock_bh();
4534 				ipv6_del_addr(ifp);
4535 				rcu_read_lock_bh();
4536 				goto restart;
4537 			} else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4538 				spin_unlock(&ifp->lock);
4539 				continue;
4540 			} else if (age >= ifp->prefered_lft) {
4541 				/* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4542 				int deprecate = 0;
4543 
4544 				if (!(ifp->flags&IFA_F_DEPRECATED)) {
4545 					deprecate = 1;
4546 					ifp->flags |= IFA_F_DEPRECATED;
4547 				}
4548 
4549 				if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4550 				    (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4551 					next = ifp->tstamp + ifp->valid_lft * HZ;
4552 
4553 				spin_unlock(&ifp->lock);
4554 
4555 				if (deprecate) {
4556 					in6_ifa_hold(ifp);
4557 
4558 					ipv6_ifa_notify(0, ifp);
4559 					in6_ifa_put(ifp);
4560 					goto restart;
4561 				}
4562 			} else if ((ifp->flags&IFA_F_TEMPORARY) &&
4563 				   !(ifp->flags&IFA_F_TENTATIVE)) {
4564 				unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4565 					ifp->idev->cnf.dad_transmits *
4566 					max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
4567 
4568 				if (age >= ifp->prefered_lft - regen_advance) {
4569 					struct inet6_ifaddr *ifpub = ifp->ifpub;
4570 					if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4571 						next = ifp->tstamp + ifp->prefered_lft * HZ;
4572 					if (!ifp->regen_count && ifpub) {
4573 						ifp->regen_count++;
4574 						in6_ifa_hold(ifp);
4575 						in6_ifa_hold(ifpub);
4576 						spin_unlock(&ifp->lock);
4577 
4578 						spin_lock(&ifpub->lock);
4579 						ifpub->regen_count = 0;
4580 						spin_unlock(&ifpub->lock);
4581 						rcu_read_unlock_bh();
4582 						ipv6_create_tempaddr(ifpub, true);
4583 						in6_ifa_put(ifpub);
4584 						in6_ifa_put(ifp);
4585 						rcu_read_lock_bh();
4586 						goto restart;
4587 					}
4588 				} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4589 					next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4590 				spin_unlock(&ifp->lock);
4591 			} else {
4592 				/* ifp->prefered_lft <= ifp->valid_lft */
4593 				if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4594 					next = ifp->tstamp + ifp->prefered_lft * HZ;
4595 				spin_unlock(&ifp->lock);
4596 			}
4597 		}
4598 	}
4599 
4600 	next_sec = round_jiffies_up(next);
4601 	next_sched = next;
4602 
4603 	/* If rounded timeout is accurate enough, accept it. */
4604 	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4605 		next_sched = next_sec;
4606 
4607 	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4608 	if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4609 		next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4610 
4611 	pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4612 		 now, next, next_sec, next_sched);
4613 	mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now);
4614 	rcu_read_unlock_bh();
4615 }
4616 
4617 static void addrconf_verify_work(struct work_struct *w)
4618 {
4619 	struct net *net = container_of(to_delayed_work(w), struct net,
4620 				       ipv6.addr_chk_work);
4621 
4622 	rtnl_lock();
4623 	addrconf_verify_rtnl(net);
4624 	rtnl_unlock();
4625 }
4626 
4627 static void addrconf_verify(struct net *net)
4628 {
4629 	mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0);
4630 }
4631 
4632 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4633 				     struct in6_addr **peer_pfx)
4634 {
4635 	struct in6_addr *pfx = NULL;
4636 
4637 	*peer_pfx = NULL;
4638 
4639 	if (addr)
4640 		pfx = nla_data(addr);
4641 
4642 	if (local) {
4643 		if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4644 			*peer_pfx = pfx;
4645 		pfx = nla_data(local);
4646 	}
4647 
4648 	return pfx;
4649 }
4650 
4651 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4652 	[IFA_ADDRESS]		= { .len = sizeof(struct in6_addr) },
4653 	[IFA_LOCAL]		= { .len = sizeof(struct in6_addr) },
4654 	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
4655 	[IFA_FLAGS]		= { .len = sizeof(u32) },
4656 	[IFA_RT_PRIORITY]	= { .len = sizeof(u32) },
4657 	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
4658 	[IFA_PROTO]		= { .type = NLA_U8 },
4659 };
4660 
4661 static int
4662 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4663 		  struct netlink_ext_ack *extack)
4664 {
4665 	struct net *net = sock_net(skb->sk);
4666 	struct ifaddrmsg *ifm;
4667 	struct nlattr *tb[IFA_MAX+1];
4668 	struct in6_addr *pfx, *peer_pfx;
4669 	u32 ifa_flags;
4670 	int err;
4671 
4672 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4673 				     ifa_ipv6_policy, extack);
4674 	if (err < 0)
4675 		return err;
4676 
4677 	ifm = nlmsg_data(nlh);
4678 	pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4679 	if (!pfx)
4680 		return -EINVAL;
4681 
4682 	ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4683 
4684 	/* We ignore other flags so far. */
4685 	ifa_flags &= IFA_F_MANAGETEMPADDR;
4686 
4687 	return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4688 			      ifm->ifa_prefixlen);
4689 }
4690 
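/*
 * modify_prefix_route() updates the prefix route backing an address: if the
 * requested metric (rt_priority, defaulting to IP6_RT_PRIO_ADDRCONF) differs
 * from the installed route's metric, the route is deleted and re-added;
 * otherwise only its expiry is updated (or cleared for an infinite
 * lifetime). Returns -ENOENT when no prefix route exists.
 */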
4691 static int modify_prefix_route(struct inet6_ifaddr *ifp,
4692 			       unsigned long expires, u32 flags,
4693 			       bool modify_peer)
4694 {
4695 	struct fib6_info *f6i;
4696 	u32 prio;
4697 
4698 	f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4699 					ifp->prefix_len,
4700 					ifp->idev->dev, 0, RTF_DEFAULT, true);
4701 	if (!f6i)
4702 		return -ENOENT;
4703 
4704 	prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4705 	if (f6i->fib6_metric != prio) {
4706 		/* delete old one */
4707 		ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4708 
4709 		/* add new one */
4710 		addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4711 				      ifp->prefix_len,
4712 				      ifp->rt_priority, ifp->idev->dev,
4713 				      expires, flags, GFP_KERNEL);
4714 	} else {
4715 		if (!expires)
4716 			fib6_clean_expires(f6i);
4717 		else
4718 			fib6_set_expires(f6i, expires);
4719 
4720 		fib6_info_release(f6i);
4721 	}
4722 
4723 	return 0;
4724 }
4725 
4726 static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
4727 			     struct ifa6_config *cfg)
4728 {
4729 	u32 flags;
4730 	clock_t expires;
4731 	unsigned long timeout;
4732 	bool was_managetempaddr;
4733 	bool had_prefixroute;
4734 	bool new_peer = false;
4735 
4736 	ASSERT_RTNL();
4737 
4738 	if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4739 		return -EINVAL;
4740 
4741 	if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4742 	    (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4743 		return -EINVAL;
4744 
4745 	if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4746 		cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4747 
4748 	timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4749 	if (addrconf_finite_timeout(timeout)) {
4750 		expires = jiffies_to_clock_t(timeout * HZ);
4751 		cfg->valid_lft = timeout;
4752 		flags = RTF_EXPIRES;
4753 	} else {
4754 		expires = 0;
4755 		flags = 0;
4756 		cfg->ifa_flags |= IFA_F_PERMANENT;
4757 	}
4758 
4759 	timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4760 	if (addrconf_finite_timeout(timeout)) {
4761 		if (timeout == 0)
4762 			cfg->ifa_flags |= IFA_F_DEPRECATED;
4763 		cfg->preferred_lft = timeout;
4764 	}
4765 
4766 	if (cfg->peer_pfx &&
4767 	    memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4768 		if (!ipv6_addr_any(&ifp->peer_addr))
4769 			cleanup_prefix_route(ifp, expires, true, true);
4770 		new_peer = true;
4771 	}
4772 
4773 	spin_lock_bh(&ifp->lock);
4774 	was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4775 	had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4776 			  !(ifp->flags & IFA_F_NOPREFIXROUTE);
4777 	ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4778 			IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4779 			IFA_F_NOPREFIXROUTE);
4780 	ifp->flags |= cfg->ifa_flags;
4781 	ifp->tstamp = jiffies;
4782 	ifp->valid_lft = cfg->valid_lft;
4783 	ifp->prefered_lft = cfg->preferred_lft;
4784 	ifp->ifa_proto = cfg->ifa_proto;
4785 
4786 	if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4787 		ifp->rt_priority = cfg->rt_priority;
4788 
4789 	if (new_peer)
4790 		ifp->peer_addr = *cfg->peer_pfx;
4791 
4792 	spin_unlock_bh(&ifp->lock);
4793 	if (!(ifp->flags&IFA_F_TENTATIVE))
4794 		ipv6_ifa_notify(0, ifp);
4795 
4796 	if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4797 		int rc = -ENOENT;
4798 
4799 		if (had_prefixroute)
4800 			rc = modify_prefix_route(ifp, expires, flags, false);
4801 
4802 		/* prefix route could have been deleted; if so restore it */
4803 		if (rc == -ENOENT) {
4804 			addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4805 					      ifp->rt_priority, ifp->idev->dev,
4806 					      expires, flags, GFP_KERNEL);
4807 		}
4808 
4809 		if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4810 			rc = modify_prefix_route(ifp, expires, flags, true);
4811 
4812 		if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4813 			addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4814 					      ifp->rt_priority, ifp->idev->dev,
4815 					      expires, flags, GFP_KERNEL);
4816 		}
4817 	} else if (had_prefixroute) {
4818 		enum cleanup_prefix_rt_t action;
4819 		unsigned long rt_expires;
4820 
4821 		write_lock_bh(&ifp->idev->lock);
4822 		action = check_cleanup_prefix_route(ifp, &rt_expires);
4823 		write_unlock_bh(&ifp->idev->lock);
4824 
4825 		if (action != CLEANUP_PREFIX_RT_NOP) {
4826 			cleanup_prefix_route(ifp, rt_expires,
4827 				action == CLEANUP_PREFIX_RT_DEL, false);
4828 		}
4829 	}
4830 
4831 	if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4832 		if (was_managetempaddr &&
4833 		    !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
4834 			cfg->valid_lft = 0;
4835 			cfg->preferred_lft = 0;
4836 		}
4837 		manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4838 				 cfg->preferred_lft, !was_managetempaddr,
4839 				 jiffies);
4840 	}
4841 
4842 	addrconf_verify_rtnl(net);
4843 
4844 	return 0;
4845 }
4846 
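/*
 * RTM_NEWADDR handler: the netlink attributes are parsed into an
 * ifa6_config; if no matching address exists, a new one is added, otherwise
 * the existing address is modified unless NLM_F_EXCL is set or
 * NLM_F_REPLACE is missing (in which case -EEXIST is returned).
 */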
4847 static int
4848 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4849 		  struct netlink_ext_ack *extack)
4850 {
4851 	struct net *net = sock_net(skb->sk);
4852 	struct ifaddrmsg *ifm;
4853 	struct nlattr *tb[IFA_MAX+1];
4854 	struct in6_addr *peer_pfx;
4855 	struct inet6_ifaddr *ifa;
4856 	struct net_device *dev;
4857 	struct inet6_dev *idev;
4858 	struct ifa6_config cfg;
4859 	int err;
4860 
4861 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4862 				     ifa_ipv6_policy, extack);
4863 	if (err < 0)
4864 		return err;
4865 
4866 	memset(&cfg, 0, sizeof(cfg));
4867 
4868 	ifm = nlmsg_data(nlh);
4869 	cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4870 	if (!cfg.pfx)
4871 		return -EINVAL;
4872 
4873 	cfg.peer_pfx = peer_pfx;
4874 	cfg.plen = ifm->ifa_prefixlen;
4875 	if (tb[IFA_RT_PRIORITY])
4876 		cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4877 
4878 	if (tb[IFA_PROTO])
4879 		cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]);
4880 
4881 	cfg.valid_lft = INFINITY_LIFE_TIME;
4882 	cfg.preferred_lft = INFINITY_LIFE_TIME;
4883 
4884 	if (tb[IFA_CACHEINFO]) {
4885 		struct ifa_cacheinfo *ci;
4886 
4887 		ci = nla_data(tb[IFA_CACHEINFO]);
4888 		cfg.valid_lft = ci->ifa_valid;
4889 		cfg.preferred_lft = ci->ifa_prefered;
4890 	}
4891 
4892 	dev = __dev_get_by_index(net, ifm->ifa_index);
4893 	if (!dev)
4894 		return -ENODEV;
4895 
4896 	if (tb[IFA_FLAGS])
4897 		cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
4898 	else
4899 		cfg.ifa_flags = ifm->ifa_flags;
4900 
4901 	/* We ignore other flags so far. */
4902 	cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4903 			 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4904 			 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4905 
4906 	idev = ipv6_find_idev(dev);
4907 	if (IS_ERR(idev))
4908 		return PTR_ERR(idev);
4909 
4910 	if (!ipv6_allow_optimistic_dad(net, idev))
4911 		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
4912 
4913 	if (cfg.ifa_flags & IFA_F_NODAD &&
4914 	    cfg.ifa_flags & IFA_F_OPTIMISTIC) {
4915 		NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
4916 		return -EINVAL;
4917 	}
4918 
4919 	ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
4920 	if (!ifa) {
4921 		/*
4922 		 * It would be best to check for !NLM_F_CREATE here but
4923 		 * userspace already relies on not having to provide this.
4924 		 */
4925 		return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
4926 	}
4927 
4928 	if (nlh->nlmsg_flags & NLM_F_EXCL ||
4929 	    !(nlh->nlmsg_flags & NLM_F_REPLACE))
4930 		err = -EEXIST;
4931 	else
4932 		err = inet6_addr_modify(net, ifa, &cfg);
4933 
4934 	in6_ifa_put(ifa);
4935 
4936 	return err;
4937 }
4938 
4939 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4940 			  u8 scope, int ifindex)
4941 {
4942 	struct ifaddrmsg *ifm;
4943 
4944 	ifm = nlmsg_data(nlh);
4945 	ifm->ifa_family = AF_INET6;
4946 	ifm->ifa_prefixlen = prefixlen;
4947 	ifm->ifa_flags = flags;
4948 	ifm->ifa_scope = scope;
4949 	ifm->ifa_index = ifindex;
4950 }
4951 
4952 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4953 			 unsigned long tstamp, u32 preferred, u32 valid)
4954 {
4955 	struct ifa_cacheinfo ci;
4956 
4957 	ci.cstamp = cstamp_delta(cstamp);
4958 	ci.tstamp = cstamp_delta(tstamp);
4959 	ci.ifa_prefered = preferred;
4960 	ci.ifa_valid = valid;
4961 
4962 	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4963 }
4964 
4965 static inline int rt_scope(int ifa_scope)
4966 {
4967 	if (ifa_scope & IFA_HOST)
4968 		return RT_SCOPE_HOST;
4969 	else if (ifa_scope & IFA_LINK)
4970 		return RT_SCOPE_LINK;
4971 	else if (ifa_scope & IFA_SITE)
4972 		return RT_SCOPE_SITE;
4973 	else
4974 		return RT_SCOPE_UNIVERSE;
4975 }
4976 
4977 static inline int inet6_ifaddr_msgsize(void)
4978 {
4979 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4980 	       + nla_total_size(16) /* IFA_LOCAL */
4981 	       + nla_total_size(16) /* IFA_ADDRESS */
4982 	       + nla_total_size(sizeof(struct ifa_cacheinfo))
4983 	       + nla_total_size(4)  /* IFA_FLAGS */
4984 	       + nla_total_size(1)  /* IFA_PROTO */
4985 	       + nla_total_size(4)  /* IFA_RT_PRIORITY */;
4986 }
4987 
4988 enum addr_type_t {
4989 	UNICAST_ADDR,
4990 	MULTICAST_ADDR,
4991 	ANYCAST_ADDR,
4992 };
4993 
4994 struct inet6_fill_args {
4995 	u32 portid;
4996 	u32 seq;
4997 	int event;
4998 	unsigned int flags;
4999 	int netnsid;
5000 	int ifindex;
5001 	enum addr_type_t type;
5002 };
5003 
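/*
 * inet6_fill_ifaddr() dumps one address. The advertised preferred/valid
 * lifetimes are the remaining ones: the time elapsed since ifa->tstamp is
 * subtracted unless the corresponding lifetime is infinite.
 */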
5004 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
5005 			     struct inet6_fill_args *args)
5006 {
5007 	struct nlmsghdr  *nlh;
5008 	u32 preferred, valid;
5009 
5010 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5011 			sizeof(struct ifaddrmsg), args->flags);
5012 	if (!nlh)
5013 		return -EMSGSIZE;
5014 
5015 	put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
5016 		      ifa->idev->dev->ifindex);
5017 
5018 	if (args->netnsid >= 0 &&
5019 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5020 		goto error;
5021 
5022 	spin_lock_bh(&ifa->lock);
5023 	if (!((ifa->flags&IFA_F_PERMANENT) &&
5024 	      (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
5025 		preferred = ifa->prefered_lft;
5026 		valid = ifa->valid_lft;
5027 		if (preferred != INFINITY_LIFE_TIME) {
5028 			long tval = (jiffies - ifa->tstamp)/HZ;
5029 			if (preferred > tval)
5030 				preferred -= tval;
5031 			else
5032 				preferred = 0;
5033 			if (valid != INFINITY_LIFE_TIME) {
5034 				if (valid > tval)
5035 					valid -= tval;
5036 				else
5037 					valid = 0;
5038 			}
5039 		}
5040 	} else {
5041 		preferred = INFINITY_LIFE_TIME;
5042 		valid = INFINITY_LIFE_TIME;
5043 	}
5044 	spin_unlock_bh(&ifa->lock);
5045 
5046 	if (!ipv6_addr_any(&ifa->peer_addr)) {
5047 		if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5048 		    nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5049 			goto error;
5050 	} else
5051 		if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5052 			goto error;
5053 
5054 	if (ifa->rt_priority &&
5055 	    nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
5056 		goto error;
5057 
5058 	if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
5059 		goto error;
5060 
5061 	if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
5062 		goto error;
5063 
5064 	if (ifa->ifa_proto &&
5065 	    nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto))
5066 		goto error;
5067 
5068 	nlmsg_end(skb, nlh);
5069 	return 0;
5070 
5071 error:
5072 	nlmsg_cancel(skb, nlh);
5073 	return -EMSGSIZE;
5074 }
5075 
5076 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
5077 			       struct inet6_fill_args *args)
5078 {
5079 	struct nlmsghdr  *nlh;
5080 	u8 scope = RT_SCOPE_UNIVERSE;
5081 	int ifindex = ifmca->idev->dev->ifindex;
5082 
5083 	if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5084 		scope = RT_SCOPE_SITE;
5085 
5086 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5087 			sizeof(struct ifaddrmsg), args->flags);
5088 	if (!nlh)
5089 		return -EMSGSIZE;
5090 
5091 	if (args->netnsid >= 0 &&
5092 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5093 		nlmsg_cancel(skb, nlh);
5094 		return -EMSGSIZE;
5095 	}
5096 
5097 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5098 	if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5099 	    put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
5100 			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5101 		nlmsg_cancel(skb, nlh);
5102 		return -EMSGSIZE;
5103 	}
5104 
5105 	nlmsg_end(skb, nlh);
5106 	return 0;
5107 }
5108 
5109 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
5110 			       struct inet6_fill_args *args)
5111 {
5112 	struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5113 	int ifindex = dev ? dev->ifindex : 1;
5114 	struct nlmsghdr  *nlh;
5115 	u8 scope = RT_SCOPE_UNIVERSE;
5116 
5117 	if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5118 		scope = RT_SCOPE_SITE;
5119 
5120 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5121 			sizeof(struct ifaddrmsg), args->flags);
5122 	if (!nlh)
5123 		return -EMSGSIZE;
5124 
5125 	if (args->netnsid >= 0 &&
5126 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5127 		nlmsg_cancel(skb, nlh);
5128 		return -EMSGSIZE;
5129 	}
5130 
5131 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5132 	if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5133 	    put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
5134 			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5135 		nlmsg_cancel(skb, nlh);
5136 		return -EMSGSIZE;
5137 	}
5138 
5139 	nlmsg_end(skb, nlh);
5140 	return 0;
5141 }
5142 
5143 /* called with rcu_read_lock() */
5144 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
5145 			  struct netlink_callback *cb, int s_ip_idx,
5146 			  struct inet6_fill_args *fillargs)
5147 {
5148 	struct ifmcaddr6 *ifmca;
5149 	struct ifacaddr6 *ifaca;
5150 	int ip_idx = 0;
5151 	int err = 1;
5152 
5153 	read_lock_bh(&idev->lock);
5154 	switch (fillargs->type) {
5155 	case UNICAST_ADDR: {
5156 		struct inet6_ifaddr *ifa;
5157 		fillargs->event = RTM_NEWADDR;
5158 
5159 		/* unicast address incl. temp addr */
5160 		list_for_each_entry(ifa, &idev->addr_list, if_list) {
5161 			if (ip_idx < s_ip_idx)
5162 				goto next;
5163 			err = inet6_fill_ifaddr(skb, ifa, fillargs);
5164 			if (err < 0)
5165 				break;
5166 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5167 next:
5168 			ip_idx++;
5169 		}
5170 		break;
5171 	}
5172 	case MULTICAST_ADDR:
5173 		read_unlock_bh(&idev->lock);
5174 		fillargs->event = RTM_GETMULTICAST;
5175 
5176 		/* multicast address */
5177 		for (ifmca = rcu_dereference(idev->mc_list);
5178 		     ifmca;
5179 		     ifmca = rcu_dereference(ifmca->next), ip_idx++) {
5180 			if (ip_idx < s_ip_idx)
5181 				continue;
5182 			err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5183 			if (err < 0)
5184 				break;
5185 		}
5186 		read_lock_bh(&idev->lock);
5187 		break;
5188 	case ANYCAST_ADDR:
5189 		fillargs->event = RTM_GETANYCAST;
5190 		/* anycast address */
5191 		for (ifaca = idev->ac_list; ifaca;
5192 		     ifaca = ifaca->aca_next, ip_idx++) {
5193 			if (ip_idx < s_ip_idx)
5194 				continue;
5195 			err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5196 			if (err < 0)
5197 				break;
5198 		}
5199 		break;
5200 	default:
5201 		break;
5202 	}
5203 	read_unlock_bh(&idev->lock);
5204 	cb->args[2] = ip_idx;
5205 	return err;
5206 }
5207 
5208 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5209 				       struct inet6_fill_args *fillargs,
5210 				       struct net **tgt_net, struct sock *sk,
5211 				       struct netlink_callback *cb)
5212 {
5213 	struct netlink_ext_ack *extack = cb->extack;
5214 	struct nlattr *tb[IFA_MAX+1];
5215 	struct ifaddrmsg *ifm;
5216 	int err, i;
5217 
5218 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5219 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5220 		return -EINVAL;
5221 	}
5222 
5223 	ifm = nlmsg_data(nlh);
5224 	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5225 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5226 		return -EINVAL;
5227 	}
5228 
5229 	fillargs->ifindex = ifm->ifa_index;
5230 	if (fillargs->ifindex) {
5231 		cb->answer_flags |= NLM_F_DUMP_FILTERED;
5232 		fillargs->flags |= NLM_F_DUMP_FILTERED;
5233 	}
5234 
5235 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5236 					    ifa_ipv6_policy, extack);
5237 	if (err < 0)
5238 		return err;
5239 
5240 	for (i = 0; i <= IFA_MAX; ++i) {
5241 		if (!tb[i])
5242 			continue;
5243 
5244 		if (i == IFA_TARGET_NETNSID) {
5245 			struct net *net;
5246 
5247 			fillargs->netnsid = nla_get_s32(tb[i]);
5248 			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5249 			if (IS_ERR(net)) {
5250 				fillargs->netnsid = -1;
5251 				NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5252 				return PTR_ERR(net);
5253 			}
5254 			*tgt_net = net;
5255 		} else {
5256 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
5257 			return -EINVAL;
5258 		}
5259 	}
5260 
5261 	return 0;
5262 }
5263 
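/*
 * Address dump: cb->args[0] holds the device hash bucket, cb->args[1] the
 * device index within that bucket, and cb->args[2] the address index within
 * the device, so an interrupted dump can resume where it left off.
 */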
5264 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5265 			   enum addr_type_t type)
5266 {
5267 	const struct nlmsghdr *nlh = cb->nlh;
5268 	struct inet6_fill_args fillargs = {
5269 		.portid = NETLINK_CB(cb->skb).portid,
5270 		.seq = cb->nlh->nlmsg_seq,
5271 		.flags = NLM_F_MULTI,
5272 		.netnsid = -1,
5273 		.type = type,
5274 	};
5275 	struct net *tgt_net = sock_net(skb->sk);
5276 	int idx, s_idx, s_ip_idx;
5277 	int h, s_h;
5278 	struct net_device *dev;
5279 	struct inet6_dev *idev;
5280 	struct hlist_head *head;
5281 	int err = 0;
5282 
5283 	s_h = cb->args[0];
5284 	s_idx = idx = cb->args[1];
5285 	s_ip_idx = cb->args[2];
5286 
5287 	if (cb->strict_check) {
5288 		err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5289 						  skb->sk, cb);
5290 		if (err < 0)
5291 			goto put_tgt_net;
5292 
5293 		err = 0;
5294 		if (fillargs.ifindex) {
5295 			dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
5296 			if (!dev) {
5297 				err = -ENODEV;
5298 				goto put_tgt_net;
5299 			}
5300 			idev = __in6_dev_get(dev);
5301 			if (idev) {
5302 				err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5303 						     &fillargs);
5304 				if (err > 0)
5305 					err = 0;
5306 			}
5307 			goto put_tgt_net;
5308 		}
5309 	}
5310 
5311 	rcu_read_lock();
5312 	cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
5313 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5314 		idx = 0;
5315 		head = &tgt_net->dev_index_head[h];
5316 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
5317 			if (idx < s_idx)
5318 				goto cont;
5319 			if (h > s_h || idx > s_idx)
5320 				s_ip_idx = 0;
5321 			idev = __in6_dev_get(dev);
5322 			if (!idev)
5323 				goto cont;
5324 
5325 			if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
5326 					   &fillargs) < 0)
5327 				goto done;
5328 cont:
5329 			idx++;
5330 		}
5331 	}
5332 done:
5333 	rcu_read_unlock();
5334 	cb->args[0] = h;
5335 	cb->args[1] = idx;
5336 put_tgt_net:
5337 	if (fillargs.netnsid >= 0)
5338 		put_net(tgt_net);
5339 
5340 	return skb->len ? : err;
5341 }
5342 
5343 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5344 {
5345 	enum addr_type_t type = UNICAST_ADDR;
5346 
5347 	return inet6_dump_addr(skb, cb, type);
5348 }
5349 
5350 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5351 {
5352 	enum addr_type_t type = MULTICAST_ADDR;
5353 
5354 	return inet6_dump_addr(skb, cb, type);
5355 }
5356 
5357 
5358 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5359 {
5360 	enum addr_type_t type = ANYCAST_ADDR;
5361 
5362 	return inet6_dump_addr(skb, cb, type);
5363 }
5364 
5365 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5366 				       const struct nlmsghdr *nlh,
5367 				       struct nlattr **tb,
5368 				       struct netlink_ext_ack *extack)
5369 {
5370 	struct ifaddrmsg *ifm;
5371 	int i, err;
5372 
5373 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5374 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5375 		return -EINVAL;
5376 	}
5377 
5378 	if (!netlink_strict_get_check(skb))
5379 		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5380 					      ifa_ipv6_policy, extack);
5381 
5382 	ifm = nlmsg_data(nlh);
5383 	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5384 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5385 		return -EINVAL;
5386 	}
5387 
5388 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5389 					    ifa_ipv6_policy, extack);
5390 	if (err)
5391 		return err;
5392 
5393 	for (i = 0; i <= IFA_MAX; i++) {
5394 		if (!tb[i])
5395 			continue;
5396 
5397 		switch (i) {
5398 		case IFA_TARGET_NETNSID:
5399 		case IFA_ADDRESS:
5400 		case IFA_LOCAL:
5401 			break;
5402 		default:
5403 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
5404 			return -EINVAL;
5405 		}
5406 	}
5407 
5408 	return 0;
5409 }
5410 
5411 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5412 			     struct netlink_ext_ack *extack)
5413 {
5414 	struct net *tgt_net = sock_net(in_skb->sk);
5415 	struct inet6_fill_args fillargs = {
5416 		.portid = NETLINK_CB(in_skb).portid,
5417 		.seq = nlh->nlmsg_seq,
5418 		.event = RTM_NEWADDR,
5419 		.flags = 0,
5420 		.netnsid = -1,
5421 	};
5422 	struct ifaddrmsg *ifm;
5423 	struct nlattr *tb[IFA_MAX+1];
5424 	struct in6_addr *addr = NULL, *peer;
5425 	struct net_device *dev = NULL;
5426 	struct inet6_ifaddr *ifa;
5427 	struct sk_buff *skb;
5428 	int err;
5429 
5430 	err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5431 	if (err < 0)
5432 		return err;
5433 
5434 	if (tb[IFA_TARGET_NETNSID]) {
5435 		fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5436 
5437 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5438 						  fillargs.netnsid);
5439 		if (IS_ERR(tgt_net))
5440 			return PTR_ERR(tgt_net);
5441 	}
5442 
5443 	addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5444 	if (!addr)
5445 		return -EINVAL;
5446 
5447 	ifm = nlmsg_data(nlh);
5448 	if (ifm->ifa_index)
5449 		dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5450 
5451 	ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5452 	if (!ifa) {
5453 		err = -EADDRNOTAVAIL;
5454 		goto errout;
5455 	}
5456 
5457 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5458 	if (!skb) {
5459 		err = -ENOBUFS;
5460 		goto errout_ifa;
5461 	}
5462 
5463 	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5464 	if (err < 0) {
5465 		/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5466 		WARN_ON(err == -EMSGSIZE);
5467 		kfree_skb(skb);
5468 		goto errout_ifa;
5469 	}
5470 	err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5471 errout_ifa:
5472 	in6_ifa_put(ifa);
5473 errout:
5474 	dev_put(dev);
5475 	if (fillargs.netnsid >= 0)
5476 		put_net(tgt_net);
5477 
5478 	return err;
5479 }
5480 
5481 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5482 {
5483 	struct sk_buff *skb;
5484 	struct net *net = dev_net(ifa->idev->dev);
5485 	struct inet6_fill_args fillargs = {
5486 		.portid = 0,
5487 		.seq = 0,
5488 		.event = event,
5489 		.flags = 0,
5490 		.netnsid = -1,
5491 	};
5492 	int err = -ENOBUFS;
5493 
5494 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5495 	if (!skb)
5496 		goto errout;
5497 
5498 	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5499 	if (err < 0) {
5500 		/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5501 		WARN_ON(err == -EMSGSIZE);
5502 		kfree_skb(skb);
5503 		goto errout;
5504 	}
5505 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5506 	return;
5507 errout:
5508 	if (err < 0)
5509 		rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5510 }
5511 
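/*
 * ipv6_store_devconf() flattens the per-device configuration into the s32
 * array exported as IFLA_INET6_CONF; slots are indexed by the DEVCONF_*
 * enum, and jiffies-based intervals are exported in milliseconds.
 */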
5512 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5513 				__s32 *array, int bytes)
5514 {
5515 	BUG_ON(bytes < (DEVCONF_MAX * 4));
5516 
5517 	memset(array, 0, bytes);
5518 	array[DEVCONF_FORWARDING] = cnf->forwarding;
5519 	array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5520 	array[DEVCONF_MTU6] = cnf->mtu6;
5521 	array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5522 	array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5523 	array[DEVCONF_AUTOCONF] = cnf->autoconf;
5524 	array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5525 	array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5526 	array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5527 		jiffies_to_msecs(cnf->rtr_solicit_interval);
5528 	array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5529 		jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5530 	array[DEVCONF_RTR_SOLICIT_DELAY] =
5531 		jiffies_to_msecs(cnf->rtr_solicit_delay);
5532 	array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5533 	array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5534 		jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5535 	array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5536 		jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5537 	array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5538 	array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5539 	array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5540 	array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5541 	array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5542 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5543 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5544 	array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
5545 	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5546 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5547 #ifdef CONFIG_IPV6_ROUTER_PREF
5548 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5549 	array[DEVCONF_RTR_PROBE_INTERVAL] =
5550 		jiffies_to_msecs(cnf->rtr_probe_interval);
5551 #ifdef CONFIG_IPV6_ROUTE_INFO
5552 	array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5553 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5554 #endif
5555 #endif
5556 	array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5557 	array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5558 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5559 	array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5560 	array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5561 #endif
5562 #ifdef CONFIG_IPV6_MROUTE
5563 	array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
5564 #endif
5565 	array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5566 	array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5567 	array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5568 	array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5569 	array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5570 	array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5571 	array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5572 	array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5573 	/* we omit DEVCONF_STABLE_SECRET for now */
5574 	array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5575 	array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5576 	array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5577 	array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5578 	array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5579 #ifdef CONFIG_IPV6_SEG6_HMAC
5580 	array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5581 #endif
5582 	array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5583 	array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5584 	array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5585 	array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5586 	array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
5587 	array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
5588 	array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
5589 	array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
5590 	array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
5591 	array[DEVCONF_ACCEPT_UNSOLICITED_NA] = cnf->accept_unsolicited_na;
5592 }
5593 
5594 static inline size_t inet6_ifla6_size(void)
5595 {
5596 	return nla_total_size(4) /* IFLA_INET6_FLAGS */
5597 	     + nla_total_size(sizeof(struct ifla_cacheinfo))
5598 	     + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5599 	     + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5600 	     + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5601 	     + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5602 	     + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5603 	     + nla_total_size(4) /* IFLA_INET6_RA_MTU */
5604 	     + 0;
5605 }
5606 
5607 static inline size_t inet6_if_nlmsg_size(void)
5608 {
5609 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5610 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5611 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5612 	       + nla_total_size(4) /* IFLA_MTU */
5613 	       + nla_total_size(4) /* IFLA_LINK */
5614 	       + nla_total_size(1) /* IFLA_OPERSTATE */
5615 	       + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5616 }
5617 
5618 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5619 					int bytes)
5620 {
5621 	int i;
5622 	int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5623 	BUG_ON(pad < 0);
5624 
5625 	/* Use put_unaligned() because stats may not be aligned for u64. */
5626 	put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5627 	for (i = 1; i < ICMP6_MIB_MAX; i++)
5628 		put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5629 
5630 	memset(&stats[ICMP6_MIB_MAX], 0, pad);
5631 }
5632 
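/*
 * __snmp6_fill_stats64() sums the per-CPU IPSTATS counters into a local
 * buffer (slot 0 is set to IPSTATS_MIB_MAX, i.e. the number of counters)
 * and zero-pads the destination up to 'bytes'.
 */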
5633 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5634 					int bytes, size_t syncpoff)
5635 {
5636 	int i, c;
5637 	u64 buff[IPSTATS_MIB_MAX];
5638 	int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5639 
5640 	BUG_ON(pad < 0);
5641 
5642 	memset(buff, 0, sizeof(buff));
5643 	buff[0] = IPSTATS_MIB_MAX;
5644 
5645 	for_each_possible_cpu(c) {
5646 		for (i = 1; i < IPSTATS_MIB_MAX; i++)
5647 			buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5648 	}
5649 
5650 	memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5651 	memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5652 }
5653 
5654 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5655 			     int bytes)
5656 {
5657 	switch (attrtype) {
5658 	case IFLA_INET6_STATS:
5659 		__snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5660 				     offsetof(struct ipstats_mib, syncp));
5661 		break;
5662 	case IFLA_INET6_ICMP6STATS:
5663 		__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5664 		break;
5665 	}
5666 }
5667 
5668 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5669 				  u32 ext_filter_mask)
5670 {
5671 	struct nlattr *nla;
5672 	struct ifla_cacheinfo ci;
5673 
5674 	if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5675 		goto nla_put_failure;
5676 	ci.max_reasm_len = IPV6_MAXPLEN;
5677 	ci.tstamp = cstamp_delta(idev->tstamp);
5678 	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5679 	ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5680 	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5681 		goto nla_put_failure;
5682 	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5683 	if (!nla)
5684 		goto nla_put_failure;
5685 	ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5686 
5687 	/* XXX - MC not implemented */
5688 
5689 	if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5690 		return 0;
5691 
5692 	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5693 	if (!nla)
5694 		goto nla_put_failure;
5695 	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5696 
5697 	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5698 	if (!nla)
5699 		goto nla_put_failure;
5700 	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5701 
5702 	nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5703 	if (!nla)
5704 		goto nla_put_failure;
5705 	read_lock_bh(&idev->lock);
5706 	memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5707 	read_unlock_bh(&idev->lock);
5708 
5709 	if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5710 		goto nla_put_failure;
5711 
5712 	if (idev->ra_mtu &&
5713 	    nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
5714 		goto nla_put_failure;
5715 
5716 	return 0;
5717 
5718 nla_put_failure:
5719 	return -EMSGSIZE;
5720 }
5721 
5722 static size_t inet6_get_link_af_size(const struct net_device *dev,
5723 				     u32 ext_filter_mask)
5724 {
5725 	if (!__in6_dev_get(dev))
5726 		return 0;
5727 
5728 	return inet6_ifla6_size();
5729 }
5730 
5731 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5732 			      u32 ext_filter_mask)
5733 {
5734 	struct inet6_dev *idev = __in6_dev_get(dev);
5735 
5736 	if (!idev)
5737 		return -ENODATA;
5738 
5739 	if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5740 		return -EMSGSIZE;
5741 
5742 	return 0;
5743 }
5744 
5745 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
5746 			     struct netlink_ext_ack *extack)
5747 {
5748 	struct inet6_ifaddr *ifp;
5749 	struct net_device *dev = idev->dev;
5750 	bool clear_token, update_rs = false;
5751 	struct in6_addr ll_addr;
5752 
5753 	ASSERT_RTNL();
5754 
5755 	if (!token)
5756 		return -EINVAL;
5757 
5758 	if (dev->flags & IFF_LOOPBACK) {
5759 		NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
5760 		return -EINVAL;
5761 	}
5762 
5763 	if (dev->flags & IFF_NOARP) {
5764 		NL_SET_ERR_MSG_MOD(extack,
5765 				   "Device does not do neighbour discovery");
5766 		return -EINVAL;
5767 	}
5768 
5769 	if (!ipv6_accept_ra(idev)) {
5770 		NL_SET_ERR_MSG_MOD(extack,
5771 				   "Router advertisement is disabled on device");
5772 		return -EINVAL;
5773 	}
5774 
5775 	if (idev->cnf.rtr_solicits == 0) {
5776 		NL_SET_ERR_MSG(extack,
5777 			       "Router solicitation is disabled on device");
5778 		return -EINVAL;
5779 	}
5780 
5781 	write_lock_bh(&idev->lock);
5782 
5783 	BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5784 	memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5785 
5786 	write_unlock_bh(&idev->lock);
5787 
5788 	clear_token = ipv6_addr_any(token);
5789 	if (clear_token)
5790 		goto update_lft;
5791 
5792 	if (!idev->dead && (idev->if_flags & IF_READY) &&
5793 	    !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5794 			     IFA_F_OPTIMISTIC)) {
5795 		/* If the device isn't ready, normal ifup will take care of
5796 		 * this; otherwise send a router solicitation now to pick up prefixes for the new token.
5797 		 */
5798 		ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5799 		update_rs = true;
5800 	}
5801 
5802 update_lft:
5803 	write_lock_bh(&idev->lock);
5804 
5805 	if (update_rs) {
5806 		idev->if_flags |= IF_RS_SENT;
5807 		idev->rs_interval = rfc3315_s14_backoff_init(
5808 			idev->cnf.rtr_solicit_interval);
5809 		idev->rs_probes = 1;
5810 		addrconf_mod_rs_timer(idev, idev->rs_interval);
5811 	}
5812 
5813 	/* Expire addresses built from the old token; replacements are formed from the new one when the next RA is processed. */
5814 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
5815 		spin_lock(&ifp->lock);
5816 		if (ifp->tokenized) {
5817 			ifp->valid_lft = 0;
5818 			ifp->prefered_lft = 0;
5819 		}
5820 		spin_unlock(&ifp->lock);
5821 	}
5822 
5823 	write_unlock_bh(&idev->lock);
5824 	inet6_ifinfo_notify(RTM_NEWLINK, idev);
5825 	addrconf_verify_rtnl(dev_net(dev));
5826 	return 0;
5827 }
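
/* For illustration: the token is normally set from userspace over rtnetlink
 * (an IFLA_INET6_TOKEN attribute handled by inet6_set_link_af() below),
 * e.g. with iproute2:
 *
 *	ip token set ::0123:4567:89ab:cdef dev eth0
 *
 * Only the lower 64 bits of the token are used; writing the unspecified
 * address (::) clears it again.
 */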
5828 
5829 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5830 	[IFLA_INET6_ADDR_GEN_MODE]	= { .type = NLA_U8 },
5831 	[IFLA_INET6_TOKEN]		= { .len = sizeof(struct in6_addr) },
5832 	[IFLA_INET6_RA_MTU]		= { .type = NLA_REJECT,
5833 					    .reject_message =
5834 						"IFLA_INET6_RA_MTU can not be set" },
5835 };
5836 
5837 static int check_addr_gen_mode(int mode)
5838 {
5839 	if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5840 	    mode != IN6_ADDR_GEN_MODE_NONE &&
5841 	    mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5842 	    mode != IN6_ADDR_GEN_MODE_RANDOM)
5843 		return -EINVAL;
5844 	return 1;
5845 }
5846 
5847 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5848 				int mode)
5849 {
5850 	if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5851 	    !idev->cnf.stable_secret.initialized &&
5852 	    !net->ipv6.devconf_dflt->stable_secret.initialized)
5853 		return -EINVAL;
5854 	return 1;
5855 }
5856 
5857 static int inet6_validate_link_af(const struct net_device *dev,
5858 				  const struct nlattr *nla,
5859 				  struct netlink_ext_ack *extack)
5860 {
5861 	struct nlattr *tb[IFLA_INET6_MAX + 1];
5862 	struct inet6_dev *idev = NULL;
5863 	int err;
5864 
5865 	if (dev) {
5866 		idev = __in6_dev_get(dev);
5867 		if (!idev)
5868 			return -EAFNOSUPPORT;
5869 	}
5870 
5871 	err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
5872 					  inet6_af_policy, extack);
5873 	if (err)
5874 		return err;
5875 
5876 	if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
5877 		return -EINVAL;
5878 
5879 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5880 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5881 
5882 		if (check_addr_gen_mode(mode) < 0)
5883 			return -EINVAL;
5884 		if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
5885 			return -EINVAL;
5886 	}
5887 
5888 	return 0;
5889 }
5890 
5891 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
5892 			     struct netlink_ext_ack *extack)
5893 {
5894 	struct inet6_dev *idev = __in6_dev_get(dev);
5895 	struct nlattr *tb[IFLA_INET6_MAX + 1];
5896 	int err;
5897 
5898 	if (!idev)
5899 		return -EAFNOSUPPORT;
5900 
5901 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5902 		return -EINVAL;
5903 
5904 	if (tb[IFLA_INET6_TOKEN]) {
5905 		err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
5906 					extack);
5907 		if (err)
5908 			return err;
5909 	}
5910 
5911 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5912 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5913 
5914 		idev->cnf.addr_gen_mode = mode;
5915 	}
5916 
5917 	return 0;
5918 }
5919 
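/* Build the AF_INET6 link message for one device (ifinfomsg plus IFLA_*
 * attributes, with the IFLA_INET6_* set nested under IFLA_PROTINFO); used
 * by the ifinfo dump below and multicast to RTNLGRP_IPV6_IFINFO from
 * inet6_ifinfo_notify().
 */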
5920 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5921 			     u32 portid, u32 seq, int event, unsigned int flags)
5922 {
5923 	struct net_device *dev = idev->dev;
5924 	struct ifinfomsg *hdr;
5925 	struct nlmsghdr *nlh;
5926 	void *protoinfo;
5927 
5928 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5929 	if (!nlh)
5930 		return -EMSGSIZE;
5931 
5932 	hdr = nlmsg_data(nlh);
5933 	hdr->ifi_family = AF_INET6;
5934 	hdr->__ifi_pad = 0;
5935 	hdr->ifi_type = dev->type;
5936 	hdr->ifi_index = dev->ifindex;
5937 	hdr->ifi_flags = dev_get_flags(dev);
5938 	hdr->ifi_change = 0;
5939 
5940 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5941 	    (dev->addr_len &&
5942 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5943 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5944 	    (dev->ifindex != dev_get_iflink(dev) &&
5945 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5946 	    nla_put_u8(skb, IFLA_OPERSTATE,
5947 		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5948 		goto nla_put_failure;
5949 	protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
5950 	if (!protoinfo)
5951 		goto nla_put_failure;
5952 
5953 	if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5954 		goto nla_put_failure;
5955 
5956 	nla_nest_end(skb, protoinfo);
5957 	nlmsg_end(skb, nlh);
5958 	return 0;
5959 
5960 nla_put_failure:
5961 	nlmsg_cancel(skb, nlh);
5962 	return -EMSGSIZE;
5963 }
5964 
5965 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
5966 				   struct netlink_ext_ack *extack)
5967 {
5968 	struct ifinfomsg *ifm;
5969 
5970 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5971 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
5972 		return -EINVAL;
5973 	}
5974 
5975 	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
5976 		NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
5977 		return -EINVAL;
5978 	}
5979 
5980 	ifm = nlmsg_data(nlh);
5981 	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5982 	    ifm->ifi_change || ifm->ifi_index) {
5983 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
5984 		return -EINVAL;
5985 	}
5986 
5987 	return 0;
5988 }
5989 
5990 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5991 {
5992 	struct net *net = sock_net(skb->sk);
5993 	int h, s_h;
5994 	int idx = 0, s_idx;
5995 	struct net_device *dev;
5996 	struct inet6_dev *idev;
5997 	struct hlist_head *head;
5998 
5999 	/* only requests using strict checking can pass data to
6000 	 * influence the dump
6001 	 */
6002 	if (cb->strict_check) {
6003 		int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
6004 
6005 		if (err < 0)
6006 			return err;
6007 	}
6008 
6009 	s_h = cb->args[0];
6010 	s_idx = cb->args[1];
6011 
6012 	rcu_read_lock();
6013 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
6014 		idx = 0;
6015 		head = &net->dev_index_head[h];
6016 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
6017 			if (idx < s_idx)
6018 				goto cont;
6019 			idev = __in6_dev_get(dev);
6020 			if (!idev)
6021 				goto cont;
6022 			if (inet6_fill_ifinfo(skb, idev,
6023 					      NETLINK_CB(cb->skb).portid,
6024 					      cb->nlh->nlmsg_seq,
6025 					      RTM_NEWLINK, NLM_F_MULTI) < 0)
6026 				goto out;
6027 cont:
6028 			idx++;
6029 		}
6030 	}
6031 out:
6032 	rcu_read_unlock();
6033 	cb->args[1] = idx;
6034 	cb->args[0] = h;
6035 
6036 	return skb->len;
6037 }
6038 
6039 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
6040 {
6041 	struct sk_buff *skb;
6042 	struct net *net = dev_net(idev->dev);
6043 	int err = -ENOBUFS;
6044 
6045 	skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
6046 	if (!skb)
6047 		goto errout;
6048 
6049 	err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
6050 	if (err < 0) {
6051 		/* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6052 		WARN_ON(err == -EMSGSIZE);
6053 		kfree_skb(skb);
6054 		goto errout;
6055 	}
6056 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6057 	return;
6058 errout:
6059 	if (err < 0)
6060 		rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6061 }
6062 
6063 static inline size_t inet6_prefix_nlmsg_size(void)
6064 {
6065 	return NLMSG_ALIGN(sizeof(struct prefixmsg))
6066 	       + nla_total_size(sizeof(struct in6_addr))
6067 	       + nla_total_size(sizeof(struct prefix_cacheinfo));
6068 }
6069 
6070 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
6071 			     struct prefix_info *pinfo, u32 portid, u32 seq,
6072 			     int event, unsigned int flags)
6073 {
6074 	struct prefixmsg *pmsg;
6075 	struct nlmsghdr *nlh;
6076 	struct prefix_cacheinfo	ci;
6077 
6078 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
6079 	if (!nlh)
6080 		return -EMSGSIZE;
6081 
6082 	pmsg = nlmsg_data(nlh);
6083 	pmsg->prefix_family = AF_INET6;
6084 	pmsg->prefix_pad1 = 0;
6085 	pmsg->prefix_pad2 = 0;
6086 	pmsg->prefix_ifindex = idev->dev->ifindex;
6087 	pmsg->prefix_len = pinfo->prefix_len;
6088 	pmsg->prefix_type = pinfo->type;
6089 	pmsg->prefix_pad3 = 0;
6090 	pmsg->prefix_flags = 0;
6091 	if (pinfo->onlink)
6092 		pmsg->prefix_flags |= IF_PREFIX_ONLINK;
6093 	if (pinfo->autoconf)
6094 		pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
6095 
6096 	if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
6097 		goto nla_put_failure;
6098 	ci.preferred_time = ntohl(pinfo->prefered);
6099 	ci.valid_time = ntohl(pinfo->valid);
6100 	if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
6101 		goto nla_put_failure;
6102 	nlmsg_end(skb, nlh);
6103 	return 0;
6104 
6105 nla_put_failure:
6106 	nlmsg_cancel(skb, nlh);
6107 	return -EMSGSIZE;
6108 }
6109 
6110 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6111 			 struct prefix_info *pinfo)
6112 {
6113 	struct sk_buff *skb;
6114 	struct net *net = dev_net(idev->dev);
6115 	int err = -ENOBUFS;
6116 
6117 	skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6118 	if (!skb)
6119 		goto errout;
6120 
6121 	err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6122 	if (err < 0) {
6123 		/* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6124 		WARN_ON(err == -EMSGSIZE);
6125 		kfree_skb(skb);
6126 		goto errout;
6127 	}
6128 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6129 	return;
6130 errout:
6131 	if (err < 0)
6132 		rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
6133 }
6134 
6135 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6136 {
6137 	struct net *net = dev_net(ifp->idev->dev);
6138 
6139 	if (event)
6140 		ASSERT_RTNL();
6141 
6142 	inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6143 
6144 	switch (event) {
6145 	case RTM_NEWADDR:
6146 		/*
6147 		 * If the address was optimistic we inserted the route at the
6148 		 * start of our DAD process, so we don't need to do it again.
6149 		 * If the device was taken down in the middle of the DAD
6150 		 * cycle there is a race where we could get here without a
6151 		 * host route, so nothing to insert. That will be fixed when
6152 		 * the device is brought up.
6153 		 */
6154 		if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6155 			ip6_ins_rt(net, ifp->rt);
6156 		} else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6157 			pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6158 				&ifp->addr, ifp->idev->dev->name);
6159 		}
6160 
6161 		if (ifp->idev->cnf.forwarding)
6162 			addrconf_join_anycast(ifp);
6163 		if (!ipv6_addr_any(&ifp->peer_addr))
6164 			addrconf_prefix_route(&ifp->peer_addr, 128,
6165 					      ifp->rt_priority, ifp->idev->dev,
6166 					      0, 0, GFP_ATOMIC);
6167 		break;
6168 	case RTM_DELADDR:
6169 		if (ifp->idev->cnf.forwarding)
6170 			addrconf_leave_anycast(ifp);
6171 		addrconf_leave_solict(ifp->idev, &ifp->addr);
6172 		if (!ipv6_addr_any(&ifp->peer_addr)) {
6173 			struct fib6_info *rt;
6174 
6175 			rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6176 						       ifp->idev->dev, 0, 0,
6177 						       false);
6178 			if (rt)
6179 				ip6_del_rt(net, rt, false);
6180 		}
6181 		if (ifp->rt) {
6182 			ip6_del_rt(net, ifp->rt, false);
6183 			ifp->rt = NULL;
6184 		}
6185 		rt_genid_bump_ipv6(net);
6186 		break;
6187 	}
6188 	atomic_inc(&net->ipv6.dev_addr_genid);
6189 }
6190 
6191 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6192 {
6193 	if (likely(ifp->idev->dead == 0))
6194 		__ipv6_ifa_notify(event, ifp);
6195 }
6196 
6197 #ifdef CONFIG_SYSCTL
6198 
6199 static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
6200 		void *buffer, size_t *lenp, loff_t *ppos)
6201 {
6202 	int *valp = ctl->data;
6203 	int val = *valp;
6204 	loff_t pos = *ppos;
6205 	struct ctl_table lctl;
6206 	int ret;
6207 
6208 	/*
6209 	 * ctl->data points to idev->cnf.forwarding; we should
6210 	 * not modify it until we get the rtnl lock.
6211 	 */
6212 	lctl = *ctl;
6213 	lctl.data = &val;
6214 
6215 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6216 
6217 	if (write)
6218 		ret = addrconf_fixup_forwarding(ctl, valp, val);
6219 	if (ret)
6220 		*ppos = pos;
6221 	return ret;
6222 }
6223 
6224 static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
6225 		void *buffer, size_t *lenp, loff_t *ppos)
6226 {
6227 	struct inet6_dev *idev = ctl->extra1;
6228 	int min_mtu = IPV6_MIN_MTU;
6229 	struct ctl_table lctl;
6230 
6231 	lctl = *ctl;
6232 	lctl.extra1 = &min_mtu;
6233 	lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6234 
6235 	return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6236 }
6237 
6238 static void dev_disable_change(struct inet6_dev *idev)
6239 {
6240 	struct netdev_notifier_info info;
6241 
6242 	if (!idev || !idev->dev)
6243 		return;
6244 
6245 	netdev_notifier_info_init(&info, idev->dev);
6246 	if (idev->cnf.disable_ipv6)
6247 		addrconf_notify(NULL, NETDEV_DOWN, &info);
6248 	else
6249 		addrconf_notify(NULL, NETDEV_UP, &info);
6250 }
6251 
6252 static void addrconf_disable_change(struct net *net, __s32 newf)
6253 {
6254 	struct net_device *dev;
6255 	struct inet6_dev *idev;
6256 
6257 	for_each_netdev(net, dev) {
6258 		idev = __in6_dev_get(dev);
6259 		if (idev) {
6260 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6261 			idev->cnf.disable_ipv6 = newf;
6262 			if (changed)
6263 				dev_disable_change(idev);
6264 		}
6265 	}
6266 }
6267 
6268 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
6269 {
6270 	struct net *net;
6271 	int old;
6272 
6273 	if (!rtnl_trylock())
6274 		return restart_syscall();
6275 
6276 	net = (struct net *)table->extra2;
6277 	old = *p;
6278 	*p = newf;
6279 
6280 	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6281 		rtnl_unlock();
6282 		return 0;
6283 	}
6284 
6285 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
6286 		net->ipv6.devconf_dflt->disable_ipv6 = newf;
6287 		addrconf_disable_change(net, newf);
6288 	} else if ((!newf) ^ (!old))
6289 		dev_disable_change((struct inet6_dev *)table->extra1);
6290 
6291 	rtnl_unlock();
6292 	return 0;
6293 }
6294 
6295 static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
6296 		void *buffer, size_t *lenp, loff_t *ppos)
6297 {
6298 	int *valp = ctl->data;
6299 	int val = *valp;
6300 	loff_t pos = *ppos;
6301 	struct ctl_table lctl;
6302 	int ret;
6303 
6304 	/*
6305 	 * ctl->data points to idev->cnf.disable_ipv6, we should
6306 	 * ctl->data points to idev->cnf.disable_ipv6; we should
6307 	 */
6308 	lctl = *ctl;
6309 	lctl.data = &val;
6310 
6311 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6312 
6313 	if (write)
6314 		ret = addrconf_disable_ipv6(ctl, valp, val);
6315 	if (ret)
6316 		*ppos = pos;
6317 	return ret;
6318 }
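
/* For illustration: writing to this knob, e.g.
 *
 *	sysctl -w net.ipv6.conf.eth0.disable_ipv6=1
 *
 * ends up in addrconf_disable_ipv6() above, which takes the NETDEV_DOWN
 * path in dev_disable_change() and drops the device's IPv6 configuration;
 * writing 0 re-enables it via the NETDEV_UP path.
 */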
6319 
6320 static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
6321 		void *buffer, size_t *lenp, loff_t *ppos)
6322 {
6323 	int *valp = ctl->data;
6324 	int ret;
6325 	int old, new;
6326 
6327 	old = *valp;
6328 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6329 	new = *valp;
6330 
6331 	if (write && old != new) {
6332 		struct net *net = ctl->extra2;
6333 
6334 		if (!rtnl_trylock())
6335 			return restart_syscall();
6336 
6337 		if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
6338 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6339 						     NETCONFA_PROXY_NEIGH,
6340 						     NETCONFA_IFINDEX_DEFAULT,
6341 						     net->ipv6.devconf_dflt);
6342 		else if (valp == &net->ipv6.devconf_all->proxy_ndp)
6343 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6344 						     NETCONFA_PROXY_NEIGH,
6345 						     NETCONFA_IFINDEX_ALL,
6346 						     net->ipv6.devconf_all);
6347 		else {
6348 			struct inet6_dev *idev = ctl->extra1;
6349 
6350 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6351 						     NETCONFA_PROXY_NEIGH,
6352 						     idev->dev->ifindex,
6353 						     &idev->cnf);
6354 		}
6355 		rtnl_unlock();
6356 	}
6357 
6358 	return ret;
6359 }
6360 
6361 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
6362 					 void *buffer, size_t *lenp,
6363 					 loff_t *ppos)
6364 {
6365 	int ret = 0;
6366 	u32 new_val;
6367 	struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6368 	struct net *net = (struct net *)ctl->extra2;
6369 	struct ctl_table tmp = {
6370 		.data = &new_val,
6371 		.maxlen = sizeof(new_val),
6372 		.mode = ctl->mode,
6373 	};
6374 
6375 	if (!rtnl_trylock())
6376 		return restart_syscall();
6377 
6378 	new_val = *((u32 *)ctl->data);
6379 
6380 	ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6381 	if (ret != 0)
6382 		goto out;
6383 
6384 	if (write) {
6385 		if (check_addr_gen_mode(new_val) < 0) {
6386 			ret = -EINVAL;
6387 			goto out;
6388 		}
6389 
6390 		if (idev) {
6391 			if (check_stable_privacy(idev, net, new_val) < 0) {
6392 				ret = -EINVAL;
6393 				goto out;
6394 			}
6395 
6396 			if (idev->cnf.addr_gen_mode != new_val) {
6397 				idev->cnf.addr_gen_mode = new_val;
6398 				addrconf_dev_config(idev->dev);
6399 			}
6400 		} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6401 			struct net_device *dev;
6402 
6403 			net->ipv6.devconf_dflt->addr_gen_mode = new_val;
6404 			for_each_netdev(net, dev) {
6405 				idev = __in6_dev_get(dev);
6406 				if (idev &&
6407 				    idev->cnf.addr_gen_mode != new_val) {
6408 					idev->cnf.addr_gen_mode = new_val;
6409 					addrconf_dev_config(idev->dev);
6410 				}
6411 			}
6412 		}
6413 
6414 		*((u32 *)ctl->data) = new_val;
6415 	}
6416 
6417 out:
6418 	rtnl_unlock();
6419 
6420 	return ret;
6421 }
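
/* For illustration: the accepted values are those passed by
 * check_addr_gen_mode(), i.e. IN6_ADDR_GEN_MODE_EUI64 (0), NONE (1),
 * STABLE_PRIVACY (2) and RANDOM (3), e.g.
 *
 *	sysctl -w net.ipv6.conf.eth0.addr_gen_mode=3
 *
 * Changing the mode of a live device calls addrconf_dev_config() so the
 * link-local address can be regenerated with the new scheme.
 */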
6422 
6423 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
6424 					 void *buffer, size_t *lenp,
6425 					 loff_t *ppos)
6426 {
6427 	int err;
6428 	struct in6_addr addr;
6429 	char str[IPV6_MAX_STRLEN];
6430 	struct ctl_table lctl = *ctl;
6431 	struct net *net = ctl->extra2;
6432 	struct ipv6_stable_secret *secret = ctl->data;
6433 
6434 	if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6435 		return -EIO;
6436 
6437 	lctl.maxlen = IPV6_MAX_STRLEN;
6438 	lctl.data = str;
6439 
6440 	if (!rtnl_trylock())
6441 		return restart_syscall();
6442 
6443 	if (!write && !secret->initialized) {
6444 		err = -EIO;
6445 		goto out;
6446 	}
6447 
6448 	err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6449 	if (err >= sizeof(str)) {
6450 		err = -EIO;
6451 		goto out;
6452 	}
6453 
6454 	err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6455 	if (err || !write)
6456 		goto out;
6457 
6458 	if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6459 		err = -EIO;
6460 		goto out;
6461 	}
6462 
6463 	secret->initialized = true;
6464 	secret->secret = addr;
6465 
6466 	if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6467 		struct net_device *dev;
6468 
6469 		for_each_netdev(net, dev) {
6470 			struct inet6_dev *idev = __in6_dev_get(dev);
6471 
6472 			if (idev) {
6473 				idev->cnf.addr_gen_mode =
6474 					IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6475 			}
6476 		}
6477 	} else {
6478 		struct inet6_dev *idev = ctl->extra1;
6479 
6480 		idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6481 	}
6482 
6483 out:
6484 	rtnl_unlock();
6485 
6486 	return err;
6487 }
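
/* For illustration: the secret is read and written as an IPv6-formatted
 * 128-bit value, e.g.
 *
 *	sysctl -w net.ipv6.conf.default.stable_secret=2001:db8:1:2:3:4:5:6
 *
 * Reading it before a secret has been set fails with -EIO, the "all" entry
 * is deliberately unusable (also -EIO above), and setting the default
 * secret switches every device to IN6_ADDR_GEN_MODE_STABLE_PRIVACY.
 */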
6488 
6489 static
6490 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
6491 						int write, void *buffer,
6492 						size_t *lenp,
6493 						loff_t *ppos)
6494 {
6495 	int *valp = ctl->data;
6496 	int val = *valp;
6497 	loff_t pos = *ppos;
6498 	struct ctl_table lctl;
6499 	int ret;
6500 
6501 	/* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
6502 	 * we should not modify it until we get the rtnl lock.
6503 	 */
6504 	lctl = *ctl;
6505 	lctl.data = &val;
6506 
6507 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6508 
6509 	if (write)
6510 		ret = addrconf_fixup_linkdown(ctl, valp, val);
6511 	if (ret)
6512 		*ppos = pos;
6513 	return ret;
6514 }
6515 
6516 static
6517 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6518 {
6519 	if (rt) {
6520 		if (action)
6521 			rt->dst.flags |= DST_NOPOLICY;
6522 		else
6523 			rt->dst.flags &= ~DST_NOPOLICY;
6524 	}
6525 }
6526 
6527 static
6528 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6529 {
6530 	struct inet6_ifaddr *ifa;
6531 
6532 	read_lock_bh(&idev->lock);
6533 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
6534 		spin_lock(&ifa->lock);
6535 		if (ifa->rt) {
6536 			/* host routes only use builtin fib6_nh */
6537 			struct fib6_nh *nh = ifa->rt->fib6_nh;
6538 			int cpu;
6539 
6540 			rcu_read_lock();
6541 			ifa->rt->dst_nopolicy = val ? true : false;
6542 			if (nh->rt6i_pcpu) {
6543 				for_each_possible_cpu(cpu) {
6544 					struct rt6_info **rtp;
6545 
6546 					rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6547 					addrconf_set_nopolicy(*rtp, val);
6548 				}
6549 			}
6550 			rcu_read_unlock();
6551 		}
6552 		spin_unlock(&ifa->lock);
6553 	}
6554 	read_unlock_bh(&idev->lock);
6555 }
6556 
6557 static
6558 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
6559 {
6560 	struct inet6_dev *idev;
6561 	struct net *net;
6562 
6563 	if (!rtnl_trylock())
6564 		return restart_syscall();
6565 
6566 	*valp = val;
6567 
6568 	net = (struct net *)ctl->extra2;
6569 	if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6570 		rtnl_unlock();
6571 		return 0;
6572 	}
6573 
6574 	if (valp == &net->ipv6.devconf_all->disable_policy)  {
6575 		struct net_device *dev;
6576 
6577 		for_each_netdev(net, dev) {
6578 			idev = __in6_dev_get(dev);
6579 			if (idev)
6580 				addrconf_disable_policy_idev(idev, val);
6581 		}
6582 	} else {
6583 		idev = (struct inet6_dev *)ctl->extra1;
6584 		addrconf_disable_policy_idev(idev, val);
6585 	}
6586 
6587 	rtnl_unlock();
6588 	return 0;
6589 }
6590 
6591 static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
6592 				   void *buffer, size_t *lenp, loff_t *ppos)
6593 {
6594 	int *valp = ctl->data;
6595 	int val = *valp;
6596 	loff_t pos = *ppos;
6597 	struct ctl_table lctl;
6598 	int ret;
6599 
6600 	lctl = *ctl;
6601 	lctl.data = &val;
6602 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6603 
6604 	if (write && (*valp != val))
6605 		ret = addrconf_disable_policy(ctl, valp, val);
6606 
6607 	if (ret)
6608 		*ppos = pos;
6609 
6610 	return ret;
6611 }
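
/* For illustration: e.g.
 *
 *	sysctl -w net.ipv6.conf.all.disable_policy=1
 *
 * walks every device and sets DST_NOPOLICY on the host routes of its
 * addresses (including the per-cpu route copies), which in effect bypasses
 * xfrm (IPsec) policy checks for traffic using those routes.
 */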
6612 
6613 static int minus_one = -1;
6614 static const int two_five_five = 255;
6615 static u32 ioam6_if_id_max = U16_MAX;
6616 
6617 static const struct ctl_table addrconf_sysctl[] = {
6618 	{
6619 		.procname	= "forwarding",
6620 		.data		= &ipv6_devconf.forwarding,
6621 		.maxlen		= sizeof(int),
6622 		.mode		= 0644,
6623 		.proc_handler	= addrconf_sysctl_forward,
6624 	},
6625 	{
6626 		.procname	= "hop_limit",
6627 		.data		= &ipv6_devconf.hop_limit,
6628 		.maxlen		= sizeof(int),
6629 		.mode		= 0644,
6630 		.proc_handler	= proc_dointvec_minmax,
6631 		.extra1		= (void *)SYSCTL_ONE,
6632 		.extra2		= (void *)&two_five_five,
6633 	},
6634 	{
6635 		.procname	= "mtu",
6636 		.data		= &ipv6_devconf.mtu6,
6637 		.maxlen		= sizeof(int),
6638 		.mode		= 0644,
6639 		.proc_handler	= addrconf_sysctl_mtu,
6640 	},
6641 	{
6642 		.procname	= "accept_ra",
6643 		.data		= &ipv6_devconf.accept_ra,
6644 		.maxlen		= sizeof(int),
6645 		.mode		= 0644,
6646 		.proc_handler	= proc_dointvec,
6647 	},
6648 	{
6649 		.procname	= "accept_redirects",
6650 		.data		= &ipv6_devconf.accept_redirects,
6651 		.maxlen		= sizeof(int),
6652 		.mode		= 0644,
6653 		.proc_handler	= proc_dointvec,
6654 	},
6655 	{
6656 		.procname	= "autoconf",
6657 		.data		= &ipv6_devconf.autoconf,
6658 		.maxlen		= sizeof(int),
6659 		.mode		= 0644,
6660 		.proc_handler	= proc_dointvec,
6661 	},
6662 	{
6663 		.procname	= "dad_transmits",
6664 		.data		= &ipv6_devconf.dad_transmits,
6665 		.maxlen		= sizeof(int),
6666 		.mode		= 0644,
6667 		.proc_handler	= proc_dointvec,
6668 	},
6669 	{
6670 		.procname	= "router_solicitations",
6671 		.data		= &ipv6_devconf.rtr_solicits,
6672 		.maxlen		= sizeof(int),
6673 		.mode		= 0644,
6674 		.proc_handler	= proc_dointvec_minmax,
6675 		.extra1		= &minus_one,
6676 	},
6677 	{
6678 		.procname	= "router_solicitation_interval",
6679 		.data		= &ipv6_devconf.rtr_solicit_interval,
6680 		.maxlen		= sizeof(int),
6681 		.mode		= 0644,
6682 		.proc_handler	= proc_dointvec_jiffies,
6683 	},
6684 	{
6685 		.procname	= "router_solicitation_max_interval",
6686 		.data		= &ipv6_devconf.rtr_solicit_max_interval,
6687 		.maxlen		= sizeof(int),
6688 		.mode		= 0644,
6689 		.proc_handler	= proc_dointvec_jiffies,
6690 	},
6691 	{
6692 		.procname	= "router_solicitation_delay",
6693 		.data		= &ipv6_devconf.rtr_solicit_delay,
6694 		.maxlen		= sizeof(int),
6695 		.mode		= 0644,
6696 		.proc_handler	= proc_dointvec_jiffies,
6697 	},
6698 	{
6699 		.procname	= "force_mld_version",
6700 		.data		= &ipv6_devconf.force_mld_version,
6701 		.maxlen		= sizeof(int),
6702 		.mode		= 0644,
6703 		.proc_handler	= proc_dointvec,
6704 	},
6705 	{
6706 		.procname	= "mldv1_unsolicited_report_interval",
6707 		.data		=
6708 			&ipv6_devconf.mldv1_unsolicited_report_interval,
6709 		.maxlen		= sizeof(int),
6710 		.mode		= 0644,
6711 		.proc_handler	= proc_dointvec_ms_jiffies,
6712 	},
6713 	{
6714 		.procname	= "mldv2_unsolicited_report_interval",
6715 		.data		=
6716 			&ipv6_devconf.mldv2_unsolicited_report_interval,
6717 		.maxlen		= sizeof(int),
6718 		.mode		= 0644,
6719 		.proc_handler	= proc_dointvec_ms_jiffies,
6720 	},
6721 	{
6722 		.procname	= "use_tempaddr",
6723 		.data		= &ipv6_devconf.use_tempaddr,
6724 		.maxlen		= sizeof(int),
6725 		.mode		= 0644,
6726 		.proc_handler	= proc_dointvec,
6727 	},
6728 	{
6729 		.procname	= "temp_valid_lft",
6730 		.data		= &ipv6_devconf.temp_valid_lft,
6731 		.maxlen		= sizeof(int),
6732 		.mode		= 0644,
6733 		.proc_handler	= proc_dointvec,
6734 	},
6735 	{
6736 		.procname	= "temp_prefered_lft",
6737 		.data		= &ipv6_devconf.temp_prefered_lft,
6738 		.maxlen		= sizeof(int),
6739 		.mode		= 0644,
6740 		.proc_handler	= proc_dointvec,
6741 	},
6742 	{
6743 		.procname	= "regen_max_retry",
6744 		.data		= &ipv6_devconf.regen_max_retry,
6745 		.maxlen		= sizeof(int),
6746 		.mode		= 0644,
6747 		.proc_handler	= proc_dointvec,
6748 	},
6749 	{
6750 		.procname	= "max_desync_factor",
6751 		.data		= &ipv6_devconf.max_desync_factor,
6752 		.maxlen		= sizeof(int),
6753 		.mode		= 0644,
6754 		.proc_handler	= proc_dointvec,
6755 	},
6756 	{
6757 		.procname	= "max_addresses",
6758 		.data		= &ipv6_devconf.max_addresses,
6759 		.maxlen		= sizeof(int),
6760 		.mode		= 0644,
6761 		.proc_handler	= proc_dointvec,
6762 	},
6763 	{
6764 		.procname	= "accept_ra_defrtr",
6765 		.data		= &ipv6_devconf.accept_ra_defrtr,
6766 		.maxlen		= sizeof(int),
6767 		.mode		= 0644,
6768 		.proc_handler	= proc_dointvec,
6769 	},
6770 	{
6771 		.procname	= "ra_defrtr_metric",
6772 		.data		= &ipv6_devconf.ra_defrtr_metric,
6773 		.maxlen		= sizeof(u32),
6774 		.mode		= 0644,
6775 		.proc_handler	= proc_douintvec_minmax,
6776 		.extra1		= (void *)SYSCTL_ONE,
6777 	},
6778 	{
6779 		.procname	= "accept_ra_min_hop_limit",
6780 		.data		= &ipv6_devconf.accept_ra_min_hop_limit,
6781 		.maxlen		= sizeof(int),
6782 		.mode		= 0644,
6783 		.proc_handler	= proc_dointvec,
6784 	},
6785 	{
6786 		.procname	= "accept_ra_pinfo",
6787 		.data		= &ipv6_devconf.accept_ra_pinfo,
6788 		.maxlen		= sizeof(int),
6789 		.mode		= 0644,
6790 		.proc_handler	= proc_dointvec,
6791 	},
6792 #ifdef CONFIG_IPV6_ROUTER_PREF
6793 	{
6794 		.procname	= "accept_ra_rtr_pref",
6795 		.data		= &ipv6_devconf.accept_ra_rtr_pref,
6796 		.maxlen		= sizeof(int),
6797 		.mode		= 0644,
6798 		.proc_handler	= proc_dointvec,
6799 	},
6800 	{
6801 		.procname	= "router_probe_interval",
6802 		.data		= &ipv6_devconf.rtr_probe_interval,
6803 		.maxlen		= sizeof(int),
6804 		.mode		= 0644,
6805 		.proc_handler	= proc_dointvec_jiffies,
6806 	},
6807 #ifdef CONFIG_IPV6_ROUTE_INFO
6808 	{
6809 		.procname	= "accept_ra_rt_info_min_plen",
6810 		.data		= &ipv6_devconf.accept_ra_rt_info_min_plen,
6811 		.maxlen		= sizeof(int),
6812 		.mode		= 0644,
6813 		.proc_handler	= proc_dointvec,
6814 	},
6815 	{
6816 		.procname	= "accept_ra_rt_info_max_plen",
6817 		.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
6818 		.maxlen		= sizeof(int),
6819 		.mode		= 0644,
6820 		.proc_handler	= proc_dointvec,
6821 	},
6822 #endif
6823 #endif
6824 	{
6825 		.procname	= "proxy_ndp",
6826 		.data		= &ipv6_devconf.proxy_ndp,
6827 		.maxlen		= sizeof(int),
6828 		.mode		= 0644,
6829 		.proc_handler	= addrconf_sysctl_proxy_ndp,
6830 	},
6831 	{
6832 		.procname	= "accept_source_route",
6833 		.data		= &ipv6_devconf.accept_source_route,
6834 		.maxlen		= sizeof(int),
6835 		.mode		= 0644,
6836 		.proc_handler	= proc_dointvec,
6837 	},
6838 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6839 	{
6840 		.procname	= "optimistic_dad",
6841 		.data		= &ipv6_devconf.optimistic_dad,
6842 		.maxlen		= sizeof(int),
6843 		.mode		= 0644,
6844 		.proc_handler   = proc_dointvec,
6845 	},
6846 	{
6847 		.procname	= "use_optimistic",
6848 		.data		= &ipv6_devconf.use_optimistic,
6849 		.maxlen		= sizeof(int),
6850 		.mode		= 0644,
6851 		.proc_handler	= proc_dointvec,
6852 	},
6853 #endif
6854 #ifdef CONFIG_IPV6_MROUTE
6855 	{
6856 		.procname	= "mc_forwarding",
6857 		.data		= &ipv6_devconf.mc_forwarding,
6858 		.maxlen		= sizeof(int),
6859 		.mode		= 0444,
6860 		.proc_handler	= proc_dointvec,
6861 	},
6862 #endif
6863 	{
6864 		.procname	= "disable_ipv6",
6865 		.data		= &ipv6_devconf.disable_ipv6,
6866 		.maxlen		= sizeof(int),
6867 		.mode		= 0644,
6868 		.proc_handler	= addrconf_sysctl_disable,
6869 	},
6870 	{
6871 		.procname	= "accept_dad",
6872 		.data		= &ipv6_devconf.accept_dad,
6873 		.maxlen		= sizeof(int),
6874 		.mode		= 0644,
6875 		.proc_handler	= proc_dointvec,
6876 	},
6877 	{
6878 		.procname	= "force_tllao",
6879 		.data		= &ipv6_devconf.force_tllao,
6880 		.maxlen		= sizeof(int),
6881 		.mode		= 0644,
6882 		.proc_handler	= proc_dointvec
6883 	},
6884 	{
6885 		.procname	= "ndisc_notify",
6886 		.data		= &ipv6_devconf.ndisc_notify,
6887 		.maxlen		= sizeof(int),
6888 		.mode		= 0644,
6889 		.proc_handler	= proc_dointvec
6890 	},
6891 	{
6892 		.procname	= "suppress_frag_ndisc",
6893 		.data		= &ipv6_devconf.suppress_frag_ndisc,
6894 		.maxlen		= sizeof(int),
6895 		.mode		= 0644,
6896 		.proc_handler	= proc_dointvec
6897 	},
6898 	{
6899 		.procname	= "accept_ra_from_local",
6900 		.data		= &ipv6_devconf.accept_ra_from_local,
6901 		.maxlen		= sizeof(int),
6902 		.mode		= 0644,
6903 		.proc_handler	= proc_dointvec,
6904 	},
6905 	{
6906 		.procname	= "accept_ra_mtu",
6907 		.data		= &ipv6_devconf.accept_ra_mtu,
6908 		.maxlen		= sizeof(int),
6909 		.mode		= 0644,
6910 		.proc_handler	= proc_dointvec,
6911 	},
6912 	{
6913 		.procname	= "stable_secret",
6914 		.data		= &ipv6_devconf.stable_secret,
6915 		.maxlen		= IPV6_MAX_STRLEN,
6916 		.mode		= 0600,
6917 		.proc_handler	= addrconf_sysctl_stable_secret,
6918 	},
6919 	{
6920 		.procname	= "use_oif_addrs_only",
6921 		.data		= &ipv6_devconf.use_oif_addrs_only,
6922 		.maxlen		= sizeof(int),
6923 		.mode		= 0644,
6924 		.proc_handler	= proc_dointvec,
6925 	},
6926 	{
6927 		.procname	= "ignore_routes_with_linkdown",
6928 		.data		= &ipv6_devconf.ignore_routes_with_linkdown,
6929 		.maxlen		= sizeof(int),
6930 		.mode		= 0644,
6931 		.proc_handler	= addrconf_sysctl_ignore_routes_with_linkdown,
6932 	},
6933 	{
6934 		.procname	= "drop_unicast_in_l2_multicast",
6935 		.data		= &ipv6_devconf.drop_unicast_in_l2_multicast,
6936 		.maxlen		= sizeof(int),
6937 		.mode		= 0644,
6938 		.proc_handler	= proc_dointvec,
6939 	},
6940 	{
6941 		.procname	= "drop_unsolicited_na",
6942 		.data		= &ipv6_devconf.drop_unsolicited_na,
6943 		.maxlen		= sizeof(int),
6944 		.mode		= 0644,
6945 		.proc_handler	= proc_dointvec,
6946 	},
6947 	{
6948 		.procname	= "keep_addr_on_down",
6949 		.data		= &ipv6_devconf.keep_addr_on_down,
6950 		.maxlen		= sizeof(int),
6951 		.mode		= 0644,
6952 		.proc_handler	= proc_dointvec,
6953 
6954 	},
6955 	{
6956 		.procname	= "seg6_enabled",
6957 		.data		= &ipv6_devconf.seg6_enabled,
6958 		.maxlen		= sizeof(int),
6959 		.mode		= 0644,
6960 		.proc_handler	= proc_dointvec,
6961 	},
6962 #ifdef CONFIG_IPV6_SEG6_HMAC
6963 	{
6964 		.procname	= "seg6_require_hmac",
6965 		.data		= &ipv6_devconf.seg6_require_hmac,
6966 		.maxlen		= sizeof(int),
6967 		.mode		= 0644,
6968 		.proc_handler	= proc_dointvec,
6969 	},
6970 #endif
6971 	{
6972 		.procname       = "enhanced_dad",
6973 		.data           = &ipv6_devconf.enhanced_dad,
6974 		.maxlen         = sizeof(int),
6975 		.mode           = 0644,
6976 		.proc_handler   = proc_dointvec,
6977 	},
6978 	{
6979 		.procname	= "addr_gen_mode",
6980 		.data		= &ipv6_devconf.addr_gen_mode,
6981 		.maxlen		= sizeof(int),
6982 		.mode		= 0644,
6983 		.proc_handler	= addrconf_sysctl_addr_gen_mode,
6984 	},
6985 	{
6986 		.procname       = "disable_policy",
6987 		.data           = &ipv6_devconf.disable_policy,
6988 		.maxlen         = sizeof(int),
6989 		.mode           = 0644,
6990 		.proc_handler   = addrconf_sysctl_disable_policy,
6991 	},
6992 	{
6993 		.procname	= "ndisc_tclass",
6994 		.data		= &ipv6_devconf.ndisc_tclass,
6995 		.maxlen		= sizeof(int),
6996 		.mode		= 0644,
6997 		.proc_handler	= proc_dointvec_minmax,
6998 		.extra1		= (void *)SYSCTL_ZERO,
6999 		.extra2		= (void *)&two_five_five,
7000 	},
7001 	{
7002 		.procname	= "rpl_seg_enabled",
7003 		.data		= &ipv6_devconf.rpl_seg_enabled,
7004 		.maxlen		= sizeof(int),
7005 		.mode		= 0644,
7006 		.proc_handler	= proc_dointvec,
7007 	},
7008 	{
7009 		.procname	= "ioam6_enabled",
7010 		.data		= &ipv6_devconf.ioam6_enabled,
7011 		.maxlen		= sizeof(u8),
7012 		.mode		= 0644,
7013 		.proc_handler	= proc_dou8vec_minmax,
7014 		.extra1		= (void *)SYSCTL_ZERO,
7015 		.extra2		= (void *)SYSCTL_ONE,
7016 	},
7017 	{
7018 		.procname	= "ioam6_id",
7019 		.data		= &ipv6_devconf.ioam6_id,
7020 		.maxlen		= sizeof(u32),
7021 		.mode		= 0644,
7022 		.proc_handler	= proc_douintvec_minmax,
7023 		.extra1		= (void *)SYSCTL_ZERO,
7024 		.extra2		= (void *)&ioam6_if_id_max,
7025 	},
7026 	{
7027 		.procname	= "ioam6_id_wide",
7028 		.data		= &ipv6_devconf.ioam6_id_wide,
7029 		.maxlen		= sizeof(u32),
7030 		.mode		= 0644,
7031 		.proc_handler	= proc_douintvec,
7032 	},
7033 	{
7034 		.procname	= "ndisc_evict_nocarrier",
7035 		.data		= &ipv6_devconf.ndisc_evict_nocarrier,
7036 		.maxlen		= sizeof(u8),
7037 		.mode		= 0644,
7038 		.proc_handler	= proc_dou8vec_minmax,
7039 		.extra1		= (void *)SYSCTL_ZERO,
7040 		.extra2		= (void *)SYSCTL_ONE,
7041 	},
7042 	{
7043 		.procname	= "accept_unsolicited_na",
7044 		.data		= &ipv6_devconf.accept_unsolicited_na,
7045 		.maxlen		= sizeof(int),
7046 		.mode		= 0644,
7047 		.proc_handler	= proc_dointvec_minmax,
7048 		.extra1		= (void *)SYSCTL_ZERO,
7049 		.extra2		= (void *)SYSCTL_ONE,
7050 	},
7051 	{
7052 		/* sentinel */
7053 	}
7054 };
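
/* Each entry above shows up as /proc/sys/net/ipv6/conf/<device>/<procname>
 * (plus the "all" and "default" pseudo-entries); __addrconf_sysctl_register()
 * below rebinds .data from the global ipv6_devconf template to the given
 * ipv6_devconf instance before registering the table.
 */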
7055 
7056 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
7057 		struct inet6_dev *idev, struct ipv6_devconf *p)
7058 {
7059 	int i, ifindex;
7060 	struct ctl_table *table;
7061 	char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
7062 
7063 	table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
7064 	if (!table)
7065 		goto out;
7066 
7067 	for (i = 0; table[i].data; i++) {
7068 		table[i].data += (char *)p - (char *)&ipv6_devconf;
7069 		/* Leave extra1/extra2 alone when an entry already uses them
7070 		 * for min/max limits (so proc_dointvec_minmax keeps working);
7071 		 * otherwise stash the idev and net pointers there.
7072 		 */
7073 		if (!table[i].extra1 && !table[i].extra2) {
7074 			table[i].extra1 = idev; /* embedded; no ref */
7075 			table[i].extra2 = net;
7076 		}
7077 	}
7078 
7079 	snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
7080 
7081 	p->sysctl_header = register_net_sysctl(net, path, table);
7082 	if (!p->sysctl_header)
7083 		goto free;
7084 
7085 	if (!strcmp(dev_name, "all"))
7086 		ifindex = NETCONFA_IFINDEX_ALL;
7087 	else if (!strcmp(dev_name, "default"))
7088 		ifindex = NETCONFA_IFINDEX_DEFAULT;
7089 	else
7090 		ifindex = idev->dev->ifindex;
7091 	inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
7092 				     ifindex, p);
7093 	return 0;
7094 
7095 free:
7096 	kfree(table);
7097 out:
7098 	return -ENOBUFS;
7099 }
7100 
7101 static void __addrconf_sysctl_unregister(struct net *net,
7102 					 struct ipv6_devconf *p, int ifindex)
7103 {
7104 	struct ctl_table *table;
7105 
7106 	if (!p->sysctl_header)
7107 		return;
7108 
7109 	table = p->sysctl_header->ctl_table_arg;
7110 	unregister_net_sysctl_table(p->sysctl_header);
7111 	p->sysctl_header = NULL;
7112 	kfree(table);
7113 
7114 	inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7115 }
7116 
7117 static int addrconf_sysctl_register(struct inet6_dev *idev)
7118 {
7119 	int err;
7120 
7121 	if (!sysctl_dev_name_is_allowed(idev->dev->name))
7122 		return -EINVAL;
7123 
7124 	err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7125 				    &ndisc_ifinfo_sysctl_change);
7126 	if (err)
7127 		return err;
7128 	err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7129 					 idev, &idev->cnf);
7130 	if (err)
7131 		neigh_sysctl_unregister(idev->nd_parms);
7132 
7133 	return err;
7134 }
7135 
7136 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
7137 {
7138 	__addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
7139 				     idev->dev->ifindex);
7140 	neigh_sysctl_unregister(idev->nd_parms);
7141 }
7142 
7143 
7144 #endif
7145 
7146 static int __net_init addrconf_init_net(struct net *net)
7147 {
7148 	int err = -ENOMEM;
7149 	struct ipv6_devconf *all, *dflt;
7150 
7151 	spin_lock_init(&net->ipv6.addrconf_hash_lock);
7152 	INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work);
7153 	net->ipv6.inet6_addr_lst = kcalloc(IN6_ADDR_HSIZE,
7154 					   sizeof(struct hlist_head),
7155 					   GFP_KERNEL);
7156 	if (!net->ipv6.inet6_addr_lst)
7157 		goto err_alloc_addr;
7158 
7159 	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
7160 	if (!all)
7161 		goto err_alloc_all;
7162 
7163 	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
7164 	if (!dflt)
7165 		goto err_alloc_dflt;
7166 
7167 	if (IS_ENABLED(CONFIG_SYSCTL) &&
7168 	    !net_eq(net, &init_net)) {
7169 		switch (sysctl_devconf_inherit_init_net) {
7170 		case 1:  /* copy from init_net */
7171 			memcpy(all, init_net.ipv6.devconf_all,
7172 			       sizeof(ipv6_devconf));
7173 			memcpy(dflt, init_net.ipv6.devconf_dflt,
7174 			       sizeof(ipv6_devconf_dflt));
7175 			break;
7176 		case 3: /* copy from the current netns */
7177 			memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
7178 			       sizeof(ipv6_devconf));
7179 			memcpy(dflt,
7180 			       current->nsproxy->net_ns->ipv6.devconf_dflt,
7181 			       sizeof(ipv6_devconf_dflt));
7182 			break;
7183 		case 0:
7184 		case 2:
7185 			/* use compiled values */
7186 			break;
7187 		}
7188 	}
7189 
7190 	/* these will be inherited by all namespaces */
7191 	dflt->autoconf = ipv6_defaults.autoconf;
7192 	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
7193 
7194 	dflt->stable_secret.initialized = false;
7195 	all->stable_secret.initialized = false;
7196 
7197 	net->ipv6.devconf_all = all;
7198 	net->ipv6.devconf_dflt = dflt;
7199 
7200 #ifdef CONFIG_SYSCTL
7201 	err = __addrconf_sysctl_register(net, "all", NULL, all);
7202 	if (err < 0)
7203 		goto err_reg_all;
7204 
7205 	err = __addrconf_sysctl_register(net, "default", NULL, dflt);
7206 	if (err < 0)
7207 		goto err_reg_dflt;
7208 #endif
7209 	return 0;
7210 
7211 #ifdef CONFIG_SYSCTL
7212 err_reg_dflt:
7213 	__addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
7214 err_reg_all:
7215 	kfree(dflt);
7216 #endif
7217 err_alloc_dflt:
7218 	kfree(all);
7219 err_alloc_all:
7220 	kfree(net->ipv6.inet6_addr_lst);
7221 err_alloc_addr:
7222 	return err;
7223 }
7224 
7225 static void __net_exit addrconf_exit_net(struct net *net)
7226 {
7227 	int i;
7228 
7229 #ifdef CONFIG_SYSCTL
7230 	__addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
7231 				     NETCONFA_IFINDEX_DEFAULT);
7232 	__addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
7233 				     NETCONFA_IFINDEX_ALL);
7234 #endif
7235 	kfree(net->ipv6.devconf_dflt);
7236 	net->ipv6.devconf_dflt = NULL;
7237 	kfree(net->ipv6.devconf_all);
7238 	net->ipv6.devconf_all = NULL;
7239 
7240 	cancel_delayed_work_sync(&net->ipv6.addr_chk_work);
7241 	/*
7242 	 *	Check hash table, then free it.
7243 	 */
7244 	for (i = 0; i < IN6_ADDR_HSIZE; i++)
7245 		WARN_ON_ONCE(!hlist_empty(&net->ipv6.inet6_addr_lst[i]));
7246 
7247 	kfree(net->ipv6.inet6_addr_lst);
7248 	net->ipv6.inet6_addr_lst = NULL;
7249 }
7250 
7251 static struct pernet_operations addrconf_ops = {
7252 	.init = addrconf_init_net,
7253 	.exit = addrconf_exit_net,
7254 };
7255 
7256 static struct rtnl_af_ops inet6_ops __read_mostly = {
7257 	.family		  = AF_INET6,
7258 	.fill_link_af	  = inet6_fill_link_af,
7259 	.get_link_af_size = inet6_get_link_af_size,
7260 	.validate_link_af = inet6_validate_link_af,
7261 	.set_link_af	  = inet6_set_link_af,
7262 };
7263 
7264 /*
7265  *	Init / cleanup code
7266  */
7267 
7268 int __init addrconf_init(void)
7269 {
7270 	struct inet6_dev *idev;
7271 	int err;
7272 
7273 	err = ipv6_addr_label_init();
7274 	if (err < 0) {
7275 		pr_crit("%s: cannot initialize default policy table: %d\n",
7276 			__func__, err);
7277 		goto out;
7278 	}
7279 
7280 	err = register_pernet_subsys(&addrconf_ops);
7281 	if (err < 0)
7282 		goto out_addrlabel;
7283 
7284 	addrconf_wq = create_workqueue("ipv6_addrconf");
7285 	if (!addrconf_wq) {
7286 		err = -ENOMEM;
7287 		goto out_nowq;
7288 	}
7289 
7290 	rtnl_lock();
7291 	idev = ipv6_add_dev(blackhole_netdev);
7292 	rtnl_unlock();
7293 	if (IS_ERR(idev)) {
7294 		err = PTR_ERR(idev);
7295 		goto errlo;
7296 	}
7297 
7298 	ip6_route_init_special_entries();
7299 
7300 	register_netdevice_notifier(&ipv6_dev_notf);
7301 
7302 	addrconf_verify(&init_net);
7303 
7304 	rtnl_af_register(&inet6_ops);
7305 
7306 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
7307 				   NULL, inet6_dump_ifinfo, 0);
7308 	if (err < 0)
7309 		goto errout;
7310 
7311 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
7312 				   inet6_rtm_newaddr, NULL, 0);
7313 	if (err < 0)
7314 		goto errout;
7315 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
7316 				   inet6_rtm_deladdr, NULL, 0);
7317 	if (err < 0)
7318 		goto errout;
7319 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
7320 				   inet6_rtm_getaddr, inet6_dump_ifaddr,
7321 				   RTNL_FLAG_DOIT_UNLOCKED);
7322 	if (err < 0)
7323 		goto errout;
7324 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
7325 				   NULL, inet6_dump_ifmcaddr, 0);
7326 	if (err < 0)
7327 		goto errout;
7328 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
7329 				   NULL, inet6_dump_ifacaddr, 0);
7330 	if (err < 0)
7331 		goto errout;
7332 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
7333 				   inet6_netconf_get_devconf,
7334 				   inet6_netconf_dump_devconf,
7335 				   RTNL_FLAG_DOIT_UNLOCKED);
7336 	if (err < 0)
7337 		goto errout;
7338 	err = ipv6_addr_label_rtnl_register();
7339 	if (err < 0)
7340 		goto errout;
7341 
7342 	return 0;
7343 errout:
7344 	rtnl_unregister_all(PF_INET6);
7345 	rtnl_af_unregister(&inet6_ops);
7346 	unregister_netdevice_notifier(&ipv6_dev_notf);
7347 errlo:
7348 	destroy_workqueue(addrconf_wq);
7349 out_nowq:
7350 	unregister_pernet_subsys(&addrconf_ops);
7351 out_addrlabel:
7352 	ipv6_addr_label_cleanup();
7353 out:
7354 	return err;
7355 }
7356 
7357 void addrconf_cleanup(void)
7358 {
7359 	struct net_device *dev;
7360 
7361 	unregister_netdevice_notifier(&ipv6_dev_notf);
7362 	unregister_pernet_subsys(&addrconf_ops);
7363 	ipv6_addr_label_cleanup();
7364 
7365 	rtnl_af_unregister(&inet6_ops);
7366 
7367 	rtnl_lock();
7368 
7369 	/* clean dev list */
7370 	for_each_netdev(&init_net, dev) {
7371 		if (__in6_dev_get(dev) == NULL)
7372 			continue;
7373 		addrconf_ifdown(dev, true);
7374 	}
7375 	addrconf_ifdown(init_net.loopback_dev, true);
7376 
7377 	rtnl_unlock();
7378 
7379 	destroy_workqueue(addrconf_wq);
7380 }
7381