xref: /openbmc/linux/net/ipv6/addrconf.c (revision ecefa105)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 Address [auto]configuration
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
9  */
10 
11 /*
12  *	Changes:
13  *
14  *	Janos Farkas			:	delete timer on ifdown
15  *	<chexum@bankinf.banki.hu>
16  *	Andi Kleen			:	kill double kfree on module
17  *						unload.
18  *	Maciej W. Rozycki		:	FDDI support
19  *	sekiya@USAGI			:	Don't send too many RS
20  *						packets.
21  *	yoshfuji@USAGI			:       Fixed interval between DAD
22  *						packets.
23  *	YOSHIFUJI Hideaki @USAGI	:	improved accuracy of
24  *						address validation timer.
25  *	YOSHIFUJI Hideaki @USAGI	:	Privacy Extensions (RFC3041)
26  *						support.
27  *	Yuji SEKIYA @USAGI		:	Don't assign the same IPv6
28  *						address twice on the same interface.
29  *	YOSHIFUJI Hideaki @USAGI	:	ARCnet support
30  *	YOSHIFUJI Hideaki @USAGI	:	convert /proc/net/if_inet6 to
31  *						seq_file.
32  *	YOSHIFUJI Hideaki @USAGI	:	improved source address
33  *						selection; consider scope,
34  *						status etc.
35  */
36 
37 #define pr_fmt(fmt) "IPv6: " fmt
38 
39 #include <linux/errno.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/sched/signal.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/net.h>
46 #include <linux/inet.h>
47 #include <linux/in6.h>
48 #include <linux/netdevice.h>
49 #include <linux/if_addr.h>
50 #include <linux/if_arp.h>
51 #include <linux/if_arcnet.h>
52 #include <linux/if_infiniband.h>
53 #include <linux/route.h>
54 #include <linux/inetdevice.h>
55 #include <linux/init.h>
56 #include <linux/slab.h>
57 #ifdef CONFIG_SYSCTL
58 #include <linux/sysctl.h>
59 #endif
60 #include <linux/capability.h>
61 #include <linux/delay.h>
62 #include <linux/notifier.h>
63 #include <linux/string.h>
64 #include <linux/hash.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/sock.h>
68 #include <net/snmp.h>
69 
70 #include <net/6lowpan.h>
71 #include <net/firewire.h>
72 #include <net/ipv6.h>
73 #include <net/protocol.h>
74 #include <net/ndisc.h>
75 #include <net/ip6_route.h>
76 #include <net/addrconf.h>
77 #include <net/tcp.h>
78 #include <net/ip.h>
79 #include <net/netlink.h>
80 #include <net/pkt_sched.h>
81 #include <net/l3mdev.h>
82 #include <linux/if_tunnel.h>
83 #include <linux/rtnetlink.h>
84 #include <linux/netconf.h>
85 #include <linux/random.h>
86 #include <linux/uaccess.h>
87 #include <asm/unaligned.h>
88 
89 #include <linux/proc_fs.h>
90 #include <linux/seq_file.h>
91 #include <linux/export.h>
92 #include <linux/ioam6.h>
93 
94 #define	INFINITY_LIFE_TIME	0xFFFFFFFF
95 
96 #define IPV6_MAX_STRLEN \
97 	sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
98 
99 static inline u32 cstamp_delta(unsigned long cstamp)
100 {
101 	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
102 }
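
/*
 * Worked example (illustrative): cstamp_delta() converts a jiffies
 * timestamp into hundredths of a second since boot, the unit used when
 * address create/update times are reported (e.g. over netlink).  With
 * HZ == 250, an address stamped 5000 jiffies after INITIAL_JIFFIES gives
 *
 *	5000 * 100UL / 250 == 2000
 *
 * i.e. 20.00 seconds.  The HZ value is only an example.
 */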
103 
104 static inline s32 rfc3315_s14_backoff_init(s32 irt)
105 {
106 	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
107 	u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt;
108 	do_div(tmp, 1000000);
109 	return (s32)tmp;
110 }
111 
112 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
113 {
114 	/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
115 	u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt;
116 	do_div(tmp, 1000000);
117 	if ((s32)tmp > mrt) {
118 		/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
119 		tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt;
120 		do_div(tmp, 1000000);
121 	}
122 	return (s32)tmp;
123 }
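
/*
 * Illustrative sketch (not part of the build): how the two helpers above
 * combine into the randomized exponential backoff of RFC 3315, section 14.
 * send_rs(), wait_for() and 'answered' are hypothetical stand-ins for the
 * router-solicitation machinery elsewhere in this file.
 *
 *	s32 rt = rfc3315_s14_backoff_init(irt);	// 0.9*IRT .. 1.1*IRT
 *
 *	while (!answered) {
 *		send_rs();
 *		wait_for(rt);
 *		rt = rfc3315_s14_backoff_update(rt, mrt); // ~2*RT, capped near MRT
 *	}
 *
 * Each round roughly doubles the timeout while keeping +/-10% jitter; once
 * 2*RT would exceed MRT, the timeout settles in the 0.9*MRT .. 1.1*MRT band.
 */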
124 
125 #ifdef CONFIG_SYSCTL
126 static int addrconf_sysctl_register(struct inet6_dev *idev);
127 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
128 #else
129 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
130 {
131 	return 0;
132 }
133 
134 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
135 {
136 }
137 #endif
138 
139 static void ipv6_gen_rnd_iid(struct in6_addr *addr);
140 
141 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
142 static int ipv6_count_addresses(const struct inet6_dev *idev);
143 static int ipv6_generate_stable_address(struct in6_addr *addr,
144 					u8 dad_count,
145 					const struct inet6_dev *idev);
146 
147 #define IN6_ADDR_HSIZE_SHIFT	8
148 #define IN6_ADDR_HSIZE		(1 << IN6_ADDR_HSIZE_SHIFT)
149 
150 static void addrconf_verify(struct net *net);
151 static void addrconf_verify_rtnl(struct net *net);
152 
153 static struct workqueue_struct *addrconf_wq;
154 
155 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
156 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
157 
158 static void addrconf_type_change(struct net_device *dev,
159 				 unsigned long event);
160 static int addrconf_ifdown(struct net_device *dev, bool unregister);
161 
162 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
163 						  int plen,
164 						  const struct net_device *dev,
165 						  u32 flags, u32 noflags,
166 						  bool no_gw);
167 
168 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
169 static void addrconf_dad_work(struct work_struct *w);
170 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
171 				   bool send_na);
172 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
173 static void addrconf_rs_timer(struct timer_list *t);
174 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
175 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
176 
177 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
178 				struct prefix_info *pinfo);
179 
180 static struct ipv6_devconf ipv6_devconf __read_mostly = {
181 	.forwarding		= 0,
182 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
183 	.mtu6			= IPV6_MIN_MTU,
184 	.accept_ra		= 1,
185 	.accept_redirects	= 1,
186 	.autoconf		= 1,
187 	.force_mld_version	= 0,
188 	.mldv1_unsolicited_report_interval = 10 * HZ,
189 	.mldv2_unsolicited_report_interval = HZ,
190 	.dad_transmits		= 1,
191 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
192 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
193 	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
194 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
195 	.use_tempaddr		= 0,
196 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
197 	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
198 	.regen_max_retry	= REGEN_MAX_RETRY,
199 	.max_desync_factor	= MAX_DESYNC_FACTOR,
200 	.max_addresses		= IPV6_MAX_ADDRESSES,
201 	.accept_ra_defrtr	= 1,
202 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
203 	.accept_ra_from_local	= 0,
204 	.accept_ra_min_hop_limit= 1,
205 	.accept_ra_pinfo	= 1,
206 #ifdef CONFIG_IPV6_ROUTER_PREF
207 	.accept_ra_rtr_pref	= 1,
208 	.rtr_probe_interval	= 60 * HZ,
209 #ifdef CONFIG_IPV6_ROUTE_INFO
210 	.accept_ra_rt_info_min_plen = 0,
211 	.accept_ra_rt_info_max_plen = 0,
212 #endif
213 #endif
214 	.proxy_ndp		= 0,
215 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
216 	.disable_ipv6		= 0,
217 	.accept_dad		= 0,
218 	.suppress_frag_ndisc	= 1,
219 	.accept_ra_mtu		= 1,
220 	.stable_secret		= {
221 		.initialized = false,
222 	},
223 	.use_oif_addrs_only	= 0,
224 	.ignore_routes_with_linkdown = 0,
225 	.keep_addr_on_down	= 0,
226 	.seg6_enabled		= 0,
227 #ifdef CONFIG_IPV6_SEG6_HMAC
228 	.seg6_require_hmac	= 0,
229 #endif
230 	.enhanced_dad           = 1,
231 	.addr_gen_mode		= IN6_ADDR_GEN_MODE_EUI64,
232 	.disable_policy		= 0,
233 	.rpl_seg_enabled	= 0,
234 	.ioam6_enabled		= 0,
235 	.ioam6_id               = IOAM6_DEFAULT_IF_ID,
236 	.ioam6_id_wide		= IOAM6_DEFAULT_IF_ID_WIDE,
237 	.ndisc_evict_nocarrier	= 1,
238 };
239 
240 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
241 	.forwarding		= 0,
242 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
243 	.mtu6			= IPV6_MIN_MTU,
244 	.accept_ra		= 1,
245 	.accept_redirects	= 1,
246 	.autoconf		= 1,
247 	.force_mld_version	= 0,
248 	.mldv1_unsolicited_report_interval = 10 * HZ,
249 	.mldv2_unsolicited_report_interval = HZ,
250 	.dad_transmits		= 1,
251 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
252 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
253 	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
254 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
255 	.use_tempaddr		= 0,
256 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
257 	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
258 	.regen_max_retry	= REGEN_MAX_RETRY,
259 	.max_desync_factor	= MAX_DESYNC_FACTOR,
260 	.max_addresses		= IPV6_MAX_ADDRESSES,
261 	.accept_ra_defrtr	= 1,
262 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
263 	.accept_ra_from_local	= 0,
264 	.accept_ra_min_hop_limit= 1,
265 	.accept_ra_pinfo	= 1,
266 #ifdef CONFIG_IPV6_ROUTER_PREF
267 	.accept_ra_rtr_pref	= 1,
268 	.rtr_probe_interval	= 60 * HZ,
269 #ifdef CONFIG_IPV6_ROUTE_INFO
270 	.accept_ra_rt_info_min_plen = 0,
271 	.accept_ra_rt_info_max_plen = 0,
272 #endif
273 #endif
274 	.proxy_ndp		= 0,
275 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
276 	.disable_ipv6		= 0,
277 	.accept_dad		= 1,
278 	.suppress_frag_ndisc	= 1,
279 	.accept_ra_mtu		= 1,
280 	.stable_secret		= {
281 		.initialized = false,
282 	},
283 	.use_oif_addrs_only	= 0,
284 	.ignore_routes_with_linkdown = 0,
285 	.keep_addr_on_down	= 0,
286 	.seg6_enabled		= 0,
287 #ifdef CONFIG_IPV6_SEG6_HMAC
288 	.seg6_require_hmac	= 0,
289 #endif
290 	.enhanced_dad           = 1,
291 	.addr_gen_mode		= IN6_ADDR_GEN_MODE_EUI64,
292 	.disable_policy		= 0,
293 	.rpl_seg_enabled	= 0,
294 	.ioam6_enabled		= 0,
295 	.ioam6_id               = IOAM6_DEFAULT_IF_ID,
296 	.ioam6_id_wide		= IOAM6_DEFAULT_IF_ID_WIDE,
297 	.ndisc_evict_nocarrier	= 1,
298 };
299 
300 /* Check if the link is ready: it is up and a valid (non-noop) qdisc is attached */
301 static inline bool addrconf_link_ready(const struct net_device *dev)
302 {
303 	return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
304 }
305 
306 static void addrconf_del_rs_timer(struct inet6_dev *idev)
307 {
308 	if (del_timer(&idev->rs_timer))
309 		__in6_dev_put(idev);
310 }
311 
312 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
313 {
314 	if (cancel_delayed_work(&ifp->dad_work))
315 		__in6_ifa_put(ifp);
316 }
317 
318 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
319 				  unsigned long when)
320 {
321 	if (!timer_pending(&idev->rs_timer))
322 		in6_dev_hold(idev);
323 	mod_timer(&idev->rs_timer, jiffies + when);
324 }
325 
326 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
327 				   unsigned long delay)
328 {
329 	in6_ifa_hold(ifp);
330 	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
331 		in6_ifa_put(ifp);
332 }
333 
334 static int snmp6_alloc_dev(struct inet6_dev *idev)
335 {
336 	int i;
337 
338 	idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT);
339 	if (!idev->stats.ipv6)
340 		goto err_ip;
341 
342 	for_each_possible_cpu(i) {
343 		struct ipstats_mib *addrconf_stats;
344 		addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
345 		u64_stats_init(&addrconf_stats->syncp);
346 	}
347 
348 
349 	idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
350 					GFP_KERNEL);
351 	if (!idev->stats.icmpv6dev)
352 		goto err_icmp;
353 	idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
354 					   GFP_KERNEL_ACCOUNT);
355 	if (!idev->stats.icmpv6msgdev)
356 		goto err_icmpmsg;
357 
358 	return 0;
359 
360 err_icmpmsg:
361 	kfree(idev->stats.icmpv6dev);
362 err_icmp:
363 	free_percpu(idev->stats.ipv6);
364 err_ip:
365 	return -ENOMEM;
366 }
367 
368 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
369 {
370 	struct inet6_dev *ndev;
371 	int err = -ENOMEM;
372 
373 	ASSERT_RTNL();
374 
375 	if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
376 		return ERR_PTR(-EINVAL);
377 
378 	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT);
379 	if (!ndev)
380 		return ERR_PTR(err);
381 
382 	rwlock_init(&ndev->lock);
383 	ndev->dev = dev;
384 	INIT_LIST_HEAD(&ndev->addr_list);
385 	timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
386 	memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
387 
388 	if (ndev->cnf.stable_secret.initialized)
389 		ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
390 
391 	ndev->cnf.mtu6 = dev->mtu;
392 	ndev->ra_mtu = 0;
393 	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
394 	if (!ndev->nd_parms) {
395 		kfree(ndev);
396 		return ERR_PTR(err);
397 	}
398 	if (ndev->cnf.forwarding)
399 		dev_disable_lro(dev);
400 	/* Take a reference on the device */
401 	netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL);
402 
403 	if (snmp6_alloc_dev(ndev) < 0) {
404 		netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
405 			   __func__);
406 		neigh_parms_release(&nd_tbl, ndev->nd_parms);
407 		netdev_put(dev, &ndev->dev_tracker);
408 		kfree(ndev);
409 		return ERR_PTR(err);
410 	}
411 
412 	if (dev != blackhole_netdev) {
413 		if (snmp6_register_dev(ndev) < 0) {
414 			netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
415 				   __func__, dev->name);
416 			goto err_release;
417 		}
418 	}
419 	/* One reference from device. */
420 	refcount_set(&ndev->refcnt, 1);
421 
422 	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
423 		ndev->cnf.accept_dad = -1;
424 
425 #if IS_ENABLED(CONFIG_IPV6_SIT)
426 	if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
427 		pr_info("%s: Disabled Multicast RS\n", dev->name);
428 		ndev->cnf.rtr_solicits = 0;
429 	}
430 #endif
431 
432 	INIT_LIST_HEAD(&ndev->tempaddr_list);
433 	ndev->desync_factor = U32_MAX;
434 	if ((dev->flags&IFF_LOOPBACK) ||
435 	    dev->type == ARPHRD_TUNNEL ||
436 	    dev->type == ARPHRD_TUNNEL6 ||
437 	    dev->type == ARPHRD_SIT ||
438 	    dev->type == ARPHRD_NONE) {
439 		ndev->cnf.use_tempaddr = -1;
440 	}
441 
442 	ndev->token = in6addr_any;
443 
444 	if (netif_running(dev) && addrconf_link_ready(dev))
445 		ndev->if_flags |= IF_READY;
446 
447 	ipv6_mc_init_dev(ndev);
448 	ndev->tstamp = jiffies;
449 	if (dev != blackhole_netdev) {
450 		err = addrconf_sysctl_register(ndev);
451 		if (err) {
452 			ipv6_mc_destroy_dev(ndev);
453 			snmp6_unregister_dev(ndev);
454 			goto err_release;
455 		}
456 	}
457 	/* protected by rtnl_lock */
458 	rcu_assign_pointer(dev->ip6_ptr, ndev);
459 
460 	if (dev != blackhole_netdev) {
461 		/* Join interface-local all-nodes multicast group */
462 		ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
463 
464 		/* Join all-nodes multicast group */
465 		ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
466 
467 		/* Join all-router multicast group if forwarding is set */
468 		if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
469 			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
470 	}
471 	return ndev;
472 
473 err_release:
474 	neigh_parms_release(&nd_tbl, ndev->nd_parms);
475 	ndev->dead = 1;
476 	in6_dev_finish_destroy(ndev);
477 	return ERR_PTR(err);
478 }
479 
480 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
481 {
482 	struct inet6_dev *idev;
483 
484 	ASSERT_RTNL();
485 
486 	idev = __in6_dev_get(dev);
487 	if (!idev) {
488 		idev = ipv6_add_dev(dev);
489 		if (IS_ERR(idev))
490 			return idev;
491 	}
492 
493 	if (dev->flags&IFF_UP)
494 		ipv6_mc_up(idev);
495 	return idev;
496 }
497 
498 static int inet6_netconf_msgsize_devconf(int type)
499 {
500 	int size =  NLMSG_ALIGN(sizeof(struct netconfmsg))
501 		    + nla_total_size(4);	/* NETCONFA_IFINDEX */
502 	bool all = false;
503 
504 	if (type == NETCONFA_ALL)
505 		all = true;
506 
507 	if (all || type == NETCONFA_FORWARDING)
508 		size += nla_total_size(4);
509 #ifdef CONFIG_IPV6_MROUTE
510 	if (all || type == NETCONFA_MC_FORWARDING)
511 		size += nla_total_size(4);
512 #endif
513 	if (all || type == NETCONFA_PROXY_NEIGH)
514 		size += nla_total_size(4);
515 
516 	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
517 		size += nla_total_size(4);
518 
519 	return size;
520 }
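
/*
 * Example of the size computation above (illustrative): with NETCONFA_ALL
 * and CONFIG_IPV6_MROUTE enabled, room is reserved for the netconfmsg
 * header plus five 4-byte attributes (IFINDEX, FORWARDING, MC_FORWARDING,
 * PROXY_NEIGH and IGNORE_ROUTES_WITH_LINKDOWN):
 *
 *	size = NLMSG_ALIGN(sizeof(struct netconfmsg)) + 5 * nla_total_size(4);
 *
 * Asking for a single attribute type instead only adds that one attribute
 * on top of the header and NETCONFA_IFINDEX.
 */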
521 
522 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
523 				      struct ipv6_devconf *devconf, u32 portid,
524 				      u32 seq, int event, unsigned int flags,
525 				      int type)
526 {
527 	struct nlmsghdr  *nlh;
528 	struct netconfmsg *ncm;
529 	bool all = false;
530 
531 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
532 			flags);
533 	if (!nlh)
534 		return -EMSGSIZE;
535 
536 	if (type == NETCONFA_ALL)
537 		all = true;
538 
539 	ncm = nlmsg_data(nlh);
540 	ncm->ncm_family = AF_INET6;
541 
542 	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
543 		goto nla_put_failure;
544 
545 	if (!devconf)
546 		goto out;
547 
548 	if ((all || type == NETCONFA_FORWARDING) &&
549 	    nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
550 		goto nla_put_failure;
551 #ifdef CONFIG_IPV6_MROUTE
552 	if ((all || type == NETCONFA_MC_FORWARDING) &&
553 	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
554 			atomic_read(&devconf->mc_forwarding)) < 0)
555 		goto nla_put_failure;
556 #endif
557 	if ((all || type == NETCONFA_PROXY_NEIGH) &&
558 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
559 		goto nla_put_failure;
560 
561 	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
562 	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
563 			devconf->ignore_routes_with_linkdown) < 0)
564 		goto nla_put_failure;
565 
566 out:
567 	nlmsg_end(skb, nlh);
568 	return 0;
569 
570 nla_put_failure:
571 	nlmsg_cancel(skb, nlh);
572 	return -EMSGSIZE;
573 }
574 
575 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
576 				  int ifindex, struct ipv6_devconf *devconf)
577 {
578 	struct sk_buff *skb;
579 	int err = -ENOBUFS;
580 
581 	skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
582 	if (!skb)
583 		goto errout;
584 
585 	err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
586 					 event, 0, type);
587 	if (err < 0) {
588 		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
589 		WARN_ON(err == -EMSGSIZE);
590 		kfree_skb(skb);
591 		goto errout;
592 	}
593 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
594 	return;
595 errout:
596 	rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
597 }
598 
599 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
600 	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
601 	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
602 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
603 	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
604 };
605 
606 static int inet6_netconf_valid_get_req(struct sk_buff *skb,
607 				       const struct nlmsghdr *nlh,
608 				       struct nlattr **tb,
609 				       struct netlink_ext_ack *extack)
610 {
611 	int i, err;
612 
613 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
614 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
615 		return -EINVAL;
616 	}
617 
618 	if (!netlink_strict_get_check(skb))
619 		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
620 					      tb, NETCONFA_MAX,
621 					      devconf_ipv6_policy, extack);
622 
623 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
624 					    tb, NETCONFA_MAX,
625 					    devconf_ipv6_policy, extack);
626 	if (err)
627 		return err;
628 
629 	for (i = 0; i <= NETCONFA_MAX; i++) {
630 		if (!tb[i])
631 			continue;
632 
633 		switch (i) {
634 		case NETCONFA_IFINDEX:
635 			break;
636 		default:
637 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
638 			return -EINVAL;
639 		}
640 	}
641 
642 	return 0;
643 }
644 
645 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
646 				     struct nlmsghdr *nlh,
647 				     struct netlink_ext_ack *extack)
648 {
649 	struct net *net = sock_net(in_skb->sk);
650 	struct nlattr *tb[NETCONFA_MAX+1];
651 	struct inet6_dev *in6_dev = NULL;
652 	struct net_device *dev = NULL;
653 	struct sk_buff *skb;
654 	struct ipv6_devconf *devconf;
655 	int ifindex;
656 	int err;
657 
658 	err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
659 	if (err < 0)
660 		return err;
661 
662 	if (!tb[NETCONFA_IFINDEX])
663 		return -EINVAL;
664 
665 	err = -EINVAL;
666 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
667 	switch (ifindex) {
668 	case NETCONFA_IFINDEX_ALL:
669 		devconf = net->ipv6.devconf_all;
670 		break;
671 	case NETCONFA_IFINDEX_DEFAULT:
672 		devconf = net->ipv6.devconf_dflt;
673 		break;
674 	default:
675 		dev = dev_get_by_index(net, ifindex);
676 		if (!dev)
677 			return -EINVAL;
678 		in6_dev = in6_dev_get(dev);
679 		if (!in6_dev)
680 			goto errout;
681 		devconf = &in6_dev->cnf;
682 		break;
683 	}
684 
685 	err = -ENOBUFS;
686 	skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
687 	if (!skb)
688 		goto errout;
689 
690 	err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
691 					 NETLINK_CB(in_skb).portid,
692 					 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
693 					 NETCONFA_ALL);
694 	if (err < 0) {
695 		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
696 		WARN_ON(err == -EMSGSIZE);
697 		kfree_skb(skb);
698 		goto errout;
699 	}
700 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
701 errout:
702 	if (in6_dev)
703 		in6_dev_put(in6_dev);
704 	dev_put(dev);
705 	return err;
706 }
707 
708 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
709 				      struct netlink_callback *cb)
710 {
711 	const struct nlmsghdr *nlh = cb->nlh;
712 	struct net *net = sock_net(skb->sk);
713 	int h, s_h;
714 	int idx, s_idx;
715 	struct net_device *dev;
716 	struct inet6_dev *idev;
717 	struct hlist_head *head;
718 
719 	if (cb->strict_check) {
720 		struct netlink_ext_ack *extack = cb->extack;
721 		struct netconfmsg *ncm;
722 
723 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
724 			NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
725 			return -EINVAL;
726 		}
727 
728 		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
729 			NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
730 			return -EINVAL;
731 		}
732 	}
733 
734 	s_h = cb->args[0];
735 	s_idx = idx = cb->args[1];
736 
737 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
738 		idx = 0;
739 		head = &net->dev_index_head[h];
740 		rcu_read_lock();
741 		cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
742 			  net->dev_base_seq;
743 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
744 			if (idx < s_idx)
745 				goto cont;
746 			idev = __in6_dev_get(dev);
747 			if (!idev)
748 				goto cont;
749 
750 			if (inet6_netconf_fill_devconf(skb, dev->ifindex,
751 						       &idev->cnf,
752 						       NETLINK_CB(cb->skb).portid,
753 						       nlh->nlmsg_seq,
754 						       RTM_NEWNETCONF,
755 						       NLM_F_MULTI,
756 						       NETCONFA_ALL) < 0) {
757 				rcu_read_unlock();
758 				goto done;
759 			}
760 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
761 cont:
762 			idx++;
763 		}
764 		rcu_read_unlock();
765 	}
766 	if (h == NETDEV_HASHENTRIES) {
767 		if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
768 					       net->ipv6.devconf_all,
769 					       NETLINK_CB(cb->skb).portid,
770 					       nlh->nlmsg_seq,
771 					       RTM_NEWNETCONF, NLM_F_MULTI,
772 					       NETCONFA_ALL) < 0)
773 			goto done;
774 		else
775 			h++;
776 	}
777 	if (h == NETDEV_HASHENTRIES + 1) {
778 		if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
779 					       net->ipv6.devconf_dflt,
780 					       NETLINK_CB(cb->skb).portid,
781 					       nlh->nlmsg_seq,
782 					       RTM_NEWNETCONF, NLM_F_MULTI,
783 					       NETCONFA_ALL) < 0)
784 			goto done;
785 		else
786 			h++;
787 	}
788 done:
789 	cb->args[0] = h;
790 	cb->args[1] = idx;
791 
792 	return skb->len;
793 }
794 
795 #ifdef CONFIG_SYSCTL
796 static void dev_forward_change(struct inet6_dev *idev)
797 {
798 	struct net_device *dev;
799 	struct inet6_ifaddr *ifa;
800 	LIST_HEAD(tmp_addr_list);
801 
802 	if (!idev)
803 		return;
804 	dev = idev->dev;
805 	if (idev->cnf.forwarding)
806 		dev_disable_lro(dev);
807 	if (dev->flags & IFF_MULTICAST) {
808 		if (idev->cnf.forwarding) {
809 			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
810 			ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
811 			ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
812 		} else {
813 			ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
814 			ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
815 			ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
816 		}
817 	}
818 
819 	read_lock_bh(&idev->lock);
820 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
821 		if (ifa->flags&IFA_F_TENTATIVE)
822 			continue;
823 		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
824 	}
825 	read_unlock_bh(&idev->lock);
826 
827 	while (!list_empty(&tmp_addr_list)) {
828 		ifa = list_first_entry(&tmp_addr_list,
829 				       struct inet6_ifaddr, if_list_aux);
830 		list_del(&ifa->if_list_aux);
831 		if (idev->cnf.forwarding)
832 			addrconf_join_anycast(ifa);
833 		else
834 			addrconf_leave_anycast(ifa);
835 	}
836 
837 	inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
838 				     NETCONFA_FORWARDING,
839 				     dev->ifindex, &idev->cnf);
840 }
841 
842 
843 static void addrconf_forward_change(struct net *net, __s32 newf)
844 {
845 	struct net_device *dev;
846 	struct inet6_dev *idev;
847 
848 	for_each_netdev(net, dev) {
849 		idev = __in6_dev_get(dev);
850 		if (idev) {
851 			int changed = (!idev->cnf.forwarding) ^ (!newf);
852 			idev->cnf.forwarding = newf;
853 			if (changed)
854 				dev_forward_change(idev);
855 		}
856 	}
857 }
858 
859 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
860 {
861 	struct net *net;
862 	int old;
863 
864 	if (!rtnl_trylock())
865 		return restart_syscall();
866 
867 	net = (struct net *)table->extra2;
868 	old = *p;
869 	*p = newf;
870 
871 	if (p == &net->ipv6.devconf_dflt->forwarding) {
872 		if ((!newf) ^ (!old))
873 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
874 						     NETCONFA_FORWARDING,
875 						     NETCONFA_IFINDEX_DEFAULT,
876 						     net->ipv6.devconf_dflt);
877 		rtnl_unlock();
878 		return 0;
879 	}
880 
881 	if (p == &net->ipv6.devconf_all->forwarding) {
882 		int old_dflt = net->ipv6.devconf_dflt->forwarding;
883 
884 		net->ipv6.devconf_dflt->forwarding = newf;
885 		if ((!newf) ^ (!old_dflt))
886 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
887 						     NETCONFA_FORWARDING,
888 						     NETCONFA_IFINDEX_DEFAULT,
889 						     net->ipv6.devconf_dflt);
890 
891 		addrconf_forward_change(net, newf);
892 		if ((!newf) ^ (!old))
893 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
894 						     NETCONFA_FORWARDING,
895 						     NETCONFA_IFINDEX_ALL,
896 						     net->ipv6.devconf_all);
897 	} else if ((!newf) ^ (!old))
898 		dev_forward_change((struct inet6_dev *)table->extra1);
899 	rtnl_unlock();
900 
901 	if (newf)
902 		rt6_purge_dflt_routers(net);
903 	return 1;
904 }
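
/*
 * Example (illustrative): the three cases handled above correspond to the
 * usual sysctl knobs, e.g.
 *
 *	sysctl -w net.ipv6.conf.default.forwarding=1	# devconf_dflt only
 *	sysctl -w net.ipv6.conf.eth0.forwarding=1	# one inet6_dev
 *	sysctl -w net.ipv6.conf.all.forwarding=1	# default + every device
 *
 * Writing to "all" or to a per-device knob also purges learned default
 * routers via rt6_purge_dflt_routers() when forwarding is being enabled.
 */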
905 
906 static void addrconf_linkdown_change(struct net *net, __s32 newf)
907 {
908 	struct net_device *dev;
909 	struct inet6_dev *idev;
910 
911 	for_each_netdev(net, dev) {
912 		idev = __in6_dev_get(dev);
913 		if (idev) {
914 			int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
915 
916 			idev->cnf.ignore_routes_with_linkdown = newf;
917 			if (changed)
918 				inet6_netconf_notify_devconf(dev_net(dev),
919 							     RTM_NEWNETCONF,
920 							     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
921 							     dev->ifindex,
922 							     &idev->cnf);
923 		}
924 	}
925 }
926 
927 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
928 {
929 	struct net *net;
930 	int old;
931 
932 	if (!rtnl_trylock())
933 		return restart_syscall();
934 
935 	net = (struct net *)table->extra2;
936 	old = *p;
937 	*p = newf;
938 
939 	if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
940 		if ((!newf) ^ (!old))
941 			inet6_netconf_notify_devconf(net,
942 						     RTM_NEWNETCONF,
943 						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
944 						     NETCONFA_IFINDEX_DEFAULT,
945 						     net->ipv6.devconf_dflt);
946 		rtnl_unlock();
947 		return 0;
948 	}
949 
950 	if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
951 		net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
952 		addrconf_linkdown_change(net, newf);
953 		if ((!newf) ^ (!old))
954 			inet6_netconf_notify_devconf(net,
955 						     RTM_NEWNETCONF,
956 						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
957 						     NETCONFA_IFINDEX_ALL,
958 						     net->ipv6.devconf_all);
959 	}
960 	rtnl_unlock();
961 
962 	return 1;
963 }
964 
965 #endif
966 
967 /* Nobody refers to this ifaddr, destroy it */
968 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
969 {
970 	WARN_ON(!hlist_unhashed(&ifp->addr_lst));
971 
972 #ifdef NET_REFCNT_DEBUG
973 	pr_debug("%s\n", __func__);
974 #endif
975 
976 	in6_dev_put(ifp->idev);
977 
978 	if (cancel_delayed_work(&ifp->dad_work))
979 		pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
980 			  ifp);
981 
982 	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
983 		pr_warn("Freeing alive inet6 address %p\n", ifp);
984 		return;
985 	}
986 
987 	kfree_rcu(ifp, rcu);
988 }
989 
990 static void
991 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
992 {
993 	struct list_head *p;
994 	int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
995 
996 	/*
997 	 * Each device address list is sorted in order of scope -
998 	 * global before link-local.
999 	 */
1000 	list_for_each(p, &idev->addr_list) {
1001 		struct inet6_ifaddr *ifa
1002 			= list_entry(p, struct inet6_ifaddr, if_list);
1003 		if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
1004 			break;
1005 	}
1006 
1007 	list_add_tail_rcu(&ifp->if_list, p);
1008 }
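
/*
 * Example (illustrative) of the resulting order: after adding fe80::1
 * (link-local) and then 2001:db8::1 (global) to the same device, a walk
 * over idev->addr_list sees the global address first:
 *
 *	2001:db8::1	(IPV6_ADDR_SCOPE_GLOBAL)
 *	fe80::1		(IPV6_ADDR_SCOPE_LINKLOCAL)
 *
 * __ipv6_dev_get_saddr() relies on this descending-scope order for its
 * rule-2 early exit.
 */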
1009 
1010 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1011 {
1012 	u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
1013 
1014 	return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1015 }
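
/*
 * Illustrative note: with IN6_ADDR_HSIZE_SHIFT == 8 this yields a bucket
 * index in 0..255 into net->ipv6.inet6_addr_lst[].  A lookup then walks
 * only that chain, e.g. (sketch; real callers such as ipv6_chk_same_addr()
 * below also hold the hash lock or the RCU read side):
 *
 *	unsigned int hash = inet6_addr_hash(net, addr);
 *
 *	hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst)
 *		if (ipv6_addr_equal(&ifp->addr, addr))
 *			break;
 */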
1016 
1017 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1018 			       struct net_device *dev, unsigned int hash)
1019 {
1020 	struct inet6_ifaddr *ifp;
1021 
1022 	hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1023 		if (ipv6_addr_equal(&ifp->addr, addr)) {
1024 			if (!dev || ifp->idev->dev == dev)
1025 				return true;
1026 		}
1027 	}
1028 	return false;
1029 }
1030 
1031 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1032 {
1033 	struct net *net = dev_net(dev);
1034 	unsigned int hash = inet6_addr_hash(net, &ifa->addr);
1035 	int err = 0;
1036 
1037 	spin_lock(&net->ipv6.addrconf_hash_lock);
1038 
1039 	/* Ignore adding duplicate addresses on an interface */
1040 	if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) {
1041 		netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1042 		err = -EEXIST;
1043 	} else {
1044 		hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]);
1045 	}
1046 
1047 	spin_unlock(&net->ipv6.addrconf_hash_lock);
1048 
1049 	return err;
1050 }
1051 
1052 /* On success it returns ifp with increased reference count */
1053 
1054 static struct inet6_ifaddr *
1055 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1056 	      bool can_block, struct netlink_ext_ack *extack)
1057 {
1058 	gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1059 	int addr_type = ipv6_addr_type(cfg->pfx);
1060 	struct net *net = dev_net(idev->dev);
1061 	struct inet6_ifaddr *ifa = NULL;
1062 	struct fib6_info *f6i = NULL;
1063 	int err = 0;
1064 
1065 	if (addr_type == IPV6_ADDR_ANY ||
1066 	    (addr_type & IPV6_ADDR_MULTICAST &&
1067 	     !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1068 	    (!(idev->dev->flags & IFF_LOOPBACK) &&
1069 	     !netif_is_l3_master(idev->dev) &&
1070 	     addr_type & IPV6_ADDR_LOOPBACK))
1071 		return ERR_PTR(-EADDRNOTAVAIL);
1072 
1073 	if (idev->dead) {
1074 		err = -ENODEV;			/*XXX*/
1075 		goto out;
1076 	}
1077 
1078 	if (idev->cnf.disable_ipv6) {
1079 		err = -EACCES;
1080 		goto out;
1081 	}
1082 
1083 	/* validator notifier needs to be blocking;
1084 	 * do not call in atomic context
1085 	 */
1086 	if (can_block) {
1087 		struct in6_validator_info i6vi = {
1088 			.i6vi_addr = *cfg->pfx,
1089 			.i6vi_dev = idev,
1090 			.extack = extack,
1091 		};
1092 
1093 		err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1094 		err = notifier_to_errno(err);
1095 		if (err < 0)
1096 			goto out;
1097 	}
1098 
1099 	ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT);
1100 	if (!ifa) {
1101 		err = -ENOBUFS;
1102 		goto out;
1103 	}
1104 
1105 	f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags);
1106 	if (IS_ERR(f6i)) {
1107 		err = PTR_ERR(f6i);
1108 		f6i = NULL;
1109 		goto out;
1110 	}
1111 
1112 	neigh_parms_data_state_setall(idev->nd_parms);
1113 
1114 	ifa->addr = *cfg->pfx;
1115 	if (cfg->peer_pfx)
1116 		ifa->peer_addr = *cfg->peer_pfx;
1117 
1118 	spin_lock_init(&ifa->lock);
1119 	INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1120 	INIT_HLIST_NODE(&ifa->addr_lst);
1121 	ifa->scope = cfg->scope;
1122 	ifa->prefix_len = cfg->plen;
1123 	ifa->rt_priority = cfg->rt_priority;
1124 	ifa->flags = cfg->ifa_flags;
1125 	ifa->ifa_proto = cfg->ifa_proto;
1126 	/* No need to add the TENTATIVE flag for addresses with NODAD */
1127 	if (!(cfg->ifa_flags & IFA_F_NODAD))
1128 		ifa->flags |= IFA_F_TENTATIVE;
1129 	ifa->valid_lft = cfg->valid_lft;
1130 	ifa->prefered_lft = cfg->preferred_lft;
1131 	ifa->cstamp = ifa->tstamp = jiffies;
1132 	ifa->tokenized = false;
1133 
1134 	ifa->rt = f6i;
1135 
1136 	ifa->idev = idev;
1137 	in6_dev_hold(idev);
1138 
1139 	/* Hold a reference for the caller */
1140 	refcount_set(&ifa->refcnt, 1);
1141 
1142 	rcu_read_lock_bh();
1143 
1144 	err = ipv6_add_addr_hash(idev->dev, ifa);
1145 	if (err < 0) {
1146 		rcu_read_unlock_bh();
1147 		goto out;
1148 	}
1149 
1150 	write_lock(&idev->lock);
1151 
1152 	/* Add to inet6_dev unicast addr list. */
1153 	ipv6_link_dev_addr(idev, ifa);
1154 
1155 	if (ifa->flags&IFA_F_TEMPORARY) {
1156 		list_add(&ifa->tmp_list, &idev->tempaddr_list);
1157 		in6_ifa_hold(ifa);
1158 	}
1159 
1160 	in6_ifa_hold(ifa);
1161 	write_unlock(&idev->lock);
1162 
1163 	rcu_read_unlock_bh();
1164 
1165 	inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1166 out:
1167 	if (unlikely(err < 0)) {
1168 		fib6_info_release(f6i);
1169 
1170 		if (ifa) {
1171 			if (ifa->idev)
1172 				in6_dev_put(ifa->idev);
1173 			kfree(ifa);
1174 		}
1175 		ifa = ERR_PTR(err);
1176 	}
1177 
1178 	return ifa;
1179 }
1180 
1181 enum cleanup_prefix_rt_t {
1182 	CLEANUP_PREFIX_RT_NOP,    /* no cleanup action for prefix route */
1183 	CLEANUP_PREFIX_RT_DEL,    /* delete the prefix route */
1184 	CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1185 };
1186 
1187 /*
1188  * Check whether the prefix for ifp would still need a prefix route
1189  * after ifp is deleted. The function returns one of the CLEANUP_PREFIX_RT_*
1190  * constants.
1191  *
1192  * 1) we don't purge the prefix if the address was not permanent;
1193  *    the prefix is managed by its own lifetime.
1194  * 2) we also don't purge it if the address had IFA_F_NOPREFIXROUTE.
1195  * 3) if there are no other addresses, delete the prefix.
1196  * 4) if there are still other permanent address(es),
1197  *    the corresponding prefix is still permanent.
1198  * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1199  *    don't purge the prefix; assume user space is managing it.
1200  * 6) otherwise, update the prefix lifetime to the
1201  *    longest valid lifetime among the corresponding
1202  *    addresses on the device.
1203  *    Note: a subsequent RA will update the lifetime.
1204  **/
1205 static enum cleanup_prefix_rt_t
1206 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1207 {
1208 	struct inet6_ifaddr *ifa;
1209 	struct inet6_dev *idev = ifp->idev;
1210 	unsigned long lifetime;
1211 	enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1212 
1213 	*expires = jiffies;
1214 
1215 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
1216 		if (ifa == ifp)
1217 			continue;
1218 		if (ifa->prefix_len != ifp->prefix_len ||
1219 		    !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1220 				       ifp->prefix_len))
1221 			continue;
1222 		if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1223 			return CLEANUP_PREFIX_RT_NOP;
1224 
1225 		action = CLEANUP_PREFIX_RT_EXPIRE;
1226 
1227 		spin_lock(&ifa->lock);
1228 
1229 		lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1230 		/*
1231 		 * Note: Because this address is
1232 		 * not permanent, lifetime <
1233 		 * LONG_MAX / HZ here.
1234 		 */
1235 		if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1236 			*expires = ifa->tstamp + lifetime * HZ;
1237 		spin_unlock(&ifa->lock);
1238 	}
1239 
1240 	return action;
1241 }
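
/*
 * Worked example (illustrative): suppose 2001:db8:1::10/64 is being
 * removed from an interface.
 *  - If no other address on the device shares 2001:db8:1::/64, the
 *    function returns CLEANUP_PREFIX_RT_DEL and the prefix route goes.
 *  - If another permanent (or IFA_F_NOPREFIXROUTE) address in that /64
 *    remains, it returns CLEANUP_PREFIX_RT_NOP and nothing is touched.
 *  - If only non-permanent addresses in that /64 remain, it returns
 *    CLEANUP_PREFIX_RT_EXPIRE with *expires set to the latest
 *    tstamp + valid_lft among them, so the prefix route ages out with
 *    the longest-lived remaining address.
 */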
1242 
1243 static void
1244 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1245 		     bool del_rt, bool del_peer)
1246 {
1247 	struct fib6_info *f6i;
1248 
1249 	f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1250 					ifp->prefix_len,
1251 					ifp->idev->dev, 0, RTF_DEFAULT, true);
1252 	if (f6i) {
1253 		if (del_rt)
1254 			ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1255 		else {
1256 			if (!(f6i->fib6_flags & RTF_EXPIRES))
1257 				fib6_set_expires(f6i, expires);
1258 			fib6_info_release(f6i);
1259 		}
1260 	}
1261 }
1262 
1263 
1264 /* This function expects a referenced ifp and releases that reference before returning */
1265 
1266 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1267 {
1268 	enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1269 	struct net *net = dev_net(ifp->idev->dev);
1270 	unsigned long expires;
1271 	int state;
1272 
1273 	ASSERT_RTNL();
1274 
1275 	spin_lock_bh(&ifp->lock);
1276 	state = ifp->state;
1277 	ifp->state = INET6_IFADDR_STATE_DEAD;
1278 	spin_unlock_bh(&ifp->lock);
1279 
1280 	if (state == INET6_IFADDR_STATE_DEAD)
1281 		goto out;
1282 
1283 	spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1284 	hlist_del_init_rcu(&ifp->addr_lst);
1285 	spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1286 
1287 	write_lock_bh(&ifp->idev->lock);
1288 
1289 	if (ifp->flags&IFA_F_TEMPORARY) {
1290 		list_del(&ifp->tmp_list);
1291 		if (ifp->ifpub) {
1292 			in6_ifa_put(ifp->ifpub);
1293 			ifp->ifpub = NULL;
1294 		}
1295 		__in6_ifa_put(ifp);
1296 	}
1297 
1298 	if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1299 		action = check_cleanup_prefix_route(ifp, &expires);
1300 
1301 	list_del_rcu(&ifp->if_list);
1302 	__in6_ifa_put(ifp);
1303 
1304 	write_unlock_bh(&ifp->idev->lock);
1305 
1306 	addrconf_del_dad_work(ifp);
1307 
1308 	ipv6_ifa_notify(RTM_DELADDR, ifp);
1309 
1310 	inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1311 
1312 	if (action != CLEANUP_PREFIX_RT_NOP) {
1313 		cleanup_prefix_route(ifp, expires,
1314 			action == CLEANUP_PREFIX_RT_DEL, false);
1315 	}
1316 
1317 	/* clean up prefsrc entries */
1318 	rt6_remove_prefsrc(ifp);
1319 out:
1320 	in6_ifa_put(ifp);
1321 }
1322 
1323 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
1324 {
1325 	struct inet6_dev *idev = ifp->idev;
1326 	unsigned long tmp_tstamp, age;
1327 	unsigned long regen_advance;
1328 	unsigned long now = jiffies;
1329 	s32 cnf_temp_preferred_lft;
1330 	struct inet6_ifaddr *ift;
1331 	struct ifa6_config cfg;
1332 	long max_desync_factor;
1333 	struct in6_addr addr;
1334 	int ret = 0;
1335 
1336 	write_lock_bh(&idev->lock);
1337 
1338 retry:
1339 	in6_dev_hold(idev);
1340 	if (idev->cnf.use_tempaddr <= 0) {
1341 		write_unlock_bh(&idev->lock);
1342 		pr_info("%s: use_tempaddr is disabled\n", __func__);
1343 		in6_dev_put(idev);
1344 		ret = -1;
1345 		goto out;
1346 	}
1347 	spin_lock_bh(&ifp->lock);
1348 	if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1349 		idev->cnf.use_tempaddr = -1;	/*XXX*/
1350 		spin_unlock_bh(&ifp->lock);
1351 		write_unlock_bh(&idev->lock);
1352 		pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1353 			__func__);
1354 		in6_dev_put(idev);
1355 		ret = -1;
1356 		goto out;
1357 	}
1358 	in6_ifa_hold(ifp);
1359 	memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1360 	ipv6_gen_rnd_iid(&addr);
1361 
1362 	age = (now - ifp->tstamp) / HZ;
1363 
1364 	regen_advance = idev->cnf.regen_max_retry *
1365 			idev->cnf.dad_transmits *
1366 			max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1367 
1368 	/* recalculate max_desync_factor each time and re-randomize
1369 	 * idev->desync_factor if it exceeds the new maximum
1370 	 */
1371 	cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1372 	max_desync_factor = min_t(__u32,
1373 				  idev->cnf.max_desync_factor,
1374 				  cnf_temp_preferred_lft - regen_advance);
1375 
1376 	if (unlikely(idev->desync_factor > max_desync_factor)) {
1377 		if (max_desync_factor > 0) {
1378 			get_random_bytes(&idev->desync_factor,
1379 					 sizeof(idev->desync_factor));
1380 			idev->desync_factor %= max_desync_factor;
1381 		} else {
1382 			idev->desync_factor = 0;
1383 		}
1384 	}
1385 
1386 	memset(&cfg, 0, sizeof(cfg));
1387 	cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1388 			      idev->cnf.temp_valid_lft + age);
1389 	cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1390 	cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
1391 
1392 	cfg.plen = ifp->prefix_len;
1393 	tmp_tstamp = ifp->tstamp;
1394 	spin_unlock_bh(&ifp->lock);
1395 
1396 	write_unlock_bh(&idev->lock);
1397 
1398 	/* A temporary address is created only if this calculated Preferred
1399 	 * Lifetime is greater than REGEN_ADVANCE time units.  In particular,
1400 	 * an implementation must not create a temporary address with a zero
1401 	 * Preferred Lifetime.
1402 	 * Use age calculation as in addrconf_verify to avoid unnecessary
1403 	 * temporary addresses being generated.
1404 	 */
1405 	age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1406 	if (cfg.preferred_lft <= regen_advance + age) {
1407 		in6_ifa_put(ifp);
1408 		in6_dev_put(idev);
1409 		ret = -1;
1410 		goto out;
1411 	}
1412 
1413 	cfg.ifa_flags = IFA_F_TEMPORARY;
1414 	/* set in addrconf_prefix_rcv() */
1415 	if (ifp->flags & IFA_F_OPTIMISTIC)
1416 		cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1417 
1418 	cfg.pfx = &addr;
1419 	cfg.scope = ipv6_addr_scope(cfg.pfx);
1420 
1421 	ift = ipv6_add_addr(idev, &cfg, block, NULL);
1422 	if (IS_ERR(ift)) {
1423 		in6_ifa_put(ifp);
1424 		in6_dev_put(idev);
1425 		pr_info("%s: retry temporary address regeneration\n", __func__);
1426 		write_lock_bh(&idev->lock);
1427 		goto retry;
1428 	}
1429 
1430 	spin_lock_bh(&ift->lock);
1431 	ift->ifpub = ifp;
1432 	ift->cstamp = now;
1433 	ift->tstamp = tmp_tstamp;
1434 	spin_unlock_bh(&ift->lock);
1435 
1436 	addrconf_dad_start(ift);
1437 	in6_ifa_put(ift);
1438 	in6_dev_put(idev);
1439 out:
1440 	return ret;
1441 }
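
/*
 * Worked example (illustrative numbers, not necessarily the defaults):
 * assume the public address is 100 seconds old (age == 100),
 * temp_valid_lft == 604800, temp_prefered_lft == 86400,
 * desync_factor == 300 and regen_advance works out to 3 seconds.  Then
 *
 *	cfg.valid_lft     = min(ifp->valid_lft,    604800 + 100)
 *	cfg.preferred_lft = min(ifp->prefered_lft, 86400 + 100 - 300)
 *
 * and the temporary address is created only if cfg.preferred_lft is still
 * greater than regen_advance plus the (fuzz-adjusted) age, so a nearly
 * expired public address never spawns a zero-lifetime temporary one.
 */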
1442 
1443 /*
1444  *	Choose an appropriate source address (RFC 3484, updated by RFC 6724)
1445  */
1446 enum {
1447 	IPV6_SADDR_RULE_INIT = 0,
1448 	IPV6_SADDR_RULE_LOCAL,
1449 	IPV6_SADDR_RULE_SCOPE,
1450 	IPV6_SADDR_RULE_PREFERRED,
1451 #ifdef CONFIG_IPV6_MIP6
1452 	IPV6_SADDR_RULE_HOA,
1453 #endif
1454 	IPV6_SADDR_RULE_OIF,
1455 	IPV6_SADDR_RULE_LABEL,
1456 	IPV6_SADDR_RULE_PRIVACY,
1457 	IPV6_SADDR_RULE_ORCHID,
1458 	IPV6_SADDR_RULE_PREFIX,
1459 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1460 	IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1461 #endif
1462 	IPV6_SADDR_RULE_MAX
1463 };
1464 
1465 struct ipv6_saddr_score {
1466 	int			rule;
1467 	int			addr_type;
1468 	struct inet6_ifaddr	*ifa;
1469 	DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1470 	int			scopedist;
1471 	int			matchlen;
1472 };
1473 
1474 struct ipv6_saddr_dst {
1475 	const struct in6_addr *addr;
1476 	int ifindex;
1477 	int scope;
1478 	int label;
1479 	unsigned int prefs;
1480 };
1481 
1482 static inline int ipv6_saddr_preferred(int type)
1483 {
1484 	if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1485 		return 1;
1486 	return 0;
1487 }
1488 
1489 static bool ipv6_use_optimistic_addr(struct net *net,
1490 				     struct inet6_dev *idev)
1491 {
1492 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1493 	if (!idev)
1494 		return false;
1495 	if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1496 		return false;
1497 	if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1498 		return false;
1499 
1500 	return true;
1501 #else
1502 	return false;
1503 #endif
1504 }
1505 
1506 static bool ipv6_allow_optimistic_dad(struct net *net,
1507 				      struct inet6_dev *idev)
1508 {
1509 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1510 	if (!idev)
1511 		return false;
1512 	if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1513 		return false;
1514 
1515 	return true;
1516 #else
1517 	return false;
1518 #endif
1519 }
1520 
1521 static int ipv6_get_saddr_eval(struct net *net,
1522 			       struct ipv6_saddr_score *score,
1523 			       struct ipv6_saddr_dst *dst,
1524 			       int i)
1525 {
1526 	int ret;
1527 
1528 	if (i <= score->rule) {
1529 		switch (i) {
1530 		case IPV6_SADDR_RULE_SCOPE:
1531 			ret = score->scopedist;
1532 			break;
1533 		case IPV6_SADDR_RULE_PREFIX:
1534 			ret = score->matchlen;
1535 			break;
1536 		default:
1537 			ret = !!test_bit(i, score->scorebits);
1538 		}
1539 		goto out;
1540 	}
1541 
1542 	switch (i) {
1543 	case IPV6_SADDR_RULE_INIT:
1544 		/* Rule 0: remember if hiscore is not ready yet */
1545 		ret = !!score->ifa;
1546 		break;
1547 	case IPV6_SADDR_RULE_LOCAL:
1548 		/* Rule 1: Prefer same address */
1549 		ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1550 		break;
1551 	case IPV6_SADDR_RULE_SCOPE:
1552 		/* Rule 2: Prefer appropriate scope
1553 		 *
1554 		 *      ret
1555 		 *       ^
1556 		 *    -1 |  d 15
1557 		 *    ---+--+-+---> scope
1558 		 *       |
1559 		 *       |             d is scope of the destination.
1560 		 *  B-d  |  \
1561 		 *       |   \      <- smaller scope is better
1562 		 *  B-15 |    \        if scope is enough for destination.
1563 		 *       |             ret = B - scope (-1 <= d <= scope <= 15).
1564 		 * d-C-1 | /
1565 		 *       |/         <- greater is better
1566 		 *   -C  /             if scope is not enough for destination.
1567 		 *      /|             ret = scope - C (-1 <= scope < d <= 15).
1568 		 *
1569 		 * d - C - 1 < B - 15 (for all -1 <= d <= 15).
1570 		 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1571 		 * Assume B = 0 and we get C > 29.
1572 		 */
1573 		ret = __ipv6_addr_src_scope(score->addr_type);
1574 		if (ret >= dst->scope)
1575 			ret = -ret;
1576 		else
1577 			ret -= 128;	/* 30 is enough */
1578 		score->scopedist = ret;
1579 		break;
1580 	case IPV6_SADDR_RULE_PREFERRED:
1581 	    {
1582 		/* Rule 3: Avoid deprecated and optimistic addresses */
1583 		u8 avoid = IFA_F_DEPRECATED;
1584 
1585 		if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1586 			avoid |= IFA_F_OPTIMISTIC;
1587 		ret = ipv6_saddr_preferred(score->addr_type) ||
1588 		      !(score->ifa->flags & avoid);
1589 		break;
1590 	    }
1591 #ifdef CONFIG_IPV6_MIP6
1592 	case IPV6_SADDR_RULE_HOA:
1593 	    {
1594 		/* Rule 4: Prefer home address */
1595 		int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1596 		ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1597 		break;
1598 	    }
1599 #endif
1600 	case IPV6_SADDR_RULE_OIF:
1601 		/* Rule 5: Prefer outgoing interface */
1602 		ret = (!dst->ifindex ||
1603 		       dst->ifindex == score->ifa->idev->dev->ifindex);
1604 		break;
1605 	case IPV6_SADDR_RULE_LABEL:
1606 		/* Rule 6: Prefer matching label */
1607 		ret = ipv6_addr_label(net,
1608 				      &score->ifa->addr, score->addr_type,
1609 				      score->ifa->idev->dev->ifindex) == dst->label;
1610 		break;
1611 	case IPV6_SADDR_RULE_PRIVACY:
1612 	    {
1613 		/* Rule 7: Prefer public address
1614 		 * Note: prefer temporary address if use_tempaddr >= 2
1615 		 */
1616 		int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1617 				!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1618 				score->ifa->idev->cnf.use_tempaddr >= 2;
1619 		ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1620 		break;
1621 	    }
1622 	case IPV6_SADDR_RULE_ORCHID:
1623 		/* Rule 8-: Prefer ORCHID vs ORCHID or
1624 		 *	    non-ORCHID vs non-ORCHID
1625 		 */
1626 		ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1627 			ipv6_addr_orchid(dst->addr));
1628 		break;
1629 	case IPV6_SADDR_RULE_PREFIX:
1630 		/* Rule 8: Use longest matching prefix */
1631 		ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1632 		if (ret > score->ifa->prefix_len)
1633 			ret = score->ifa->prefix_len;
1634 		score->matchlen = ret;
1635 		break;
1636 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1637 	case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1638 		/* Optimistic addresses still have lower precedence than other
1639 		 * preferred addresses.
1640 		 */
1641 		ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1642 		break;
1643 #endif
1644 	default:
1645 		ret = 0;
1646 	}
1647 
1648 	if (ret)
1649 		__set_bit(i, score->scorebits);
1650 	score->rule = i;
1651 out:
1652 	return ret;
1653 }
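
/*
 * Worked example (illustrative) for rule 2 with B == 0 and C == 128 as
 * coded above:
 *  - destination is global (scope 14): a global source scores -14, a
 *    link-local source scores 2 - 128 == -126, so the global source wins;
 *  - destination is link-local (scope 2): a link-local source scores -2
 *    and beats a global source's -14, i.e. the smallest scope that still
 *    covers the destination is preferred.
 */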
1654 
1655 static int __ipv6_dev_get_saddr(struct net *net,
1656 				struct ipv6_saddr_dst *dst,
1657 				struct inet6_dev *idev,
1658 				struct ipv6_saddr_score *scores,
1659 				int hiscore_idx)
1660 {
1661 	struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1662 
1663 	list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1664 		int i;
1665 
1666 		/*
1667 		 * - Tentative Address (RFC2462 section 5.4)
1668 		 *  - A tentative address is not considered
1669 		 *    "assigned to an interface" in the traditional
1670 		 *    sense, unless it is also flagged as optimistic.
1671 		 * - Candidate Source Address (section 4)
1672 		 *  - In any case, anycast addresses, multicast
1673 		 *    addresses, and the unspecified address MUST
1674 		 *    NOT be included in a candidate set.
1675 		 */
1676 		if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1677 		    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1678 			continue;
1679 
1680 		score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1681 
1682 		if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1683 			     score->addr_type & IPV6_ADDR_MULTICAST)) {
1684 			net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1685 					    idev->dev->name);
1686 			continue;
1687 		}
1688 
1689 		score->rule = -1;
1690 		bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1691 
1692 		for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1693 			int minihiscore, miniscore;
1694 
1695 			minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1696 			miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1697 
1698 			if (minihiscore > miniscore) {
1699 				if (i == IPV6_SADDR_RULE_SCOPE &&
1700 				    score->scopedist > 0) {
1701 					/*
1702 					 * special case:
1703 					 * each remaining entry has a
1704 					 * scope that is too small (not
1705 					 * sufficient), because ifa entries
1706 					 * are sorted by their scope
1707 					 * values.
1708 					 */
1709 					goto out;
1710 				}
1711 				break;
1712 			} else if (minihiscore < miniscore) {
1713 				swap(hiscore, score);
1714 				hiscore_idx = 1 - hiscore_idx;
1715 
1716 				/* restore our iterator */
1717 				score->ifa = hiscore->ifa;
1718 
1719 				break;
1720 			}
1721 		}
1722 	}
1723 out:
1724 	return hiscore_idx;
1725 }
1726 
1727 static int ipv6_get_saddr_master(struct net *net,
1728 				 const struct net_device *dst_dev,
1729 				 const struct net_device *master,
1730 				 struct ipv6_saddr_dst *dst,
1731 				 struct ipv6_saddr_score *scores,
1732 				 int hiscore_idx)
1733 {
1734 	struct inet6_dev *idev;
1735 
1736 	idev = __in6_dev_get(dst_dev);
1737 	if (idev)
1738 		hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1739 						   scores, hiscore_idx);
1740 
1741 	idev = __in6_dev_get(master);
1742 	if (idev)
1743 		hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1744 						   scores, hiscore_idx);
1745 
1746 	return hiscore_idx;
1747 }
1748 
1749 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1750 		       const struct in6_addr *daddr, unsigned int prefs,
1751 		       struct in6_addr *saddr)
1752 {
1753 	struct ipv6_saddr_score scores[2], *hiscore;
1754 	struct ipv6_saddr_dst dst;
1755 	struct inet6_dev *idev;
1756 	struct net_device *dev;
1757 	int dst_type;
1758 	bool use_oif_addr = false;
1759 	int hiscore_idx = 0;
1760 	int ret = 0;
1761 
1762 	dst_type = __ipv6_addr_type(daddr);
1763 	dst.addr = daddr;
1764 	dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1765 	dst.scope = __ipv6_addr_src_scope(dst_type);
1766 	dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1767 	dst.prefs = prefs;
1768 
1769 	scores[hiscore_idx].rule = -1;
1770 	scores[hiscore_idx].ifa = NULL;
1771 
1772 	rcu_read_lock();
1773 
1774 	/* Candidate Source Address (section 4)
1775 	 *  - For multicast and link-local destination addresses,
1776 	 *    the set of candidate source addresses MUST only
1777 	 *    include addresses assigned to interfaces
1778 	 *    belonging to the same link as the outgoing
1779 	 *    interface.
1780 	 * (- For site-local destination addresses, the
1781 	 *    set of candidate source addresses MUST only
1782 	 *    include addresses assigned to interfaces
1783 	 *    belonging to the same site as the outgoing
1784 	 *    interface.)
1785 	 *  - "It is RECOMMENDED that the candidate source addresses
1786 	 *    be the set of unicast addresses assigned to the
1787 	 *    interface that will be used to send to the destination
1788 	 *    (the 'outgoing' interface)." (RFC 6724)
1789 	 */
1790 	if (dst_dev) {
1791 		idev = __in6_dev_get(dst_dev);
1792 		if ((dst_type & IPV6_ADDR_MULTICAST) ||
1793 		    dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1794 		    (idev && idev->cnf.use_oif_addrs_only)) {
1795 			use_oif_addr = true;
1796 		}
1797 	}
1798 
1799 	if (use_oif_addr) {
1800 		if (idev)
1801 			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1802 	} else {
1803 		const struct net_device *master;
1804 		int master_idx = 0;
1805 
1806 		/* if dst_dev exists and is enslaved to an L3 device, then
1807 		 * prefer addresses from dst_dev and then the master over
1808 		 * any other enslaved devices in the L3 domain.
1809 		 */
1810 		master = l3mdev_master_dev_rcu(dst_dev);
1811 		if (master) {
1812 			master_idx = master->ifindex;
1813 
1814 			hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1815 							    master, &dst,
1816 							    scores, hiscore_idx);
1817 
1818 			if (scores[hiscore_idx].ifa)
1819 				goto out;
1820 		}
1821 
1822 		for_each_netdev_rcu(net, dev) {
1823 			/* only consider addresses on devices in the
1824 			 * same L3 domain
1825 			 */
1826 			if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1827 				continue;
1828 			idev = __in6_dev_get(dev);
1829 			if (!idev)
1830 				continue;
1831 			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1832 		}
1833 	}
1834 
1835 out:
1836 	hiscore = &scores[hiscore_idx];
1837 	if (!hiscore->ifa)
1838 		ret = -EADDRNOTAVAIL;
1839 	else
1840 		*saddr = hiscore->ifa->addr;
1841 
1842 	rcu_read_unlock();
1843 	return ret;
1844 }
1845 EXPORT_SYMBOL(ipv6_dev_get_saddr);
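
/*
 * Usage sketch (illustrative): a caller needing an RFC 6724 source address
 * for a destination might do the following; 'net', 'dev' and 'daddr' are
 * assumed to come from the caller's context.
 *
 *	struct in6_addr saddr;
 *	int err;
 *
 *	err = ipv6_dev_get_saddr(net, dev, &daddr, IPV6_PREFER_SRC_PUBLIC,
 *				 &saddr);
 *	if (err)	// -EADDRNOTAVAIL: nothing usable in this L3 domain
 *		return err;
 *	// saddr now holds the preferred source address for daddr
 *
 * The preference flags mirror the IPV6_PREFER_SRC_* values evaluated in
 * ipv6_get_saddr_eval() above.
 */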
1846 
1847 static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1848 			      u32 banned_flags)
1849 {
1850 	struct inet6_ifaddr *ifp;
1851 	int err = -EADDRNOTAVAIL;
1852 
1853 	list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1854 		if (ifp->scope > IFA_LINK)
1855 			break;
1856 		if (ifp->scope == IFA_LINK &&
1857 		    !(ifp->flags & banned_flags)) {
1858 			*addr = ifp->addr;
1859 			err = 0;
1860 			break;
1861 		}
1862 	}
1863 	return err;
1864 }
1865 
1866 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1867 		    u32 banned_flags)
1868 {
1869 	struct inet6_dev *idev;
1870 	int err = -EADDRNOTAVAIL;
1871 
1872 	rcu_read_lock();
1873 	idev = __in6_dev_get(dev);
1874 	if (idev) {
1875 		read_lock_bh(&idev->lock);
1876 		err = __ipv6_get_lladdr(idev, addr, banned_flags);
1877 		read_unlock_bh(&idev->lock);
1878 	}
1879 	rcu_read_unlock();
1880 	return err;
1881 }
1882 
1883 static int ipv6_count_addresses(const struct inet6_dev *idev)
1884 {
1885 	const struct inet6_ifaddr *ifp;
1886 	int cnt = 0;
1887 
1888 	rcu_read_lock();
1889 	list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1890 		cnt++;
1891 	rcu_read_unlock();
1892 	return cnt;
1893 }
1894 
1895 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1896 		  const struct net_device *dev, int strict)
1897 {
1898 	return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1899 				       strict, IFA_F_TENTATIVE);
1900 }
1901 EXPORT_SYMBOL(ipv6_chk_addr);
1902 
1903 /* device argument is used to find the L3 domain of interest. If
1904  * skip_dev_check is set, then the ifp device is not checked against
1905  * the passed in dev argument. So the two cases for address checks are:
1906  *   1. does the address exist in the L3 domain that dev is part of
1907  *      (skip_dev_check = true), or
1908  *
1909  *   2. does the address exist on the specific device
1910  *      (skip_dev_check = false)
1911  */
1912 static struct net_device *
1913 __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1914 			  const struct net_device *dev, bool skip_dev_check,
1915 			  int strict, u32 banned_flags)
1916 {
1917 	unsigned int hash = inet6_addr_hash(net, addr);
1918 	struct net_device *l3mdev, *ndev;
1919 	struct inet6_ifaddr *ifp;
1920 	u32 ifp_flags;
1921 
1922 	rcu_read_lock();
1923 
1924 	l3mdev = l3mdev_master_dev_rcu(dev);
1925 	if (skip_dev_check)
1926 		dev = NULL;
1927 
1928 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1929 		ndev = ifp->idev->dev;
1930 
1931 		if (l3mdev_master_dev_rcu(ndev) != l3mdev)
1932 			continue;
1933 
1934 		/* Decouple optimistic from tentative for evaluation here.
1935 		 * Ban optimistic addresses explicitly, when required.
1936 		 */
1937 		ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1938 			    ? (ifp->flags&~IFA_F_TENTATIVE)
1939 			    : ifp->flags;
1940 		if (ipv6_addr_equal(&ifp->addr, addr) &&
1941 		    !(ifp_flags&banned_flags) &&
1942 		    (!dev || ndev == dev ||
1943 		     !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1944 			rcu_read_unlock();
1945 			return ndev;
1946 		}
1947 	}
1948 
1949 	rcu_read_unlock();
1950 	return NULL;
1951 }
1952 
1953 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1954 			    const struct net_device *dev, bool skip_dev_check,
1955 			    int strict, u32 banned_flags)
1956 {
1957 	return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
1958 					 strict, banned_flags) ? 1 : 0;
1959 }
1960 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
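
/* Illustrative sketch (not part of the original source): the two cases
 * described in the comment above __ipv6_chk_addr_and_flags().  The
 * wrapper names are hypothetical; only ipv6_chk_addr_and_flags() itself
 * is real.
 */
static inline bool example_addr_in_l3_domain(struct net *net,
					     const struct in6_addr *addr,
					     const struct net_device *dev)
{
	/* case 1: does the address exist anywhere in dev's L3 domain? */
	return ipv6_chk_addr_and_flags(net, addr, dev, true, 0,
				       IFA_F_TENTATIVE);
}

static inline bool example_addr_on_device(struct net *net,
					  const struct in6_addr *addr,
					  const struct net_device *dev)
{
	/* case 2: does the address exist on this specific device? */
	return ipv6_chk_addr_and_flags(net, addr, dev, false, 1,
				       IFA_F_TENTATIVE);
}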
1961 
1962 
1963 /* Compares an address/prefix_len against the addresses on device @dev.
1964  * Returns true if a match is found.
1965  */
1966 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1967 	const unsigned int prefix_len, struct net_device *dev)
1968 {
1969 	const struct inet6_ifaddr *ifa;
1970 	const struct inet6_dev *idev;
1971 	bool ret = false;
1972 
1973 	rcu_read_lock();
1974 	idev = __in6_dev_get(dev);
1975 	if (idev) {
1976 		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1977 			ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1978 			if (ret)
1979 				break;
1980 		}
1981 	}
1982 	rcu_read_unlock();
1983 
1984 	return ret;
1985 }
1986 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1987 
1988 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1989 {
1990 	const struct inet6_ifaddr *ifa;
1991 	const struct inet6_dev *idev;
1992 	int	onlink;
1993 
1994 	onlink = 0;
1995 	rcu_read_lock();
1996 	idev = __in6_dev_get(dev);
1997 	if (idev) {
1998 		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1999 			onlink = ipv6_prefix_equal(addr, &ifa->addr,
2000 						   ifa->prefix_len);
2001 			if (onlink)
2002 				break;
2003 		}
2004 	}
2005 	rcu_read_unlock();
2006 	return onlink;
2007 }
2008 EXPORT_SYMBOL(ipv6_chk_prefix);
2009 
2010 /**
2011  * ipv6_dev_find - find the first device with a given source address.
2012  * @net: the net namespace
2013  * @addr: the source address
2014  * @dev: used to find the L3 domain of interest
2015  *
2016  * The caller should be protected by RCU, or RTNL.
2017  */
2018 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2019 				 struct net_device *dev)
2020 {
2021 	return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2022 					 IFA_F_TENTATIVE);
2023 }
2024 EXPORT_SYMBOL(ipv6_dev_find);
2025 
2026 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2027 				     struct net_device *dev, int strict)
2028 {
2029 	unsigned int hash = inet6_addr_hash(net, addr);
2030 	struct inet6_ifaddr *ifp, *result = NULL;
2031 
2032 	rcu_read_lock();
2033 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
2034 		if (ipv6_addr_equal(&ifp->addr, addr)) {
2035 			if (!dev || ifp->idev->dev == dev ||
2036 			    !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2037 				result = ifp;
2038 				in6_ifa_hold(ifp);
2039 				break;
2040 			}
2041 		}
2042 	}
2043 	rcu_read_unlock();
2044 
2045 	return result;
2046 }
2047 
2048 /* Gets referenced address, destroys ifaddr */
2049 
2050 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2051 {
2052 	if (dad_failed)
2053 		ifp->flags |= IFA_F_DADFAILED;
2054 
2055 	if (ifp->flags&IFA_F_TEMPORARY) {
2056 		struct inet6_ifaddr *ifpub;
2057 		spin_lock_bh(&ifp->lock);
2058 		ifpub = ifp->ifpub;
2059 		if (ifpub) {
2060 			in6_ifa_hold(ifpub);
2061 			spin_unlock_bh(&ifp->lock);
2062 			ipv6_create_tempaddr(ifpub, true);
2063 			in6_ifa_put(ifpub);
2064 		} else {
2065 			spin_unlock_bh(&ifp->lock);
2066 		}
2067 		ipv6_del_addr(ifp);
2068 	} else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2069 		spin_lock_bh(&ifp->lock);
2070 		addrconf_del_dad_work(ifp);
2071 		ifp->flags |= IFA_F_TENTATIVE;
2072 		if (dad_failed)
2073 			ifp->flags &= ~IFA_F_OPTIMISTIC;
2074 		spin_unlock_bh(&ifp->lock);
2075 		if (dad_failed)
2076 			ipv6_ifa_notify(0, ifp);
2077 		in6_ifa_put(ifp);
2078 	} else {
2079 		ipv6_del_addr(ifp);
2080 	}
2081 }
2082 
2083 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2084 {
2085 	int err = -ENOENT;
2086 
2087 	spin_lock_bh(&ifp->lock);
2088 	if (ifp->state == INET6_IFADDR_STATE_DAD) {
2089 		ifp->state = INET6_IFADDR_STATE_POSTDAD;
2090 		err = 0;
2091 	}
2092 	spin_unlock_bh(&ifp->lock);
2093 
2094 	return err;
2095 }
2096 
2097 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2098 {
2099 	struct inet6_dev *idev = ifp->idev;
2100 	struct net *net = dev_net(idev->dev);
2101 
2102 	if (addrconf_dad_end(ifp)) {
2103 		in6_ifa_put(ifp);
2104 		return;
2105 	}
2106 
2107 	net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2108 			     ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2109 
2110 	spin_lock_bh(&ifp->lock);
2111 
2112 	if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2113 		struct in6_addr new_addr;
2114 		struct inet6_ifaddr *ifp2;
2115 		int retries = ifp->stable_privacy_retry + 1;
2116 		struct ifa6_config cfg = {
2117 			.pfx = &new_addr,
2118 			.plen = ifp->prefix_len,
2119 			.ifa_flags = ifp->flags,
2120 			.valid_lft = ifp->valid_lft,
2121 			.preferred_lft = ifp->prefered_lft,
2122 			.scope = ifp->scope,
2123 		};
2124 
2125 		if (retries > net->ipv6.sysctl.idgen_retries) {
2126 			net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2127 					     ifp->idev->dev->name);
2128 			goto errdad;
2129 		}
2130 
2131 		new_addr = ifp->addr;
2132 		if (ipv6_generate_stable_address(&new_addr, retries,
2133 						 idev))
2134 			goto errdad;
2135 
2136 		spin_unlock_bh(&ifp->lock);
2137 
2138 		if (idev->cnf.max_addresses &&
2139 		    ipv6_count_addresses(idev) >=
2140 		    idev->cnf.max_addresses)
2141 			goto lock_errdad;
2142 
2143 		net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2144 				     ifp->idev->dev->name);
2145 
2146 		ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2147 		if (IS_ERR(ifp2))
2148 			goto lock_errdad;
2149 
2150 		spin_lock_bh(&ifp2->lock);
2151 		ifp2->stable_privacy_retry = retries;
2152 		ifp2->state = INET6_IFADDR_STATE_PREDAD;
2153 		spin_unlock_bh(&ifp2->lock);
2154 
2155 		addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2156 		in6_ifa_put(ifp2);
2157 lock_errdad:
2158 		spin_lock_bh(&ifp->lock);
2159 	}
2160 
2161 errdad:
2162 	/* transition from _POSTDAD to _ERRDAD */
2163 	ifp->state = INET6_IFADDR_STATE_ERRDAD;
2164 	spin_unlock_bh(&ifp->lock);
2165 
2166 	addrconf_mod_dad_work(ifp, 0);
2167 	in6_ifa_put(ifp);
2168 }
2169 
2170 /* Join the solicited-node multicast group for this address.
2171  * caller must hold RTNL */
2172 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2173 {
2174 	struct in6_addr maddr;
2175 
2176 	if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2177 		return;
2178 
2179 	addrconf_addr_solict_mult(addr, &maddr);
2180 	ipv6_dev_mc_inc(dev, &maddr);
2181 }
2182 
2183 /* caller must hold RTNL */
2184 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2185 {
2186 	struct in6_addr maddr;
2187 
2188 	if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2189 		return;
2190 
2191 	addrconf_addr_solict_mult(addr, &maddr);
2192 	__ipv6_dev_mc_dec(idev, &maddr);
2193 }
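
/* Illustrative sketch (not part of the original source): the mapping done
 * by addrconf_addr_solict_mult() as used above.  The solicited-node
 * multicast group is ff02::1:ff00:0/104 with the low 24 bits of the
 * unicast address appended (RFC 4291, 2.7.1), e.g. 2001:db8::1234:5678
 * maps to ff02::1:ff34:5678.  This is a hand-rolled equivalent.
 */
static inline void example_solicited_node(const struct in6_addr *uni,
					  struct in6_addr *mc)
{
	ipv6_addr_set(mc, htonl(0xFF020000), 0, htonl(0x1),
		      htonl(0xFF000000) | (uni->s6_addr32[3] &
					   htonl(0x00FFFFFF)));
}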
2194 
2195 /* caller must hold RTNL */
2196 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2197 {
2198 	struct in6_addr addr;
2199 
2200 	if (ifp->prefix_len >= 127) /* RFC 6164 */
2201 		return;
2202 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2203 	if (ipv6_addr_any(&addr))
2204 		return;
2205 	__ipv6_dev_ac_inc(ifp->idev, &addr);
2206 }
2207 
2208 /* caller must hold RTNL */
2209 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2210 {
2211 	struct in6_addr addr;
2212 
2213 	if (ifp->prefix_len >= 127) /* RFC 6164 */
2214 		return;
2215 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2216 	if (ipv6_addr_any(&addr))
2217 		return;
2218 	__ipv6_dev_ac_dec(ifp->idev, &addr);
2219 }
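
/* Illustrative sketch (not part of the original source): the subnet-router
 * anycast address joined/left above is just the address with its interface
 * identifier zeroed (RFC 4291, 2.6.1), e.g. 2001:db8:1:2::5/64 yields
 * 2001:db8:1:2::.  ipv6_addr_prefix() performs exactly that masking;
 * /127 and /128 prefixes are skipped per RFC 6164.
 */
static inline void example_subnet_router_anycast(struct in6_addr *any,
						 const struct in6_addr *addr,
						 int plen)
{
	ipv6_addr_prefix(any, addr, plen);	/* keep only the first plen bits */
}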
2220 
2221 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2222 {
2223 	switch (dev->addr_len) {
2224 	case ETH_ALEN:
2225 		memcpy(eui, dev->dev_addr, 3);
2226 		eui[3] = 0xFF;
2227 		eui[4] = 0xFE;
2228 		memcpy(eui + 5, dev->dev_addr + 3, 3);
2229 		break;
2230 	case EUI64_ADDR_LEN:
2231 		memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2232 		eui[0] ^= 2;
2233 		break;
2234 	default:
2235 		return -1;
2236 	}
2237 
2238 	return 0;
2239 }
2240 
2241 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2242 {
2243 	const union fwnet_hwaddr *ha;
2244 
2245 	if (dev->addr_len != FWNET_ALEN)
2246 		return -1;
2247 
2248 	ha = (const union fwnet_hwaddr *)dev->dev_addr;
2249 
2250 	memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2251 	eui[0] ^= 2;
2252 	return 0;
2253 }
2254 
2255 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2256 {
2257 	/* XXX: inherit EUI-64 from other interface -- yoshfuji */
2258 	if (dev->addr_len != ARCNET_ALEN)
2259 		return -1;
2260 	memset(eui, 0, 7);
2261 	eui[7] = *(u8 *)dev->dev_addr;
2262 	return 0;
2263 }
2264 
2265 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2266 {
2267 	if (dev->addr_len != INFINIBAND_ALEN)
2268 		return -1;
2269 	memcpy(eui, dev->dev_addr + 12, 8);
2270 	eui[0] |= 2;
2271 	return 0;
2272 }
2273 
2274 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2275 {
2276 	if (addr == 0)
2277 		return -1;
2278 	eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2279 		  ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2280 		  ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2281 		  ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2282 		  ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2283 		  ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2284 	eui[1] = 0;
2285 	eui[2] = 0x5E;
2286 	eui[3] = 0xFE;
2287 	memcpy(eui + 4, &addr, 4);
2288 	return 0;
2289 }
2290 
2291 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2292 {
2293 	if (dev->priv_flags & IFF_ISATAP)
2294 		return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2295 	return -1;
2296 }
2297 
2298 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2299 {
2300 	return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2301 }
2302 
2303 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2304 {
2305 	memcpy(eui, dev->perm_addr, 3);
2306 	memcpy(eui + 5, dev->perm_addr + 3, 3);
2307 	eui[3] = 0xFF;
2308 	eui[4] = 0xFE;
2309 	eui[0] ^= 2;
2310 	return 0;
2311 }
2312 
2313 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2314 {
2315 	switch (dev->type) {
2316 	case ARPHRD_ETHER:
2317 	case ARPHRD_FDDI:
2318 		return addrconf_ifid_eui48(eui, dev);
2319 	case ARPHRD_ARCNET:
2320 		return addrconf_ifid_arcnet(eui, dev);
2321 	case ARPHRD_INFINIBAND:
2322 		return addrconf_ifid_infiniband(eui, dev);
2323 	case ARPHRD_SIT:
2324 		return addrconf_ifid_sit(eui, dev);
2325 	case ARPHRD_IPGRE:
2326 	case ARPHRD_TUNNEL:
2327 		return addrconf_ifid_gre(eui, dev);
2328 	case ARPHRD_6LOWPAN:
2329 		return addrconf_ifid_6lowpan(eui, dev);
2330 	case ARPHRD_IEEE1394:
2331 		return addrconf_ifid_ieee1394(eui, dev);
2332 	case ARPHRD_TUNNEL6:
2333 	case ARPHRD_IP6GRE:
2334 	case ARPHRD_RAWIP:
2335 		return addrconf_ifid_ip6tnl(eui, dev);
2336 	}
2337 	return -1;
2338 }
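
/* Illustrative sketch (not part of the original source): the modified
 * EUI-64 derivation used for ARPHRD_ETHER via addrconf_ifid_eui48()
 * (declared in addrconf.h).  The 48-bit MAC is split in the middle,
 * 0xFF 0xFE is inserted, and the universal/local bit is flipped, so
 * e.g. 00:11:22:33:44:55 becomes the IID 02:11:22:ff:fe:33:44:55.
 */
static inline void example_eui64_from_mac(u8 *eui, const u8 *mac)
{
	memcpy(eui, mac, 3);
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	memcpy(eui + 5, mac + 3, 3);
	eui[0] ^= 2;		/* invert the universal/local bit */
}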
2339 
2340 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2341 {
2342 	int err = -1;
2343 	struct inet6_ifaddr *ifp;
2344 
2345 	read_lock_bh(&idev->lock);
2346 	list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2347 		if (ifp->scope > IFA_LINK)
2348 			break;
2349 		if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2350 			memcpy(eui, ifp->addr.s6_addr+8, 8);
2351 			err = 0;
2352 			break;
2353 		}
2354 	}
2355 	read_unlock_bh(&idev->lock);
2356 	return err;
2357 }
2358 
2359 /* Generation of a randomized Interface Identifier
2360  * draft-ietf-6man-rfc4941bis, Section 3.3.1
2361  */
2362 
2363 static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2364 {
2365 regen:
2366 	get_random_bytes(&addr->s6_addr[8], 8);
2367 
2368 	/* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2369 	 * check if generated address is not inappropriate:
2370 	 *
2371 	 * - Reserved IPv6 Interface Identifiers
2372 	 * - XXX: already assigned to an address on the device
2373 	 */
2374 
2375 	/* Subnet-router anycast: 0000:0000:0000:0000 */
2376 	if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2377 		goto regen;
2378 
2379 	/* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2380 	 * Proxy Mobile IPv6:   0200:5EFF:FE00:5213
2381 	 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2382 	 */
2383 	if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2384 	    (ntohl(addr->s6_addr32[3]) & 0xff000000) == 0xfe000000)
2385 		goto regen;
2386 
2387 	/* Reserved subnet anycast addresses */
2388 	if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2389 	    ntohl(addr->s6_addr32[3]) >= 0xffffff80)
2390 		goto regen;
2391 }
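
/* Illustrative sketch (not part of the original source): forming a full
 * address from a /64 prefix and a randomized interface identifier using
 * the helper above.  The prefix is assumed to be in network byte order.
 */
static inline void example_rnd_addr_from_prefix(struct in6_addr *addr,
						const struct in6_addr *pfx)
{
	memcpy(addr->s6_addr, pfx->s6_addr, 8);	/* upper 64 bits: prefix */
	ipv6_gen_rnd_iid(addr);			/* lower 64 bits: random IID */
}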
2392 
2393 /*
2394  *	Add prefix route.
2395  */
2396 
2397 static void
2398 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2399 		      struct net_device *dev, unsigned long expires,
2400 		      u32 flags, gfp_t gfp_flags)
2401 {
2402 	struct fib6_config cfg = {
2403 		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2404 		.fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2405 		.fc_ifindex = dev->ifindex,
2406 		.fc_expires = expires,
2407 		.fc_dst_len = plen,
2408 		.fc_flags = RTF_UP | flags,
2409 		.fc_nlinfo.nl_net = dev_net(dev),
2410 		.fc_protocol = RTPROT_KERNEL,
2411 		.fc_type = RTN_UNICAST,
2412 	};
2413 
2414 	cfg.fc_dst = *pfx;
2415 
2416 	/* Prevent useless cloning on point-to-point SIT.
2417 	   This is done here on the assumption that the whole
2418 	   class of non-broadcast devices does not need cloning.
2419 	 */
2420 #if IS_ENABLED(CONFIG_IPV6_SIT)
2421 	if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2422 		cfg.fc_flags |= RTF_NONEXTHOP;
2423 #endif
2424 
2425 	ip6_route_add(&cfg, gfp_flags, NULL);
2426 }
2427 
2428 
2429 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2430 						  int plen,
2431 						  const struct net_device *dev,
2432 						  u32 flags, u32 noflags,
2433 						  bool no_gw)
2434 {
2435 	struct fib6_node *fn;
2436 	struct fib6_info *rt = NULL;
2437 	struct fib6_table *table;
2438 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2439 
2440 	table = fib6_get_table(dev_net(dev), tb_id);
2441 	if (!table)
2442 		return NULL;
2443 
2444 	rcu_read_lock();
2445 	fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2446 	if (!fn)
2447 		goto out;
2448 
2449 	for_each_fib6_node_rt_rcu(fn) {
2450 		/* prefix routes only use builtin fib6_nh */
2451 		if (rt->nh)
2452 			continue;
2453 
2454 		if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2455 			continue;
2456 		if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2457 			continue;
2458 		if ((rt->fib6_flags & flags) != flags)
2459 			continue;
2460 		if ((rt->fib6_flags & noflags) != 0)
2461 			continue;
2462 		if (!fib6_info_hold_safe(rt))
2463 			continue;
2464 		break;
2465 	}
2466 out:
2467 	rcu_read_unlock();
2468 	return rt;
2469 }
2470 
2471 
2472 /* Create "default" multicast route to the interface */
2473 
2474 static void addrconf_add_mroute(struct net_device *dev)
2475 {
2476 	struct fib6_config cfg = {
2477 		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2478 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
2479 		.fc_ifindex = dev->ifindex,
2480 		.fc_dst_len = 8,
2481 		.fc_flags = RTF_UP,
2482 		.fc_type = RTN_MULTICAST,
2483 		.fc_nlinfo.nl_net = dev_net(dev),
2484 		.fc_protocol = RTPROT_KERNEL,
2485 	};
2486 
2487 	ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2488 
2489 	ip6_route_add(&cfg, GFP_KERNEL, NULL);
2490 }
2491 
2492 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2493 {
2494 	struct inet6_dev *idev;
2495 
2496 	ASSERT_RTNL();
2497 
2498 	idev = ipv6_find_idev(dev);
2499 	if (IS_ERR(idev))
2500 		return idev;
2501 
2502 	if (idev->cnf.disable_ipv6)
2503 		return ERR_PTR(-EACCES);
2504 
2505 	/* Add default multicast route */
2506 	if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2507 		addrconf_add_mroute(dev);
2508 
2509 	return idev;
2510 }
2511 
2512 static void manage_tempaddrs(struct inet6_dev *idev,
2513 			     struct inet6_ifaddr *ifp,
2514 			     __u32 valid_lft, __u32 prefered_lft,
2515 			     bool create, unsigned long now)
2516 {
2517 	u32 flags;
2518 	struct inet6_ifaddr *ift;
2519 
2520 	read_lock_bh(&idev->lock);
2521 	/* update all temporary addresses in the list */
2522 	list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2523 		int age, max_valid, max_prefered;
2524 
2525 		if (ifp != ift->ifpub)
2526 			continue;
2527 
2528 		/* RFC 4941 section 3.3:
2529 		 * If a received option will extend the lifetime of a public
2530 		 * address, the lifetimes of temporary addresses should
2531 		 * be extended, subject to the overall constraint that no
2532 		 * temporary addresses should ever remain "valid" or "preferred"
2533 		 * for a time longer than (TEMP_VALID_LIFETIME) or
2534 		 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2535 		 */
2536 		age = (now - ift->cstamp) / HZ;
2537 		max_valid = idev->cnf.temp_valid_lft - age;
2538 		if (max_valid < 0)
2539 			max_valid = 0;
2540 
2541 		max_prefered = idev->cnf.temp_prefered_lft -
2542 			       idev->desync_factor - age;
2543 		if (max_prefered < 0)
2544 			max_prefered = 0;
2545 
2546 		if (valid_lft > max_valid)
2547 			valid_lft = max_valid;
2548 
2549 		if (prefered_lft > max_prefered)
2550 			prefered_lft = max_prefered;
2551 
2552 		spin_lock(&ift->lock);
2553 		flags = ift->flags;
2554 		ift->valid_lft = valid_lft;
2555 		ift->prefered_lft = prefered_lft;
2556 		ift->tstamp = now;
2557 		if (prefered_lft > 0)
2558 			ift->flags &= ~IFA_F_DEPRECATED;
2559 
2560 		spin_unlock(&ift->lock);
2561 		if (!(flags&IFA_F_TENTATIVE))
2562 			ipv6_ifa_notify(0, ift);
2563 	}
2564 
2565 	if ((create || list_empty(&idev->tempaddr_list)) &&
2566 	    idev->cnf.use_tempaddr > 0) {
2567 		/* When a new public address is created as described
2568 		 * in [ADDRCONF], also create a new temporary address.
2569 		 * Likewise create a temporary address if the feature is
2570 		 * enabled but no temporary address currently exists.
2571 		 */
2572 		read_unlock_bh(&idev->lock);
2573 		ipv6_create_tempaddr(ifp, false);
2574 	} else {
2575 		read_unlock_bh(&idev->lock);
2576 	}
2577 }
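
/* Illustrative sketch (not part of the original source): the lifetime
 * clamping described by the RFC 4941 comment in manage_tempaddrs() above.
 * Given the age of a temporary address, an advertised lifetime is capped
 * so the address is never valid longer than temp_valid_lft nor preferred
 * longer than temp_prefered_lft - desync_factor.
 */
static inline u32 example_clamp_tempaddr_lft(u32 advertised, s32 limit,
					     s32 age)
{
	s32 max_lft = limit - age;

	if (max_lft < 0)
		max_lft = 0;
	return min_t(u32, advertised, (u32)max_lft);
}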
2578 
2579 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2580 {
2581 	return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2582 	       idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2583 }
2584 
2585 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2586 				 const struct prefix_info *pinfo,
2587 				 struct inet6_dev *in6_dev,
2588 				 const struct in6_addr *addr, int addr_type,
2589 				 u32 addr_flags, bool sllao, bool tokenized,
2590 				 __u32 valid_lft, u32 prefered_lft)
2591 {
2592 	struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2593 	int create = 0, update_lft = 0;
2594 
2595 	if (!ifp && valid_lft) {
2596 		int max_addresses = in6_dev->cnf.max_addresses;
2597 		struct ifa6_config cfg = {
2598 			.pfx = addr,
2599 			.plen = pinfo->prefix_len,
2600 			.ifa_flags = addr_flags,
2601 			.valid_lft = valid_lft,
2602 			.preferred_lft = prefered_lft,
2603 			.scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2604 			.ifa_proto = IFAPROT_KERNEL_RA
2605 		};
2606 
2607 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2608 		if ((net->ipv6.devconf_all->optimistic_dad ||
2609 		     in6_dev->cnf.optimistic_dad) &&
2610 		    !net->ipv6.devconf_all->forwarding && sllao)
2611 			cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2612 #endif
2613 
2614 		/* Do not allow creating too many autoconfigured
2615 		 * addresses; that would be an easy way to crash the kernel.
2616 		 */
2617 		if (!max_addresses ||
2618 		    ipv6_count_addresses(in6_dev) < max_addresses)
2619 			ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2620 
2621 		if (IS_ERR_OR_NULL(ifp))
2622 			return -1;
2623 
2624 		create = 1;
2625 		spin_lock_bh(&ifp->lock);
2626 		ifp->flags |= IFA_F_MANAGETEMPADDR;
2627 		ifp->cstamp = jiffies;
2628 		ifp->tokenized = tokenized;
2629 		spin_unlock_bh(&ifp->lock);
2630 		addrconf_dad_start(ifp);
2631 	}
2632 
2633 	if (ifp) {
2634 		u32 flags;
2635 		unsigned long now;
2636 		u32 stored_lft;
2637 
2638 		/* update lifetime (RFC2462 5.5.3 e) */
2639 		spin_lock_bh(&ifp->lock);
2640 		now = jiffies;
2641 		if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2642 			stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2643 		else
2644 			stored_lft = 0;
2645 		if (!create && stored_lft) {
2646 			const u32 minimum_lft = min_t(u32,
2647 				stored_lft, MIN_VALID_LIFETIME);
2648 			valid_lft = max(valid_lft, minimum_lft);
2649 
2650 			/* RFC4862 Section 5.5.3e:
2651 			 * "Note that the preferred lifetime of the
2652 			 *  corresponding address is always reset to
2653 			 *  the Preferred Lifetime in the received
2654 			 *  Prefix Information option, regardless of
2655 			 *  whether the valid lifetime is also reset or
2656 			 *  ignored."
2657 			 *
2658 			 * So we should always update prefered_lft here.
2659 			 */
2660 			update_lft = 1;
2661 		}
2662 
2663 		if (update_lft) {
2664 			ifp->valid_lft = valid_lft;
2665 			ifp->prefered_lft = prefered_lft;
2666 			ifp->tstamp = now;
2667 			flags = ifp->flags;
2668 			ifp->flags &= ~IFA_F_DEPRECATED;
2669 			spin_unlock_bh(&ifp->lock);
2670 
2671 			if (!(flags&IFA_F_TENTATIVE))
2672 				ipv6_ifa_notify(0, ifp);
2673 		} else
2674 			spin_unlock_bh(&ifp->lock);
2675 
2676 		manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2677 				 create, now);
2678 
2679 		in6_ifa_put(ifp);
2680 		addrconf_verify(net);
2681 	}
2682 
2683 	return 0;
2684 }
2685 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
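
/* Illustrative sketch (not part of the original source): the RFC 4862
 * section 5.5.3e valid-lifetime rule applied above.  An advertised
 * lifetime cannot lower the remaining lifetime below two hours
 * (MIN_VALID_LIFETIME) unless the stored lifetime is already shorter,
 * e.g. with 10000s remaining an advertised 300s yields 7200s, while an
 * advertised 9000s is accepted as-is.
 */
static inline u32 example_rfc4862_valid_lft(u32 advertised, u32 stored)
{
	u32 minimum = min_t(u32, stored, MIN_VALID_LIFETIME);

	return max(advertised, minimum);
}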
2686 
2687 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2688 {
2689 	struct prefix_info *pinfo;
2690 	__u32 valid_lft;
2691 	__u32 prefered_lft;
2692 	int addr_type, err;
2693 	u32 addr_flags = 0;
2694 	struct inet6_dev *in6_dev;
2695 	struct net *net = dev_net(dev);
2696 
2697 	pinfo = (struct prefix_info *) opt;
2698 
2699 	if (len < sizeof(struct prefix_info)) {
2700 		netdev_dbg(dev, "addrconf: prefix option too short\n");
2701 		return;
2702 	}
2703 
2704 	/*
2705 	 *	Validation checks ([ADDRCONF], page 19)
2706 	 */
2707 
2708 	addr_type = ipv6_addr_type(&pinfo->prefix);
2709 
2710 	if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2711 		return;
2712 
2713 	valid_lft = ntohl(pinfo->valid);
2714 	prefered_lft = ntohl(pinfo->prefered);
2715 
2716 	if (prefered_lft > valid_lft) {
2717 		net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2718 		return;
2719 	}
2720 
2721 	in6_dev = in6_dev_get(dev);
2722 
2723 	if (!in6_dev) {
2724 		net_dbg_ratelimited("addrconf: device %s not configured\n",
2725 				    dev->name);
2726 		return;
2727 	}
2728 
2729 	/*
2730 	 *	Two things going on here:
2731 	 *	1) Add routes for on-link prefixes
2732 	 *	2) Configure prefixes with the auto flag set
2733 	 */
2734 
2735 	if (pinfo->onlink) {
2736 		struct fib6_info *rt;
2737 		unsigned long rt_expires;
2738 
2739 		/* Avoid arithmetic overflow. Really, we could
2740 		 * save rt_expires in seconds, likely valid_lft,
2741 		 * but that would require division in fib gc, which
2742 		 * is not good.
2743 		 */
2744 		if (HZ > USER_HZ)
2745 			rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2746 		else
2747 			rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2748 
2749 		if (addrconf_finite_timeout(rt_expires))
2750 			rt_expires *= HZ;
2751 
2752 		rt = addrconf_get_prefix_route(&pinfo->prefix,
2753 					       pinfo->prefix_len,
2754 					       dev,
2755 					       RTF_ADDRCONF | RTF_PREFIX_RT,
2756 					       RTF_DEFAULT, true);
2757 
2758 		if (rt) {
2759 			/* Autoconf prefix route */
2760 			if (valid_lft == 0) {
2761 				ip6_del_rt(net, rt, false);
2762 				rt = NULL;
2763 			} else if (addrconf_finite_timeout(rt_expires)) {
2764 				/* not infinity */
2765 				fib6_set_expires(rt, jiffies + rt_expires);
2766 			} else {
2767 				fib6_clean_expires(rt);
2768 			}
2769 		} else if (valid_lft) {
2770 			clock_t expires = 0;
2771 			int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2772 			if (addrconf_finite_timeout(rt_expires)) {
2773 				/* not infinity */
2774 				flags |= RTF_EXPIRES;
2775 				expires = jiffies_to_clock_t(rt_expires);
2776 			}
2777 			addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2778 					      0, dev, expires, flags,
2779 					      GFP_ATOMIC);
2780 		}
2781 		fib6_info_release(rt);
2782 	}
2783 
2784 	/* Try to figure out our local address for this prefix */
2785 
2786 	if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2787 		struct in6_addr addr;
2788 		bool tokenized = false, dev_addr_generated = false;
2789 
2790 		if (pinfo->prefix_len == 64) {
2791 			memcpy(&addr, &pinfo->prefix, 8);
2792 
2793 			if (!ipv6_addr_any(&in6_dev->token)) {
2794 				read_lock_bh(&in6_dev->lock);
2795 				memcpy(addr.s6_addr + 8,
2796 				       in6_dev->token.s6_addr + 8, 8);
2797 				read_unlock_bh(&in6_dev->lock);
2798 				tokenized = true;
2799 			} else if (is_addr_mode_generate_stable(in6_dev) &&
2800 				   !ipv6_generate_stable_address(&addr, 0,
2801 								 in6_dev)) {
2802 				addr_flags |= IFA_F_STABLE_PRIVACY;
2803 				goto ok;
2804 			} else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2805 				   ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2806 				goto put;
2807 			} else {
2808 				dev_addr_generated = true;
2809 			}
2810 			goto ok;
2811 		}
2812 		net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2813 				    pinfo->prefix_len);
2814 		goto put;
2815 
2816 ok:
2817 		err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2818 						   &addr, addr_type,
2819 						   addr_flags, sllao,
2820 						   tokenized, valid_lft,
2821 						   prefered_lft);
2822 		if (err)
2823 			goto put;
2824 
2825 		/* Ignore the error case here because the previous prefix
2826 		 * address add was successful and will be notified.
2827 		 */
2828 		ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2829 					      addr_type, addr_flags, sllao,
2830 					      tokenized, valid_lft,
2831 					      prefered_lft,
2832 					      dev_addr_generated);
2833 	}
2834 	inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2835 put:
2836 	in6_dev_put(in6_dev);
2837 }
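
/* Illustrative sketch (not part of the original source): converting a
 * finite prefix lifetime (in seconds) into a jiffies expiry the way the
 * rt_expires handling above does, using the same addrconf_timeout_fixup()
 * and addrconf_finite_timeout() helpers.  A lifetime of all-ones means
 * "infinite" and yields no expiry.
 */
static inline unsigned long example_prefix_expiry(u32 valid_lft)
{
	unsigned long rt_expires;

	rt_expires = addrconf_timeout_fixup(valid_lft,
					    HZ > USER_HZ ? HZ : USER_HZ);
	if (!addrconf_finite_timeout(rt_expires))
		return 0;			/* never expires */

	return jiffies + rt_expires * HZ;
}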
2838 
2839 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2840 		struct in6_ifreq *ireq)
2841 {
2842 	struct ip_tunnel_parm p = { };
2843 	int err;
2844 
2845 	if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2846 		return -EADDRNOTAVAIL;
2847 
2848 	p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2849 	p.iph.version = 4;
2850 	p.iph.ihl = 5;
2851 	p.iph.protocol = IPPROTO_IPV6;
2852 	p.iph.ttl = 64;
2853 
2854 	if (!dev->netdev_ops->ndo_tunnel_ctl)
2855 		return -EOPNOTSUPP;
2856 	err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2857 	if (err)
2858 		return err;
2859 
2860 	dev = __dev_get_by_name(net, p.name);
2861 	if (!dev)
2862 		return -ENOBUFS;
2863 	return dev_open(dev, NULL);
2864 }
2865 
2866 /*
2867  *	Set destination address.
2868  *	Special case for SIT interfaces where we create a new "virtual"
2869  *	device.
2870  */
2871 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2872 {
2873 	struct net_device *dev;
2874 	struct in6_ifreq ireq;
2875 	int err = -ENODEV;
2876 
2877 	if (!IS_ENABLED(CONFIG_IPV6_SIT))
2878 		return -ENODEV;
2879 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2880 		return -EFAULT;
2881 
2882 	rtnl_lock();
2883 	dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2884 	if (dev && dev->type == ARPHRD_SIT)
2885 		err = addrconf_set_sit_dstaddr(net, dev, &ireq);
2886 	rtnl_unlock();
2887 	return err;
2888 }
2889 
2890 static int ipv6_mc_config(struct sock *sk, bool join,
2891 			  const struct in6_addr *addr, int ifindex)
2892 {
2893 	int ret;
2894 
2895 	ASSERT_RTNL();
2896 
2897 	lock_sock(sk);
2898 	if (join)
2899 		ret = ipv6_sock_mc_join(sk, ifindex, addr);
2900 	else
2901 		ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2902 	release_sock(sk);
2903 
2904 	return ret;
2905 }
2906 
2907 /*
2908  *	Manual configuration of address on an interface
2909  */
2910 static int inet6_addr_add(struct net *net, int ifindex,
2911 			  struct ifa6_config *cfg,
2912 			  struct netlink_ext_ack *extack)
2913 {
2914 	struct inet6_ifaddr *ifp;
2915 	struct inet6_dev *idev;
2916 	struct net_device *dev;
2917 	unsigned long timeout;
2918 	clock_t expires;
2919 	u32 flags;
2920 
2921 	ASSERT_RTNL();
2922 
2923 	if (cfg->plen > 128)
2924 		return -EINVAL;
2925 
2926 	/* check the lifetime */
2927 	if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
2928 		return -EINVAL;
2929 
2930 	if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64)
2931 		return -EINVAL;
2932 
2933 	dev = __dev_get_by_index(net, ifindex);
2934 	if (!dev)
2935 		return -ENODEV;
2936 
2937 	idev = addrconf_add_dev(dev);
2938 	if (IS_ERR(idev))
2939 		return PTR_ERR(idev);
2940 
2941 	if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2942 		int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2943 					 true, cfg->pfx, ifindex);
2944 
2945 		if (ret < 0)
2946 			return ret;
2947 	}
2948 
2949 	cfg->scope = ipv6_addr_scope(cfg->pfx);
2950 
2951 	timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
2952 	if (addrconf_finite_timeout(timeout)) {
2953 		expires = jiffies_to_clock_t(timeout * HZ);
2954 		cfg->valid_lft = timeout;
2955 		flags = RTF_EXPIRES;
2956 	} else {
2957 		expires = 0;
2958 		flags = 0;
2959 		cfg->ifa_flags |= IFA_F_PERMANENT;
2960 	}
2961 
2962 	timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
2963 	if (addrconf_finite_timeout(timeout)) {
2964 		if (timeout == 0)
2965 			cfg->ifa_flags |= IFA_F_DEPRECATED;
2966 		cfg->preferred_lft = timeout;
2967 	}
2968 
2969 	ifp = ipv6_add_addr(idev, cfg, true, extack);
2970 	if (!IS_ERR(ifp)) {
2971 		if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
2972 			addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
2973 					      ifp->rt_priority, dev, expires,
2974 					      flags, GFP_KERNEL);
2975 		}
2976 
2977 		/* Send a netlink notification if DAD is enabled and
2978 		 * optimistic flag is not set
2979 		 */
2980 		if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
2981 			ipv6_ifa_notify(0, ifp);
2982 		/*
2983 		 * Note that section 3.1 of RFC 4429 indicates
2984 		 * that the Optimistic flag should not be set for
2985 		 * manually configured addresses
2986 		 */
2987 		addrconf_dad_start(ifp);
2988 		if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
2989 			manage_tempaddrs(idev, ifp, cfg->valid_lft,
2990 					 cfg->preferred_lft, true, jiffies);
2991 		in6_ifa_put(ifp);
2992 		addrconf_verify_rtnl(net);
2993 		return 0;
2994 	} else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2995 		ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
2996 			       cfg->pfx, ifindex);
2997 	}
2998 
2999 	return PTR_ERR(ifp);
3000 }
3001 
3002 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
3003 			  const struct in6_addr *pfx, unsigned int plen)
3004 {
3005 	struct inet6_ifaddr *ifp;
3006 	struct inet6_dev *idev;
3007 	struct net_device *dev;
3008 
3009 	if (plen > 128)
3010 		return -EINVAL;
3011 
3012 	dev = __dev_get_by_index(net, ifindex);
3013 	if (!dev)
3014 		return -ENODEV;
3015 
3016 	idev = __in6_dev_get(dev);
3017 	if (!idev)
3018 		return -ENXIO;
3019 
3020 	read_lock_bh(&idev->lock);
3021 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
3022 		if (ifp->prefix_len == plen &&
3023 		    ipv6_addr_equal(pfx, &ifp->addr)) {
3024 			in6_ifa_hold(ifp);
3025 			read_unlock_bh(&idev->lock);
3026 
3027 			if (!(ifp->flags & IFA_F_TEMPORARY) &&
3028 			    (ifa_flags & IFA_F_MANAGETEMPADDR))
3029 				manage_tempaddrs(idev, ifp, 0, 0, false,
3030 						 jiffies);
3031 			ipv6_del_addr(ifp);
3032 			addrconf_verify_rtnl(net);
3033 			if (ipv6_addr_is_multicast(pfx)) {
3034 				ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3035 					       false, pfx, dev->ifindex);
3036 			}
3037 			return 0;
3038 		}
3039 	}
3040 	read_unlock_bh(&idev->lock);
3041 	return -EADDRNOTAVAIL;
3042 }
3043 
3044 
3045 int addrconf_add_ifaddr(struct net *net, void __user *arg)
3046 {
3047 	struct ifa6_config cfg = {
3048 		.ifa_flags = IFA_F_PERMANENT,
3049 		.preferred_lft = INFINITY_LIFE_TIME,
3050 		.valid_lft = INFINITY_LIFE_TIME,
3051 	};
3052 	struct in6_ifreq ireq;
3053 	int err;
3054 
3055 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3056 		return -EPERM;
3057 
3058 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3059 		return -EFAULT;
3060 
3061 	cfg.pfx = &ireq.ifr6_addr;
3062 	cfg.plen = ireq.ifr6_prefixlen;
3063 
3064 	rtnl_lock();
3065 	err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
3066 	rtnl_unlock();
3067 	return err;
3068 }
3069 
3070 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3071 {
3072 	struct in6_ifreq ireq;
3073 	int err;
3074 
3075 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3076 		return -EPERM;
3077 
3078 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3079 		return -EFAULT;
3080 
3081 	rtnl_lock();
3082 	err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3083 			     ireq.ifr6_prefixlen);
3084 	rtnl_unlock();
3085 	return err;
3086 }
3087 
3088 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3089 		     int plen, int scope, u8 proto)
3090 {
3091 	struct inet6_ifaddr *ifp;
3092 	struct ifa6_config cfg = {
3093 		.pfx = addr,
3094 		.plen = plen,
3095 		.ifa_flags = IFA_F_PERMANENT,
3096 		.valid_lft = INFINITY_LIFE_TIME,
3097 		.preferred_lft = INFINITY_LIFE_TIME,
3098 		.scope = scope,
3099 		.ifa_proto = proto
3100 	};
3101 
3102 	ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3103 	if (!IS_ERR(ifp)) {
3104 		spin_lock_bh(&ifp->lock);
3105 		ifp->flags &= ~IFA_F_TENTATIVE;
3106 		spin_unlock_bh(&ifp->lock);
3107 		rt_genid_bump_ipv6(dev_net(idev->dev));
3108 		ipv6_ifa_notify(RTM_NEWADDR, ifp);
3109 		in6_ifa_put(ifp);
3110 	}
3111 }
3112 
3113 #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3114 static void add_v4_addrs(struct inet6_dev *idev)
3115 {
3116 	struct in6_addr addr;
3117 	struct net_device *dev;
3118 	struct net *net = dev_net(idev->dev);
3119 	int scope, plen, offset = 0;
3120 	u32 pflags = 0;
3121 
3122 	ASSERT_RTNL();
3123 
3124 	memset(&addr, 0, sizeof(struct in6_addr));
3125 	/* in case of IP6GRE the dev_addr is an IPv6 address, so we use only its last 4 bytes */
3126 	if (idev->dev->addr_len == sizeof(struct in6_addr))
3127 		offset = sizeof(struct in6_addr) - 4;
3128 	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
3129 
3130 	if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
3131 		scope = IPV6_ADDR_COMPATv4;
3132 		plen = 96;
3133 		pflags |= RTF_NONEXTHOP;
3134 	} else {
3135 		if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
3136 			return;
3137 
3138 		addr.s6_addr32[0] = htonl(0xfe800000);
3139 		scope = IFA_LINK;
3140 		plen = 64;
3141 	}
3142 
3143 	if (addr.s6_addr32[3]) {
3144 		add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC);
3145 		addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3146 				      GFP_KERNEL);
3147 		return;
3148 	}
3149 
3150 	for_each_netdev(net, dev) {
3151 		struct in_device *in_dev = __in_dev_get_rtnl(dev);
3152 		if (in_dev && (dev->flags & IFF_UP)) {
3153 			struct in_ifaddr *ifa;
3154 			int flag = scope;
3155 
3156 			in_dev_for_each_ifa_rtnl(ifa, in_dev) {
3157 				addr.s6_addr32[3] = ifa->ifa_local;
3158 
3159 				if (ifa->ifa_scope == RT_SCOPE_LINK)
3160 					continue;
3161 				if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3162 					if (idev->dev->flags&IFF_POINTOPOINT)
3163 						continue;
3164 					flag |= IFA_HOST;
3165 				}
3166 
3167 				add_addr(idev, &addr, plen, flag,
3168 					 IFAPROT_UNSPEC);
3169 				addrconf_prefix_route(&addr, plen, 0, idev->dev,
3170 						      0, pflags, GFP_KERNEL);
3171 			}
3172 		}
3173 	}
3174 }
3175 #endif
3176 
3177 static void init_loopback(struct net_device *dev)
3178 {
3179 	struct inet6_dev  *idev;
3180 
3181 	/* ::1 */
3182 
3183 	ASSERT_RTNL();
3184 
3185 	idev = ipv6_find_idev(dev);
3186 	if (IS_ERR(idev)) {
3187 		pr_debug("%s: add_dev failed\n", __func__);
3188 		return;
3189 	}
3190 
3191 	add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO);
3192 }
3193 
3194 void addrconf_add_linklocal(struct inet6_dev *idev,
3195 			    const struct in6_addr *addr, u32 flags)
3196 {
3197 	struct ifa6_config cfg = {
3198 		.pfx = addr,
3199 		.plen = 64,
3200 		.ifa_flags = flags | IFA_F_PERMANENT,
3201 		.valid_lft = INFINITY_LIFE_TIME,
3202 		.preferred_lft = INFINITY_LIFE_TIME,
3203 		.scope = IFA_LINK,
3204 		.ifa_proto = IFAPROT_KERNEL_LL
3205 	};
3206 	struct inet6_ifaddr *ifp;
3207 
3208 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3209 	if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3210 	     idev->cnf.optimistic_dad) &&
3211 	    !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3212 		cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3213 #endif
3214 
3215 	ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3216 	if (!IS_ERR(ifp)) {
3217 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3218 				      0, 0, GFP_ATOMIC);
3219 		addrconf_dad_start(ifp);
3220 		in6_ifa_put(ifp);
3221 	}
3222 }
3223 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3224 
3225 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3226 {
3227 	if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3228 		return true;
3229 
3230 	if (address.s6_addr32[2] == htonl(0x02005eff) &&
3231 	    ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3232 		return true;
3233 
3234 	if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3235 	    ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3236 		return true;
3237 
3238 	return false;
3239 }
3240 
3241 static int ipv6_generate_stable_address(struct in6_addr *address,
3242 					u8 dad_count,
3243 					const struct inet6_dev *idev)
3244 {
3245 	static DEFINE_SPINLOCK(lock);
3246 	static __u32 digest[SHA1_DIGEST_WORDS];
3247 	static __u32 workspace[SHA1_WORKSPACE_WORDS];
3248 
3249 	static union {
3250 		char __data[SHA1_BLOCK_SIZE];
3251 		struct {
3252 			struct in6_addr secret;
3253 			__be32 prefix[2];
3254 			unsigned char hwaddr[MAX_ADDR_LEN];
3255 			u8 dad_count;
3256 		} __packed;
3257 	} data;
3258 
3259 	struct in6_addr secret;
3260 	struct in6_addr temp;
3261 	struct net *net = dev_net(idev->dev);
3262 
3263 	BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3264 
3265 	if (idev->cnf.stable_secret.initialized)
3266 		secret = idev->cnf.stable_secret.secret;
3267 	else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3268 		secret = net->ipv6.devconf_dflt->stable_secret.secret;
3269 	else
3270 		return -1;
3271 
3272 retry:
3273 	spin_lock_bh(&lock);
3274 
3275 	sha1_init(digest);
3276 	memset(&data, 0, sizeof(data));
3277 	memset(workspace, 0, sizeof(workspace));
3278 	memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3279 	data.prefix[0] = address->s6_addr32[0];
3280 	data.prefix[1] = address->s6_addr32[1];
3281 	data.secret = secret;
3282 	data.dad_count = dad_count;
3283 
3284 	sha1_transform(digest, data.__data, workspace);
3285 
3286 	temp = *address;
3287 	temp.s6_addr32[2] = (__force __be32)digest[0];
3288 	temp.s6_addr32[3] = (__force __be32)digest[1];
3289 
3290 	spin_unlock_bh(&lock);
3291 
3292 	if (ipv6_reserved_interfaceid(temp)) {
3293 		dad_count++;
3294 		if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3295 			return -1;
3296 		goto retry;
3297 	}
3298 
3299 	*address = temp;
3300 	return 0;
3301 }
3302 
3303 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3304 {
3305 	struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3306 
3307 	if (s->initialized)
3308 		return;
3309 	s = &idev->cnf.stable_secret;
3310 	get_random_bytes(&s->secret, sizeof(s->secret));
3311 	s->initialized = true;
3312 }
3313 
3314 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3315 {
3316 	struct in6_addr addr;
3317 
3318 	/* no link local addresses on L3 master devices */
3319 	if (netif_is_l3_master(idev->dev))
3320 		return;
3321 
3322 	/* no link local addresses on devices flagged as slaves */
3323 	if (idev->dev->priv_flags & IFF_NO_ADDRCONF)
3324 		return;
3325 
3326 	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3327 
3328 	switch (idev->cnf.addr_gen_mode) {
3329 	case IN6_ADDR_GEN_MODE_RANDOM:
3330 		ipv6_gen_mode_random_init(idev);
3331 		fallthrough;
3332 	case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3333 		if (!ipv6_generate_stable_address(&addr, 0, idev))
3334 			addrconf_add_linklocal(idev, &addr,
3335 					       IFA_F_STABLE_PRIVACY);
3336 		else if (prefix_route)
3337 			addrconf_prefix_route(&addr, 64, 0, idev->dev,
3338 					      0, 0, GFP_KERNEL);
3339 		break;
3340 	case IN6_ADDR_GEN_MODE_EUI64:
3341 		/* addrconf_add_linklocal also adds a prefix_route and we
3342 		 * only need to care about prefix routes if ipv6_generate_eui64
3343 		 * couldn't generate one.
3344 		 */
3345 		if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3346 			addrconf_add_linklocal(idev, &addr, 0);
3347 		else if (prefix_route)
3348 			addrconf_prefix_route(&addr, 64, 0, idev->dev,
3349 					      0, 0, GFP_KERNEL);
3350 		break;
3351 	case IN6_ADDR_GEN_MODE_NONE:
3352 	default:
3353 		/* will not add any link local address */
3354 		break;
3355 	}
3356 }
3357 
3358 static void addrconf_dev_config(struct net_device *dev)
3359 {
3360 	struct inet6_dev *idev;
3361 
3362 	ASSERT_RTNL();
3363 
3364 	if ((dev->type != ARPHRD_ETHER) &&
3365 	    (dev->type != ARPHRD_FDDI) &&
3366 	    (dev->type != ARPHRD_ARCNET) &&
3367 	    (dev->type != ARPHRD_INFINIBAND) &&
3368 	    (dev->type != ARPHRD_IEEE1394) &&
3369 	    (dev->type != ARPHRD_TUNNEL6) &&
3370 	    (dev->type != ARPHRD_6LOWPAN) &&
3371 	    (dev->type != ARPHRD_TUNNEL) &&
3372 	    (dev->type != ARPHRD_NONE) &&
3373 	    (dev->type != ARPHRD_RAWIP)) {
3374 		/* Alas, we support only Ethernet autoconfiguration. */
3375 		idev = __in6_dev_get(dev);
3376 		if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3377 		    dev->flags & IFF_MULTICAST)
3378 			ipv6_mc_up(idev);
3379 		return;
3380 	}
3381 
3382 	idev = addrconf_add_dev(dev);
3383 	if (IS_ERR(idev))
3384 		return;
3385 
3386 	/* this device type has no EUI support */
3387 	if (dev->type == ARPHRD_NONE &&
3388 	    idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3389 		idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3390 
3391 	addrconf_addr_gen(idev, false);
3392 }
3393 
3394 #if IS_ENABLED(CONFIG_IPV6_SIT)
3395 static void addrconf_sit_config(struct net_device *dev)
3396 {
3397 	struct inet6_dev *idev;
3398 
3399 	ASSERT_RTNL();
3400 
3401 	/*
3402 	 * Configure the tunnel with one of our IPv4
3403 	 * addresses... we should configure all of
3404 	 * our v4 addrs in the tunnel
3405 	 */
3406 
3407 	idev = ipv6_find_idev(dev);
3408 	if (IS_ERR(idev)) {
3409 		pr_debug("%s: add_dev failed\n", __func__);
3410 		return;
3411 	}
3412 
3413 	if (dev->priv_flags & IFF_ISATAP) {
3414 		addrconf_addr_gen(idev, false);
3415 		return;
3416 	}
3417 
3418 	add_v4_addrs(idev);
3419 
3420 	if (dev->flags&IFF_POINTOPOINT)
3421 		addrconf_add_mroute(dev);
3422 }
3423 #endif
3424 
3425 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3426 static void addrconf_gre_config(struct net_device *dev)
3427 {
3428 	struct inet6_dev *idev;
3429 
3430 	ASSERT_RTNL();
3431 
3432 	idev = ipv6_find_idev(dev);
3433 	if (IS_ERR(idev)) {
3434 		pr_debug("%s: add_dev failed\n", __func__);
3435 		return;
3436 	}
3437 
3438 	if (dev->type == ARPHRD_ETHER) {
3439 		addrconf_addr_gen(idev, true);
3440 		return;
3441 	}
3442 
3443 	add_v4_addrs(idev);
3444 
3445 	if (dev->flags & IFF_POINTOPOINT)
3446 		addrconf_add_mroute(dev);
3447 }
3448 #endif
3449 
3450 static void addrconf_init_auto_addrs(struct net_device *dev)
3451 {
3452 	switch (dev->type) {
3453 #if IS_ENABLED(CONFIG_IPV6_SIT)
3454 	case ARPHRD_SIT:
3455 		addrconf_sit_config(dev);
3456 		break;
3457 #endif
3458 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3459 	case ARPHRD_IP6GRE:
3460 	case ARPHRD_IPGRE:
3461 		addrconf_gre_config(dev);
3462 		break;
3463 #endif
3464 	case ARPHRD_LOOPBACK:
3465 		init_loopback(dev);
3466 		break;
3467 
3468 	default:
3469 		addrconf_dev_config(dev);
3470 		break;
3471 	}
3472 }
3473 
3474 static int fixup_permanent_addr(struct net *net,
3475 				struct inet6_dev *idev,
3476 				struct inet6_ifaddr *ifp)
3477 {
3478 	/* !fib6_node means the host route was removed from the
3479 	 * FIB, for example, if 'lo' device is taken down. In that
3480 	 * case regenerate the host route.
3481 	 */
3482 	if (!ifp->rt || !ifp->rt->fib6_node) {
3483 		struct fib6_info *f6i, *prev;
3484 
3485 		f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3486 					 GFP_ATOMIC);
3487 		if (IS_ERR(f6i))
3488 			return PTR_ERR(f6i);
3489 
3490 		/* ifp->rt can be accessed outside of rtnl */
3491 		spin_lock(&ifp->lock);
3492 		prev = ifp->rt;
3493 		ifp->rt = f6i;
3494 		spin_unlock(&ifp->lock);
3495 
3496 		fib6_info_release(prev);
3497 	}
3498 
3499 	if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3500 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3501 				      ifp->rt_priority, idev->dev, 0, 0,
3502 				      GFP_ATOMIC);
3503 	}
3504 
3505 	if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3506 		addrconf_dad_start(ifp);
3507 
3508 	return 0;
3509 }
3510 
3511 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3512 {
3513 	struct inet6_ifaddr *ifp, *tmp;
3514 	struct inet6_dev *idev;
3515 
3516 	idev = __in6_dev_get(dev);
3517 	if (!idev)
3518 		return;
3519 
3520 	write_lock_bh(&idev->lock);
3521 
3522 	list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3523 		if ((ifp->flags & IFA_F_PERMANENT) &&
3524 		    fixup_permanent_addr(net, idev, ifp) < 0) {
3525 			write_unlock_bh(&idev->lock);
3526 			in6_ifa_hold(ifp);
3527 			ipv6_del_addr(ifp);
3528 			write_lock_bh(&idev->lock);
3529 
3530 			net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3531 					     idev->dev->name, &ifp->addr);
3532 		}
3533 	}
3534 
3535 	write_unlock_bh(&idev->lock);
3536 }
3537 
3538 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3539 			   void *ptr)
3540 {
3541 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3542 	struct netdev_notifier_change_info *change_info;
3543 	struct netdev_notifier_changeupper_info *info;
3544 	struct inet6_dev *idev = __in6_dev_get(dev);
3545 	struct net *net = dev_net(dev);
3546 	int run_pending = 0;
3547 	int err;
3548 
3549 	switch (event) {
3550 	case NETDEV_REGISTER:
3551 		if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3552 			idev = ipv6_add_dev(dev);
3553 			if (IS_ERR(idev))
3554 				return notifier_from_errno(PTR_ERR(idev));
3555 		}
3556 		break;
3557 
3558 	case NETDEV_CHANGEMTU:
3559 		/* if the MTU is under IPV6_MIN_MTU, stop IPv6 on this interface. */
3560 		if (dev->mtu < IPV6_MIN_MTU) {
3561 			addrconf_ifdown(dev, dev != net->loopback_dev);
3562 			break;
3563 		}
3564 
3565 		if (idev) {
3566 			rt6_mtu_change(dev, dev->mtu);
3567 			idev->cnf.mtu6 = dev->mtu;
3568 			break;
3569 		}
3570 
3571 		/* allocate new idev */
3572 		idev = ipv6_add_dev(dev);
3573 		if (IS_ERR(idev))
3574 			break;
3575 
3576 		/* device is still not ready */
3577 		if (!(idev->if_flags & IF_READY))
3578 			break;
3579 
3580 		run_pending = 1;
3581 		fallthrough;
3582 	case NETDEV_UP:
3583 	case NETDEV_CHANGE:
3584 		if (idev && idev->cnf.disable_ipv6)
3585 			break;
3586 
3587 		if (dev->priv_flags & IFF_NO_ADDRCONF) {
3588 			if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) &&
3589 			    dev->flags & IFF_UP && dev->flags & IFF_MULTICAST)
3590 				ipv6_mc_up(idev);
3591 			break;
3592 		}
3593 
3594 		if (event == NETDEV_UP) {
3595 			/* restore routes for permanent addresses */
3596 			addrconf_permanent_addr(net, dev);
3597 
3598 			if (!addrconf_link_ready(dev)) {
3599 				/* device is not ready yet. */
3600 				pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3601 					 dev->name);
3602 				break;
3603 			}
3604 
3605 			if (!idev && dev->mtu >= IPV6_MIN_MTU)
3606 				idev = ipv6_add_dev(dev);
3607 
3608 			if (!IS_ERR_OR_NULL(idev)) {
3609 				idev->if_flags |= IF_READY;
3610 				run_pending = 1;
3611 			}
3612 		} else if (event == NETDEV_CHANGE) {
3613 			if (!addrconf_link_ready(dev)) {
3614 				/* device is still not ready. */
3615 				rt6_sync_down_dev(dev, event);
3616 				break;
3617 			}
3618 
3619 			if (!IS_ERR_OR_NULL(idev)) {
3620 				if (idev->if_flags & IF_READY) {
3621 					/* device is already configured -
3622 					 * but resend MLD reports, we might
3623 					 * have roamed and need to update
3624 					 * multicast snooping switches
3625 					 */
3626 					ipv6_mc_up(idev);
3627 					change_info = ptr;
3628 					if (change_info->flags_changed & IFF_NOARP)
3629 						addrconf_dad_run(idev, true);
3630 					rt6_sync_up(dev, RTNH_F_LINKDOWN);
3631 					break;
3632 				}
3633 				idev->if_flags |= IF_READY;
3634 			}
3635 
3636 			pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3637 				dev->name);
3638 
3639 			run_pending = 1;
3640 		}
3641 
3642 		addrconf_init_auto_addrs(dev);
3643 
3644 		if (!IS_ERR_OR_NULL(idev)) {
3645 			if (run_pending)
3646 				addrconf_dad_run(idev, false);
3647 
3648 			/* Device has an address by now */
3649 			rt6_sync_up(dev, RTNH_F_DEAD);
3650 
3651 			/*
3652 			 * If the MTU changed while the interface was down,
3653 			 * the new MTU must be reflected in the idev as well
3654 			 * as in the routes when the interface comes back up.
3655 			 */
3656 			if (idev->cnf.mtu6 != dev->mtu &&
3657 			    dev->mtu >= IPV6_MIN_MTU) {
3658 				rt6_mtu_change(dev, dev->mtu);
3659 				idev->cnf.mtu6 = dev->mtu;
3660 			}
3661 			idev->tstamp = jiffies;
3662 			inet6_ifinfo_notify(RTM_NEWLINK, idev);
3663 
3664 			/*
3665 			 * If the MTU set while the interface was down is
3666 			 * lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3667 			 */
3668 			if (dev->mtu < IPV6_MIN_MTU)
3669 				addrconf_ifdown(dev, dev != net->loopback_dev);
3670 		}
3671 		break;
3672 
3673 	case NETDEV_DOWN:
3674 	case NETDEV_UNREGISTER:
3675 		/*
3676 		 *	Remove all addresses from this interface.
3677 		 */
3678 		addrconf_ifdown(dev, event != NETDEV_DOWN);
3679 		break;
3680 
3681 	case NETDEV_CHANGENAME:
3682 		if (idev) {
3683 			snmp6_unregister_dev(idev);
3684 			addrconf_sysctl_unregister(idev);
3685 			err = addrconf_sysctl_register(idev);
3686 			if (err)
3687 				return notifier_from_errno(err);
3688 			err = snmp6_register_dev(idev);
3689 			if (err) {
3690 				addrconf_sysctl_unregister(idev);
3691 				return notifier_from_errno(err);
3692 			}
3693 		}
3694 		break;
3695 
3696 	case NETDEV_PRE_TYPE_CHANGE:
3697 	case NETDEV_POST_TYPE_CHANGE:
3698 		if (idev)
3699 			addrconf_type_change(dev, event);
3700 		break;
3701 
3702 	case NETDEV_CHANGEUPPER:
3703 		info = ptr;
3704 
3705 		/* flush all routes if dev is linked to or unlinked from
3706 		 * an L3 master device (e.g., VRF)
3707 		 */
3708 		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3709 			addrconf_ifdown(dev, false);
3710 	}
3711 
3712 	return NOTIFY_OK;
3713 }
3714 
3715 /*
3716  *	addrconf module should be notified of a device going up
3717  */
3718 static struct notifier_block ipv6_dev_notf = {
3719 	.notifier_call = addrconf_notify,
3720 	.priority = ADDRCONF_NOTIFY_PRIORITY,
3721 };
3722 
3723 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3724 {
3725 	struct inet6_dev *idev;
3726 	ASSERT_RTNL();
3727 
3728 	idev = __in6_dev_get(dev);
3729 
3730 	if (event == NETDEV_POST_TYPE_CHANGE)
3731 		ipv6_mc_remap(idev);
3732 	else if (event == NETDEV_PRE_TYPE_CHANGE)
3733 		ipv6_mc_unmap(idev);
3734 }
3735 
3736 static bool addr_is_local(const struct in6_addr *addr)
3737 {
3738 	return ipv6_addr_type(addr) &
3739 		(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3740 }
3741 
3742 static int addrconf_ifdown(struct net_device *dev, bool unregister)
3743 {
3744 	unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
3745 	struct net *net = dev_net(dev);
3746 	struct inet6_dev *idev;
3747 	struct inet6_ifaddr *ifa;
3748 	LIST_HEAD(tmp_addr_list);
3749 	bool keep_addr = false;
3750 	bool was_ready;
3751 	int state, i;
3752 
3753 	ASSERT_RTNL();
3754 
3755 	rt6_disable_ip(dev, event);
3756 
3757 	idev = __in6_dev_get(dev);
3758 	if (!idev)
3759 		return -ENODEV;
3760 
3761 	/*
3762 	 * Step 1: remove reference to ipv6 device from parent device.
3763 	 *	   Do not dev_put!
3764 	 */
3765 	if (unregister) {
3766 		idev->dead = 1;
3767 
3768 		/* protected by rtnl_lock */
3769 		RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3770 
3771 		/* Step 1.5: remove snmp6 entry */
3772 		snmp6_unregister_dev(idev);
3773 
3774 	}
3775 
3776 	/* combine the user config with event to determine if permanent
3777 	 * addresses are to be removed from address hash table
3778 	 */
3779 	if (!unregister && !idev->cnf.disable_ipv6) {
3780 		/* aggregate the system setting and interface setting */
3781 		int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3782 
3783 		if (!_keep_addr)
3784 			_keep_addr = idev->cnf.keep_addr_on_down;
3785 
3786 		keep_addr = (_keep_addr > 0);
3787 	}
3788 
3789 	/* Step 2: clear hash table */
3790 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3791 		struct hlist_head *h = &net->ipv6.inet6_addr_lst[i];
3792 
3793 		spin_lock_bh(&net->ipv6.addrconf_hash_lock);
3794 restart:
3795 		hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3796 			if (ifa->idev == idev) {
3797 				addrconf_del_dad_work(ifa);
3798 				/* combined flag + permanent flag decide if
3799 				 * address is retained on a down event
3800 				 */
3801 				if (!keep_addr ||
3802 				    !(ifa->flags & IFA_F_PERMANENT) ||
3803 				    addr_is_local(&ifa->addr)) {
3804 					hlist_del_init_rcu(&ifa->addr_lst);
3805 					goto restart;
3806 				}
3807 			}
3808 		}
3809 		spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
3810 	}
3811 
3812 	write_lock_bh(&idev->lock);
3813 
3814 	addrconf_del_rs_timer(idev);
3815 
3816 	/* Step 3: clear flags for stateless addrconf and repeated-down
3817 	 *         detection
3818 	 */
3819 	was_ready = idev->if_flags & IF_READY;
3820 	if (!unregister)
3821 		idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3822 
3823 	/* Step 4: clear tempaddr list */
3824 	while (!list_empty(&idev->tempaddr_list)) {
3825 		ifa = list_first_entry(&idev->tempaddr_list,
3826 				       struct inet6_ifaddr, tmp_list);
3827 		list_del(&ifa->tmp_list);
3828 		write_unlock_bh(&idev->lock);
3829 		spin_lock_bh(&ifa->lock);
3830 
3831 		if (ifa->ifpub) {
3832 			in6_ifa_put(ifa->ifpub);
3833 			ifa->ifpub = NULL;
3834 		}
3835 		spin_unlock_bh(&ifa->lock);
3836 		in6_ifa_put(ifa);
3837 		write_lock_bh(&idev->lock);
3838 	}
3839 
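	/* Move the remaining addresses onto a temporary list so they can be
	 * processed below without holding idev->lock across route deletion
	 * and notifier calls.
	 */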
3840 	list_for_each_entry(ifa, &idev->addr_list, if_list)
3841 		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
3842 	write_unlock_bh(&idev->lock);
3843 
3844 	while (!list_empty(&tmp_addr_list)) {
3845 		struct fib6_info *rt = NULL;
3846 		bool keep;
3847 
3848 		ifa = list_first_entry(&tmp_addr_list,
3849 				       struct inet6_ifaddr, if_list_aux);
3850 		list_del(&ifa->if_list_aux);
3851 
3852 		addrconf_del_dad_work(ifa);
3853 
3854 		keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3855 			!addr_is_local(&ifa->addr);
3856 
3857 		spin_lock_bh(&ifa->lock);
3858 
3859 		if (keep) {
3860 			/* set state to skip the notifier below */
3861 			state = INET6_IFADDR_STATE_DEAD;
3862 			ifa->state = INET6_IFADDR_STATE_PREDAD;
3863 			if (!(ifa->flags & IFA_F_NODAD))
3864 				ifa->flags |= IFA_F_TENTATIVE;
3865 
3866 			rt = ifa->rt;
3867 			ifa->rt = NULL;
3868 		} else {
3869 			state = ifa->state;
3870 			ifa->state = INET6_IFADDR_STATE_DEAD;
3871 		}
3872 
3873 		spin_unlock_bh(&ifa->lock);
3874 
3875 		if (rt)
3876 			ip6_del_rt(net, rt, false);
3877 
3878 		if (state != INET6_IFADDR_STATE_DEAD) {
3879 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
3880 			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3881 		} else {
3882 			if (idev->cnf.forwarding)
3883 				addrconf_leave_anycast(ifa);
3884 			addrconf_leave_solict(ifa->idev, &ifa->addr);
3885 		}
3886 
3887 		if (!keep) {
3888 			write_lock_bh(&idev->lock);
3889 			list_del_rcu(&ifa->if_list);
3890 			write_unlock_bh(&idev->lock);
3891 			in6_ifa_put(ifa);
3892 		}
3893 	}
3894 
3895 	/* Step 5: Discard the anycast and multicast lists */
3896 	if (unregister) {
3897 		ipv6_ac_destroy_dev(idev);
3898 		ipv6_mc_destroy_dev(idev);
3899 	} else if (was_ready) {
3900 		ipv6_mc_down(idev);
3901 	}
3902 
3903 	idev->tstamp = jiffies;
3904 	idev->ra_mtu = 0;
3905 
3906 	/* Last: shut the device down (if it is being unregistered) */
3907 	if (unregister) {
3908 		addrconf_sysctl_unregister(idev);
3909 		neigh_parms_release(&nd_tbl, idev->nd_parms);
3910 		neigh_ifdown(&nd_tbl, dev);
3911 		in6_dev_put(idev);
3912 	}
3913 	return 0;
3914 }
3915 
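/* Router solicitation retransmit timer: keep sending RSes, with RFC 3315
 * section 14 backoff between them, until a router advertisement is received
 * or the configured number of solicitations has been sent.
 */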
3916 static void addrconf_rs_timer(struct timer_list *t)
3917 {
3918 	struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3919 	struct net_device *dev = idev->dev;
3920 	struct in6_addr lladdr;
3921 
3922 	write_lock(&idev->lock);
3923 	if (idev->dead || !(idev->if_flags & IF_READY))
3924 		goto out;
3925 
3926 	if (!ipv6_accept_ra(idev))
3927 		goto out;
3928 
3929 	/* Announcement received after solicitation was sent */
3930 	if (idev->if_flags & IF_RA_RCVD)
3931 		goto out;
3932 
3933 	if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3934 		write_unlock(&idev->lock);
3935 		if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3936 			ndisc_send_rs(dev, &lladdr,
3937 				      &in6addr_linklocal_allrouters);
3938 		else
3939 			goto put;
3940 
3941 		write_lock(&idev->lock);
3942 		idev->rs_interval = rfc3315_s14_backoff_update(
3943 			idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3944 		/* The wait after the last probe can be shorter */
3945 		addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3946 					     idev->cnf.rtr_solicits) ?
3947 				      idev->cnf.rtr_solicit_delay :
3948 				      idev->rs_interval);
3949 	} else {
3950 		/*
3951 		 * Note: we no longer support the deprecated "all on-link"
3952 		 * assumption.
3953 		 */
3954 		pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3955 	}
3956 
3957 out:
3958 	write_unlock(&idev->lock);
3959 put:
3960 	in6_dev_put(idev);
3961 }
3962 
3963 /*
3964  *	Duplicate Address Detection
3965  */
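/* Schedule the first DAD probe for @ifp: optimistic addresses probe
 * immediately, others after a random delay below rtr_solicit_delay.  When
 * enhanced DAD (RFC 7527) is enabled, a random nonce is generated for the
 * probes.
 */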
3966 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3967 {
3968 	unsigned long rand_num;
3969 	struct inet6_dev *idev = ifp->idev;
3970 	u64 nonce;
3971 
3972 	if (ifp->flags & IFA_F_OPTIMISTIC)
3973 		rand_num = 0;
3974 	else
3975 		rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ? : 1);
3976 
3977 	nonce = 0;
3978 	if (idev->cnf.enhanced_dad ||
3979 	    dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3980 		do
3981 			get_random_bytes(&nonce, 6);
3982 		while (nonce == 0);
3983 	}
3984 	ifp->dad_nonce = nonce;
3985 	ifp->dad_probes = idev->cnf.dad_transmits;
3986 	addrconf_mod_dad_work(ifp, rand_num);
3987 }
3988 
3989 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3990 {
3991 	struct inet6_dev *idev = ifp->idev;
3992 	struct net_device *dev = idev->dev;
3993 	bool bump_id, notify = false;
3994 	struct net *net;
3995 
3996 	addrconf_join_solict(dev, &ifp->addr);
3997 
3998 	read_lock_bh(&idev->lock);
3999 	spin_lock(&ifp->lock);
4000 	if (ifp->state == INET6_IFADDR_STATE_DEAD)
4001 		goto out;
4002 
4003 	net = dev_net(dev);
4004 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
4005 	    (net->ipv6.devconf_all->accept_dad < 1 &&
4006 	     idev->cnf.accept_dad < 1) ||
4007 	    !(ifp->flags&IFA_F_TENTATIVE) ||
4008 	    ifp->flags & IFA_F_NODAD) {
4009 		bool send_na = false;
4010 
4011 		if (ifp->flags & IFA_F_TENTATIVE &&
4012 		    !(ifp->flags & IFA_F_OPTIMISTIC))
4013 			send_na = true;
4014 		bump_id = ifp->flags & IFA_F_TENTATIVE;
4015 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4016 		spin_unlock(&ifp->lock);
4017 		read_unlock_bh(&idev->lock);
4018 
4019 		addrconf_dad_completed(ifp, bump_id, send_na);
4020 		return;
4021 	}
4022 
4023 	if (!(idev->if_flags & IF_READY)) {
4024 		spin_unlock(&ifp->lock);
4025 		read_unlock_bh(&idev->lock);
4026 		/*
4027 		 * If the device is not ready:
4028 		 * - keep it tentative if it is a permanent address.
4029 		 * - otherwise, kill it.
4030 		 */
4031 		in6_ifa_hold(ifp);
4032 		addrconf_dad_stop(ifp, 0);
4033 		return;
4034 	}
4035 
4036 	/*
4037 	 * Optimistic nodes can start receiving
4038 	 * frames right away.
4039 	 */
4040 	if (ifp->flags & IFA_F_OPTIMISTIC) {
4041 		ip6_ins_rt(net, ifp->rt);
4042 		if (ipv6_use_optimistic_addr(net, idev)) {
4043 			/* Because optimistic nodes can use this address,
4044 			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
4045 			 */
4046 			notify = true;
4047 		}
4048 	}
4049 
4050 	addrconf_dad_kick(ifp);
4051 out:
4052 	spin_unlock(&ifp->lock);
4053 	read_unlock_bh(&idev->lock);
4054 	if (notify)
4055 		ipv6_ifa_notify(RTM_NEWADDR, ifp);
4056 }
4057 
4058 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4059 {
4060 	bool begin_dad = false;
4061 
4062 	spin_lock_bh(&ifp->lock);
4063 	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4064 		ifp->state = INET6_IFADDR_STATE_PREDAD;
4065 		begin_dad = true;
4066 	}
4067 	spin_unlock_bh(&ifp->lock);
4068 
4069 	if (begin_dad)
4070 		addrconf_mod_dad_work(ifp, 0);
4071 }
4072 
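/* Deferred DAD state machine, run from the addrconf workqueue under RTNL:
 * begin DAD for addresses in PREDAD state, abort it for ERRDAD ones
 * (possibly disabling IPv6 on the device), otherwise transmit the next
 * neighbour solicitation probe until dad_probes reaches zero.
 */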
4073 static void addrconf_dad_work(struct work_struct *w)
4074 {
4075 	struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4076 						struct inet6_ifaddr,
4077 						dad_work);
4078 	struct inet6_dev *idev = ifp->idev;
4079 	bool bump_id, disable_ipv6 = false;
4080 	struct in6_addr mcaddr;
4081 
4082 	enum {
4083 		DAD_PROCESS,
4084 		DAD_BEGIN,
4085 		DAD_ABORT,
4086 	} action = DAD_PROCESS;
4087 
4088 	rtnl_lock();
4089 
4090 	spin_lock_bh(&ifp->lock);
4091 	if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4092 		action = DAD_BEGIN;
4093 		ifp->state = INET6_IFADDR_STATE_DAD;
4094 	} else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4095 		action = DAD_ABORT;
4096 		ifp->state = INET6_IFADDR_STATE_POSTDAD;
4097 
4098 		if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
4099 		     idev->cnf.accept_dad > 1) &&
4100 		    !idev->cnf.disable_ipv6 &&
4101 		    !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4102 			struct in6_addr addr;
4103 
4104 			addr.s6_addr32[0] = htonl(0xfe800000);
4105 			addr.s6_addr32[1] = 0;
4106 
4107 			if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4108 			    ipv6_addr_equal(&ifp->addr, &addr)) {
4109 				/* DAD failed for the MAC-derived link-local address */
4110 				idev->cnf.disable_ipv6 = 1;
4111 
4112 				pr_info("%s: IPv6 being disabled!\n",
4113 					ifp->idev->dev->name);
4114 				disable_ipv6 = true;
4115 			}
4116 		}
4117 	}
4118 	spin_unlock_bh(&ifp->lock);
4119 
4120 	if (action == DAD_BEGIN) {
4121 		addrconf_dad_begin(ifp);
4122 		goto out;
4123 	} else if (action == DAD_ABORT) {
4124 		in6_ifa_hold(ifp);
4125 		addrconf_dad_stop(ifp, 1);
4126 		if (disable_ipv6)
4127 			addrconf_ifdown(idev->dev, false);
4128 		goto out;
4129 	}
4130 
4131 	if (!ifp->dad_probes && addrconf_dad_end(ifp))
4132 		goto out;
4133 
4134 	write_lock_bh(&idev->lock);
4135 	if (idev->dead || !(idev->if_flags & IF_READY)) {
4136 		write_unlock_bh(&idev->lock);
4137 		goto out;
4138 	}
4139 
4140 	spin_lock(&ifp->lock);
4141 	if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4142 		spin_unlock(&ifp->lock);
4143 		write_unlock_bh(&idev->lock);
4144 		goto out;
4145 	}
4146 
4147 	if (ifp->dad_probes == 0) {
4148 		bool send_na = false;
4149 
4150 		/*
4151 		 * DAD was successful
4152 		 */
4153 
4154 		if (ifp->flags & IFA_F_TENTATIVE &&
4155 		    !(ifp->flags & IFA_F_OPTIMISTIC))
4156 			send_na = true;
4157 		bump_id = ifp->flags & IFA_F_TENTATIVE;
4158 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4159 		spin_unlock(&ifp->lock);
4160 		write_unlock_bh(&idev->lock);
4161 
4162 		addrconf_dad_completed(ifp, bump_id, send_na);
4163 
4164 		goto out;
4165 	}
4166 
4167 	ifp->dad_probes--;
4168 	addrconf_mod_dad_work(ifp,
4169 			      max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4170 				  HZ/100));
4171 	spin_unlock(&ifp->lock);
4172 	write_unlock_bh(&idev->lock);
4173 
4174 	/* send a neighbour solicitation for our addr */
4175 	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4176 	ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4177 		      ifp->dad_nonce);
4178 out:
4179 	in6_ifa_put(ifp);
4180 	rtnl_unlock();
4181 }
4182 
4183 /* ifp->idev must be at least read locked */
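/* Returns true when @ifp is the only usable link-local address on the
 * interface, i.e. no other permanent link-local address that is not
 * tentative, optimistic or DAD-failed exists in idev->addr_list.
 */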
4184 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4185 {
4186 	struct inet6_ifaddr *ifpiter;
4187 	struct inet6_dev *idev = ifp->idev;
4188 
4189 	list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4190 		if (ifpiter->scope > IFA_LINK)
4191 			break;
4192 		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4193 		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4194 				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4195 		    IFA_F_PERMANENT)
4196 			return false;
4197 	}
4198 	return true;
4199 }
4200 
4201 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4202 				   bool send_na)
4203 {
4204 	struct net_device *dev = ifp->idev->dev;
4205 	struct in6_addr lladdr;
4206 	bool send_rs, send_mld;
4207 
4208 	addrconf_del_dad_work(ifp);
4209 
4210 	/*
4211 	 *	Configure the address for reception. Now it is valid.
4212 	 */
4213 
4214 	ipv6_ifa_notify(RTM_NEWADDR, ifp);
4215 
4216 	/* If the added address is link-local and we are prepared to process
4217 	 * router advertisements, start sending router solicitations.
4218 	 */
4219 
4220 	read_lock_bh(&ifp->idev->lock);
4221 	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4222 	send_rs = send_mld &&
4223 		  ipv6_accept_ra(ifp->idev) &&
4224 		  ifp->idev->cnf.rtr_solicits != 0 &&
4225 		  (dev->flags & IFF_LOOPBACK) == 0 &&
4226 		  (dev->type != ARPHRD_TUNNEL);
4227 	read_unlock_bh(&ifp->idev->lock);
4228 
4229 	/* While DAD is in progress the MLD report's source address is
4230 	 * in6addr_any.  Resend it with the proper link-local address now.
4231 	 */
4232 	if (send_mld)
4233 		ipv6_mc_dad_complete(ifp->idev);
4234 
4235 	/* send unsolicited NA if enabled */
4236 	if (send_na &&
4237 	    (ifp->idev->cnf.ndisc_notify ||
4238 	     dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4239 		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4240 			      /*router=*/ !!ifp->idev->cnf.forwarding,
4241 			      /*solicited=*/ false, /*override=*/ true,
4242 			      /*inc_opt=*/ true);
4243 	}
4244 
4245 	if (send_rs) {
4246 		/*
4247 		 *	If a host has already performed a random delay
4248 		 *	[...] as part of DAD [...] there is no need
4249 		 *	to delay again before sending the first RS
4250 		 */
4251 		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4252 			return;
4253 		ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4254 
4255 		write_lock_bh(&ifp->idev->lock);
4256 		spin_lock(&ifp->lock);
4257 		ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4258 			ifp->idev->cnf.rtr_solicit_interval);
4259 		ifp->idev->rs_probes = 1;
4260 		ifp->idev->if_flags |= IF_RS_SENT;
4261 		addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4262 		spin_unlock(&ifp->lock);
4263 		write_unlock_bh(&ifp->idev->lock);
4264 	}
4265 
4266 	if (bump_id)
4267 		rt_genid_bump_ipv6(dev_net(dev));
4268 
4269 	/* Make sure that a new temporary address will be created
4270 	 * before this temporary address becomes deprecated.
4271 	 */
4272 	if (ifp->flags & IFA_F_TEMPORARY)
4273 		addrconf_verify_rtnl(dev_net(dev));
4274 }
4275 
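/* Kick DAD for the addresses of @idev: normally only for tentative addresses
 * still in the DAD state; with @restart set, every address is reset to
 * PREDAD and probed again (e.g. after the link comes back up).
 */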
4276 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4277 {
4278 	struct inet6_ifaddr *ifp;
4279 
4280 	read_lock_bh(&idev->lock);
4281 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
4282 		spin_lock(&ifp->lock);
4283 		if ((ifp->flags & IFA_F_TENTATIVE &&
4284 		     ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4285 			if (restart)
4286 				ifp->state = INET6_IFADDR_STATE_PREDAD;
4287 			addrconf_dad_kick(ifp);
4288 		}
4289 		spin_unlock(&ifp->lock);
4290 	}
4291 	read_unlock_bh(&idev->lock);
4292 }
4293 
4294 #ifdef CONFIG_PROC_FS
4295 struct if6_iter_state {
4296 	struct seq_net_private p;
4297 	int bucket;
4298 	int offset;
4299 };
4300 
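/* Return the first address for this /proc/net/if_inet6 iteration, resuming
 * from the bucket and in-bucket offset saved in the iterator state.
 */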
4301 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4302 {
4303 	struct if6_iter_state *state = seq->private;
4304 	struct net *net = seq_file_net(seq);
4305 	struct inet6_ifaddr *ifa = NULL;
4306 	int p = 0;
4307 
4308 	/* initial bucket if pos is 0 */
4309 	if (pos == 0) {
4310 		state->bucket = 0;
4311 		state->offset = 0;
4312 	}
4313 
4314 	for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4315 		hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket],
4316 					 addr_lst) {
4317 			/* sync with offset */
4318 			if (p < state->offset) {
4319 				p++;
4320 				continue;
4321 			}
4322 			return ifa;
4323 		}
4324 
4325 		/* prepare for next bucket */
4326 		state->offset = 0;
4327 		p = 0;
4328 	}
4329 	return NULL;
4330 }
4331 
4332 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4333 					 struct inet6_ifaddr *ifa)
4334 {
4335 	struct if6_iter_state *state = seq->private;
4336 	struct net *net = seq_file_net(seq);
4337 
4338 	hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4339 		state->offset++;
4340 		return ifa;
4341 	}
4342 
4343 	state->offset = 0;
4344 	while (++state->bucket < IN6_ADDR_HSIZE) {
4345 		hlist_for_each_entry_rcu(ifa,
4346 				     &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) {
4347 			return ifa;
4348 		}
4349 	}
4350 
4351 	return NULL;
4352 }
4353 
4354 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4355 	__acquires(rcu)
4356 {
4357 	rcu_read_lock();
4358 	return if6_get_first(seq, *pos);
4359 }
4360 
4361 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4362 {
4363 	struct inet6_ifaddr *ifa;
4364 
4365 	ifa = if6_get_next(seq, v);
4366 	++*pos;
4367 	return ifa;
4368 }
4369 
4370 static void if6_seq_stop(struct seq_file *seq, void *v)
4371 	__releases(rcu)
4372 {
4373 	rcu_read_unlock();
4374 }
4375 
4376 static int if6_seq_show(struct seq_file *seq, void *v)
4377 {
4378 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4379 	seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4380 		   &ifp->addr,
4381 		   ifp->idev->dev->ifindex,
4382 		   ifp->prefix_len,
4383 		   ifp->scope,
4384 		   (u8) ifp->flags,
4385 		   ifp->idev->dev->name);
4386 	return 0;
4387 }
4388 
4389 static const struct seq_operations if6_seq_ops = {
4390 	.start	= if6_seq_start,
4391 	.next	= if6_seq_next,
4392 	.show	= if6_seq_show,
4393 	.stop	= if6_seq_stop,
4394 };
4395 
4396 static int __net_init if6_proc_net_init(struct net *net)
4397 {
4398 	if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4399 			sizeof(struct if6_iter_state)))
4400 		return -ENOMEM;
4401 	return 0;
4402 }
4403 
4404 static void __net_exit if6_proc_net_exit(struct net *net)
4405 {
4406 	remove_proc_entry("if_inet6", net->proc_net);
4407 }
4408 
4409 static struct pernet_operations if6_proc_net_ops = {
4410 	.init = if6_proc_net_init,
4411 	.exit = if6_proc_net_exit,
4412 };
4413 
4414 int __init if6_proc_init(void)
4415 {
4416 	return register_pernet_subsys(&if6_proc_net_ops);
4417 }
4418 
4419 void if6_proc_exit(void)
4420 {
4421 	unregister_pernet_subsys(&if6_proc_net_ops);
4422 }
4423 #endif	/* CONFIG_PROC_FS */
4424 
4425 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4426 /* Check if address is a home address configured on any interface. */
4427 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4428 {
4429 	unsigned int hash = inet6_addr_hash(net, addr);
4430 	struct inet6_ifaddr *ifp = NULL;
4431 	int ret = 0;
4432 
4433 	rcu_read_lock();
4434 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4435 		if (ipv6_addr_equal(&ifp->addr, addr) &&
4436 		    (ifp->flags & IFA_F_HOMEADDRESS)) {
4437 			ret = 1;
4438 			break;
4439 		}
4440 	}
4441 	rcu_read_unlock();
4442 	return ret;
4443 }
4444 #endif
4445 
4446 /* RFC 6554 specifies an algorithm to avoid loops in segment routing by
4447  * checking whether the segments contain any local interface address.
4448  *
4449  * Quote:
4450  *
4451  * To detect loops in the SRH, a router MUST determine if the SRH
4452  * includes multiple addresses assigned to any interface on that router.
4453  * If such addresses appear more than once and are separated by at least
4454  * one address not assigned to that router, the router MUST drop the packet.
4455  */
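/* Returns 1 when the segment list is judged to contain such a loop,
 * 0 otherwise.
 */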
4456 int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4457 			  unsigned char nsegs)
4458 {
4459 	const struct in6_addr *addr;
4460 	int i, ret = 0, found = 0;
4461 	struct inet6_ifaddr *ifp;
4462 	bool separated = false;
4463 	unsigned int hash;
4464 	bool hash_found;
4465 
4466 	rcu_read_lock();
4467 	for (i = 0; i < nsegs; i++) {
4468 		addr = &segs[i];
4469 		hash = inet6_addr_hash(net, addr);
4470 
4471 		hash_found = false;
4472 		hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4473 
4474 			if (ipv6_addr_equal(&ifp->addr, addr)) {
4475 				hash_found = true;
4476 				break;
4477 			}
4478 		}
4479 
4480 		if (hash_found) {
4481 			if (found > 1 && separated) {
4482 				ret = 1;
4483 				break;
4484 			}
4485 
4486 			separated = false;
4487 			found++;
4488 		} else {
4489 			separated = true;
4490 		}
4491 	}
4492 	rcu_read_unlock();
4493 
4494 	return ret;
4495 }
4496 
4497 /*
4498  *	Periodic address status verification
4499  */
4500 
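/* Walk the address hash table: delete addresses whose valid lifetime has
 * expired, deprecate those past their preferred lifetime, regenerate
 * temporary addresses that are about to be deprecated, and reschedule the
 * worker for the earliest upcoming event.
 */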
4501 static void addrconf_verify_rtnl(struct net *net)
4502 {
4503 	unsigned long now, next, next_sec, next_sched;
4504 	struct inet6_ifaddr *ifp;
4505 	int i;
4506 
4507 	ASSERT_RTNL();
4508 
4509 	rcu_read_lock_bh();
4510 	now = jiffies;
4511 	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4512 
4513 	cancel_delayed_work(&net->ipv6.addr_chk_work);
4514 
4515 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4516 restart:
4517 		hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) {
4518 			unsigned long age;
4519 
4520 			/* A permanent (IFA_F_PERMANENT) address may still have a
4521 			 * finite preferred lifetime while valid_lft is infinite,
4522 			 * so only skip it when prefered_lft is infinite too.
4523 			 */
4524 			if ((ifp->flags & IFA_F_PERMANENT) &&
4525 			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
4526 				continue;
4527 
4528 			spin_lock(&ifp->lock);
4529 			/* We try to batch several events at once. */
4530 			age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4531 
4532 			if ((ifp->flags&IFA_F_TEMPORARY) &&
4533 			    !(ifp->flags&IFA_F_TENTATIVE) &&
4534 			    ifp->prefered_lft != INFINITY_LIFE_TIME &&
4535 			    !ifp->regen_count && ifp->ifpub) {
4536 				/* This is a non-regenerated temporary addr. */
4537 
4538 				unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4539 					ifp->idev->cnf.dad_transmits *
4540 					max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
4541 
4542 				if (age + regen_advance >= ifp->prefered_lft) {
4543 					struct inet6_ifaddr *ifpub = ifp->ifpub;
4544 					if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4545 						next = ifp->tstamp + ifp->prefered_lft * HZ;
4546 
4547 					ifp->regen_count++;
4548 					in6_ifa_hold(ifp);
4549 					in6_ifa_hold(ifpub);
4550 					spin_unlock(&ifp->lock);
4551 
4552 					spin_lock(&ifpub->lock);
4553 					ifpub->regen_count = 0;
4554 					spin_unlock(&ifpub->lock);
4555 					rcu_read_unlock_bh();
4556 					ipv6_create_tempaddr(ifpub, true);
4557 					in6_ifa_put(ifpub);
4558 					in6_ifa_put(ifp);
4559 					rcu_read_lock_bh();
4560 					goto restart;
4561 				} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4562 					next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4563 			}
4564 
4565 			if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4566 			    age >= ifp->valid_lft) {
4567 				spin_unlock(&ifp->lock);
4568 				in6_ifa_hold(ifp);
4569 				rcu_read_unlock_bh();
4570 				ipv6_del_addr(ifp);
4571 				rcu_read_lock_bh();
4572 				goto restart;
4573 			} else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4574 				spin_unlock(&ifp->lock);
4575 				continue;
4576 			} else if (age >= ifp->prefered_lft) {
4577 				/* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4578 				int deprecate = 0;
4579 
4580 				if (!(ifp->flags&IFA_F_DEPRECATED)) {
4581 					deprecate = 1;
4582 					ifp->flags |= IFA_F_DEPRECATED;
4583 				}
4584 
4585 				if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4586 				    (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4587 					next = ifp->tstamp + ifp->valid_lft * HZ;
4588 
4589 				spin_unlock(&ifp->lock);
4590 
4591 				if (deprecate) {
4592 					in6_ifa_hold(ifp);
4593 
4594 					ipv6_ifa_notify(0, ifp);
4595 					in6_ifa_put(ifp);
4596 					goto restart;
4597 				}
4598 			} else {
4599 				/* ifp->prefered_lft <= ifp->valid_lft */
4600 				if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4601 					next = ifp->tstamp + ifp->prefered_lft * HZ;
4602 				spin_unlock(&ifp->lock);
4603 			}
4604 		}
4605 	}
4606 
4607 	next_sec = round_jiffies_up(next);
4608 	next_sched = next;
4609 
4610 	/* If rounded timeout is accurate enough, accept it. */
4611 	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4612 		next_sched = next_sec;
4613 
4614 	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4615 	if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4616 		next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4617 
4618 	pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4619 		 now, next, next_sec, next_sched);
4620 	mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now);
4621 	rcu_read_unlock_bh();
4622 }
4623 
4624 static void addrconf_verify_work(struct work_struct *w)
4625 {
4626 	struct net *net = container_of(to_delayed_work(w), struct net,
4627 				       ipv6.addr_chk_work);
4628 
4629 	rtnl_lock();
4630 	addrconf_verify_rtnl(net);
4631 	rtnl_unlock();
4632 }
4633 
4634 static void addrconf_verify(struct net *net)
4635 {
4636 	mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0);
4637 }
4638 
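/* Pick the address to operate on from the IFA_ADDRESS/IFA_LOCAL attributes.
 * When both are supplied and differ, IFA_LOCAL is the local address and
 * IFA_ADDRESS is returned through @peer_pfx as the peer.
 */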
4639 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4640 				     struct in6_addr **peer_pfx)
4641 {
4642 	struct in6_addr *pfx = NULL;
4643 
4644 	*peer_pfx = NULL;
4645 
4646 	if (addr)
4647 		pfx = nla_data(addr);
4648 
4649 	if (local) {
4650 		if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4651 			*peer_pfx = pfx;
4652 		pfx = nla_data(local);
4653 	}
4654 
4655 	return pfx;
4656 }
4657 
4658 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4659 	[IFA_ADDRESS]		= { .len = sizeof(struct in6_addr) },
4660 	[IFA_LOCAL]		= { .len = sizeof(struct in6_addr) },
4661 	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
4662 	[IFA_FLAGS]		= { .len = sizeof(u32) },
4663 	[IFA_RT_PRIORITY]	= { .len = sizeof(u32) },
4664 	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
4665 	[IFA_PROTO]		= { .type = NLA_U8 },
4666 };
4667 
4668 static int
4669 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4670 		  struct netlink_ext_ack *extack)
4671 {
4672 	struct net *net = sock_net(skb->sk);
4673 	struct ifaddrmsg *ifm;
4674 	struct nlattr *tb[IFA_MAX+1];
4675 	struct in6_addr *pfx, *peer_pfx;
4676 	u32 ifa_flags;
4677 	int err;
4678 
4679 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4680 				     ifa_ipv6_policy, extack);
4681 	if (err < 0)
4682 		return err;
4683 
4684 	ifm = nlmsg_data(nlh);
4685 	pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4686 	if (!pfx)
4687 		return -EINVAL;
4688 
4689 	ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4690 
4691 	/* We ignore other flags so far. */
4692 	ifa_flags &= IFA_F_MANAGETEMPADDR;
4693 
4694 	return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4695 			      ifm->ifa_prefixlen);
4696 }
4697 
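/* Update the prefix route associated with @ifp (or with its peer address
 * when @modify_peer is set): replace the route if its metric changed,
 * otherwise just update its expiry.
 */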
4698 static int modify_prefix_route(struct inet6_ifaddr *ifp,
4699 			       unsigned long expires, u32 flags,
4700 			       bool modify_peer)
4701 {
4702 	struct fib6_info *f6i;
4703 	u32 prio;
4704 
4705 	f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4706 					ifp->prefix_len,
4707 					ifp->idev->dev, 0, RTF_DEFAULT, true);
4708 	if (!f6i)
4709 		return -ENOENT;
4710 
4711 	prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4712 	if (f6i->fib6_metric != prio) {
4713 		/* delete old one */
4714 		ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4715 
4716 		/* add new one */
4717 		addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4718 				      ifp->prefix_len,
4719 				      ifp->rt_priority, ifp->idev->dev,
4720 				      expires, flags, GFP_KERNEL);
4721 	} else {
4722 		if (!expires)
4723 			fib6_clean_expires(f6i);
4724 		else
4725 			fib6_set_expires(f6i, expires);
4726 
4727 		fib6_info_release(f6i);
4728 	}
4729 
4730 	return 0;
4731 }
4732 
4733 static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
4734 			     struct ifa6_config *cfg)
4735 {
4736 	u32 flags;
4737 	clock_t expires;
4738 	unsigned long timeout;
4739 	bool was_managetempaddr;
4740 	bool had_prefixroute;
4741 	bool new_peer = false;
4742 
4743 	ASSERT_RTNL();
4744 
4745 	if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4746 		return -EINVAL;
4747 
4748 	if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4749 	    (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4750 		return -EINVAL;
4751 
4752 	if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4753 		cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4754 
4755 	timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4756 	if (addrconf_finite_timeout(timeout)) {
4757 		expires = jiffies_to_clock_t(timeout * HZ);
4758 		cfg->valid_lft = timeout;
4759 		flags = RTF_EXPIRES;
4760 	} else {
4761 		expires = 0;
4762 		flags = 0;
4763 		cfg->ifa_flags |= IFA_F_PERMANENT;
4764 	}
4765 
4766 	timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4767 	if (addrconf_finite_timeout(timeout)) {
4768 		if (timeout == 0)
4769 			cfg->ifa_flags |= IFA_F_DEPRECATED;
4770 		cfg->preferred_lft = timeout;
4771 	}
4772 
4773 	if (cfg->peer_pfx &&
4774 	    memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4775 		if (!ipv6_addr_any(&ifp->peer_addr))
4776 			cleanup_prefix_route(ifp, expires, true, true);
4777 		new_peer = true;
4778 	}
4779 
4780 	spin_lock_bh(&ifp->lock);
4781 	was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4782 	had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4783 			  !(ifp->flags & IFA_F_NOPREFIXROUTE);
4784 	ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4785 			IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4786 			IFA_F_NOPREFIXROUTE);
4787 	ifp->flags |= cfg->ifa_flags;
4788 	ifp->tstamp = jiffies;
4789 	ifp->valid_lft = cfg->valid_lft;
4790 	ifp->prefered_lft = cfg->preferred_lft;
4791 	ifp->ifa_proto = cfg->ifa_proto;
4792 
4793 	if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4794 		ifp->rt_priority = cfg->rt_priority;
4795 
4796 	if (new_peer)
4797 		ifp->peer_addr = *cfg->peer_pfx;
4798 
4799 	spin_unlock_bh(&ifp->lock);
4800 	if (!(ifp->flags&IFA_F_TENTATIVE))
4801 		ipv6_ifa_notify(0, ifp);
4802 
4803 	if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4804 		int rc = -ENOENT;
4805 
4806 		if (had_prefixroute)
4807 			rc = modify_prefix_route(ifp, expires, flags, false);
4808 
4809 		/* prefix route could have been deleted; if so restore it */
4810 		if (rc == -ENOENT) {
4811 			addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4812 					      ifp->rt_priority, ifp->idev->dev,
4813 					      expires, flags, GFP_KERNEL);
4814 		}
4815 
4816 		if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4817 			rc = modify_prefix_route(ifp, expires, flags, true);
4818 
4819 		if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4820 			addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4821 					      ifp->rt_priority, ifp->idev->dev,
4822 					      expires, flags, GFP_KERNEL);
4823 		}
4824 	} else if (had_prefixroute) {
4825 		enum cleanup_prefix_rt_t action;
4826 		unsigned long rt_expires;
4827 
4828 		write_lock_bh(&ifp->idev->lock);
4829 		action = check_cleanup_prefix_route(ifp, &rt_expires);
4830 		write_unlock_bh(&ifp->idev->lock);
4831 
4832 		if (action != CLEANUP_PREFIX_RT_NOP) {
4833 			cleanup_prefix_route(ifp, rt_expires,
4834 				action == CLEANUP_PREFIX_RT_DEL, false);
4835 		}
4836 	}
4837 
4838 	if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4839 		if (was_managetempaddr &&
4840 		    !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
4841 			cfg->valid_lft = 0;
4842 			cfg->preferred_lft = 0;
4843 		}
4844 		manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4845 				 cfg->preferred_lft, !was_managetempaddr,
4846 				 jiffies);
4847 	}
4848 
4849 	addrconf_verify_rtnl(net);
4850 
4851 	return 0;
4852 }
4853 
4854 static int
4855 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4856 		  struct netlink_ext_ack *extack)
4857 {
4858 	struct net *net = sock_net(skb->sk);
4859 	struct ifaddrmsg *ifm;
4860 	struct nlattr *tb[IFA_MAX+1];
4861 	struct in6_addr *peer_pfx;
4862 	struct inet6_ifaddr *ifa;
4863 	struct net_device *dev;
4864 	struct inet6_dev *idev;
4865 	struct ifa6_config cfg;
4866 	int err;
4867 
4868 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4869 				     ifa_ipv6_policy, extack);
4870 	if (err < 0)
4871 		return err;
4872 
4873 	memset(&cfg, 0, sizeof(cfg));
4874 
4875 	ifm = nlmsg_data(nlh);
4876 	cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4877 	if (!cfg.pfx)
4878 		return -EINVAL;
4879 
4880 	cfg.peer_pfx = peer_pfx;
4881 	cfg.plen = ifm->ifa_prefixlen;
4882 	if (tb[IFA_RT_PRIORITY])
4883 		cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4884 
4885 	if (tb[IFA_PROTO])
4886 		cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]);
4887 
4888 	cfg.valid_lft = INFINITY_LIFE_TIME;
4889 	cfg.preferred_lft = INFINITY_LIFE_TIME;
4890 
4891 	if (tb[IFA_CACHEINFO]) {
4892 		struct ifa_cacheinfo *ci;
4893 
4894 		ci = nla_data(tb[IFA_CACHEINFO]);
4895 		cfg.valid_lft = ci->ifa_valid;
4896 		cfg.preferred_lft = ci->ifa_prefered;
4897 	}
4898 
4899 	dev =  __dev_get_by_index(net, ifm->ifa_index);
4900 	if (!dev)
4901 		return -ENODEV;
4902 
4903 	if (tb[IFA_FLAGS])
4904 		cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
4905 	else
4906 		cfg.ifa_flags = ifm->ifa_flags;
4907 
4908 	/* We ignore other flags so far. */
4909 	cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4910 			 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4911 			 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4912 
4913 	idev = ipv6_find_idev(dev);
4914 	if (IS_ERR(idev))
4915 		return PTR_ERR(idev);
4916 
4917 	if (!ipv6_allow_optimistic_dad(net, idev))
4918 		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
4919 
4920 	if (cfg.ifa_flags & IFA_F_NODAD &&
4921 	    cfg.ifa_flags & IFA_F_OPTIMISTIC) {
4922 		NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
4923 		return -EINVAL;
4924 	}
4925 
4926 	ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
4927 	if (!ifa) {
4928 		/*
4929 		 * It would be best to check for !NLM_F_CREATE here but
4930 		 * userspace already relies on not having to provide this.
4931 		 */
4932 		return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
4933 	}
4934 
4935 	if (nlh->nlmsg_flags & NLM_F_EXCL ||
4936 	    !(nlh->nlmsg_flags & NLM_F_REPLACE))
4937 		err = -EEXIST;
4938 	else
4939 		err = inet6_addr_modify(net, ifa, &cfg);
4940 
4941 	in6_ifa_put(ifa);
4942 
4943 	return err;
4944 }
4945 
4946 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4947 			  u8 scope, int ifindex)
4948 {
4949 	struct ifaddrmsg *ifm;
4950 
4951 	ifm = nlmsg_data(nlh);
4952 	ifm->ifa_family = AF_INET6;
4953 	ifm->ifa_prefixlen = prefixlen;
4954 	ifm->ifa_flags = flags;
4955 	ifm->ifa_scope = scope;
4956 	ifm->ifa_index = ifindex;
4957 }
4958 
4959 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4960 			 unsigned long tstamp, u32 preferred, u32 valid)
4961 {
4962 	struct ifa_cacheinfo ci;
4963 
4964 	ci.cstamp = cstamp_delta(cstamp);
4965 	ci.tstamp = cstamp_delta(tstamp);
4966 	ci.ifa_prefered = preferred;
4967 	ci.ifa_valid = valid;
4968 
4969 	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4970 }
4971 
4972 static inline int rt_scope(int ifa_scope)
4973 {
4974 	if (ifa_scope & IFA_HOST)
4975 		return RT_SCOPE_HOST;
4976 	else if (ifa_scope & IFA_LINK)
4977 		return RT_SCOPE_LINK;
4978 	else if (ifa_scope & IFA_SITE)
4979 		return RT_SCOPE_SITE;
4980 	else
4981 		return RT_SCOPE_UNIVERSE;
4982 }
4983 
4984 static inline int inet6_ifaddr_msgsize(void)
4985 {
4986 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4987 	       + nla_total_size(16) /* IFA_LOCAL */
4988 	       + nla_total_size(16) /* IFA_ADDRESS */
4989 	       + nla_total_size(sizeof(struct ifa_cacheinfo))
4990 	       + nla_total_size(4)  /* IFA_FLAGS */
4991 	       + nla_total_size(1)  /* IFA_PROTO */
4992 	       + nla_total_size(4)  /* IFA_RT_PRIORITY */;
4993 }
4994 
4995 enum addr_type_t {
4996 	UNICAST_ADDR,
4997 	MULTICAST_ADDR,
4998 	ANYCAST_ADDR,
4999 };
5000 
5001 struct inet6_fill_args {
5002 	u32 portid;
5003 	u32 seq;
5004 	int event;
5005 	unsigned int flags;
5006 	int netnsid;
5007 	int ifindex;
5008 	enum addr_type_t type;
5009 };
5010 
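/* Fill one ifaddrmsg-based netlink message for @ifa using the event and
 * flags in @args; the remaining preferred and valid lifetimes are reported
 * relative to the current time unless the address is permanent with an
 * infinite preferred lifetime.
 */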
5011 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
5012 			     struct inet6_fill_args *args)
5013 {
5014 	struct nlmsghdr  *nlh;
5015 	u32 preferred, valid;
5016 
5017 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5018 			sizeof(struct ifaddrmsg), args->flags);
5019 	if (!nlh)
5020 		return -EMSGSIZE;
5021 
5022 	put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
5023 		      ifa->idev->dev->ifindex);
5024 
5025 	if (args->netnsid >= 0 &&
5026 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5027 		goto error;
5028 
5029 	spin_lock_bh(&ifa->lock);
5030 	if (!((ifa->flags&IFA_F_PERMANENT) &&
5031 	      (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
5032 		preferred = ifa->prefered_lft;
5033 		valid = ifa->valid_lft;
5034 		if (preferred != INFINITY_LIFE_TIME) {
5035 			long tval = (jiffies - ifa->tstamp)/HZ;
5036 			if (preferred > tval)
5037 				preferred -= tval;
5038 			else
5039 				preferred = 0;
5040 			if (valid != INFINITY_LIFE_TIME) {
5041 				if (valid > tval)
5042 					valid -= tval;
5043 				else
5044 					valid = 0;
5045 			}
5046 		}
5047 	} else {
5048 		preferred = INFINITY_LIFE_TIME;
5049 		valid = INFINITY_LIFE_TIME;
5050 	}
5051 	spin_unlock_bh(&ifa->lock);
5052 
5053 	if (!ipv6_addr_any(&ifa->peer_addr)) {
5054 		if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5055 		    nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5056 			goto error;
5057 	} else
5058 		if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5059 			goto error;
5060 
5061 	if (ifa->rt_priority &&
5062 	    nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
5063 		goto error;
5064 
5065 	if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
5066 		goto error;
5067 
5068 	if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
5069 		goto error;
5070 
5071 	if (ifa->ifa_proto &&
5072 	    nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto))
5073 		goto error;
5074 
5075 	nlmsg_end(skb, nlh);
5076 	return 0;
5077 
5078 error:
5079 	nlmsg_cancel(skb, nlh);
5080 	return -EMSGSIZE;
5081 }
5082 
5083 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
5084 			       struct inet6_fill_args *args)
5085 {
5086 	struct nlmsghdr  *nlh;
5087 	u8 scope = RT_SCOPE_UNIVERSE;
5088 	int ifindex = ifmca->idev->dev->ifindex;
5089 
5090 	if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5091 		scope = RT_SCOPE_SITE;
5092 
5093 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5094 			sizeof(struct ifaddrmsg), args->flags);
5095 	if (!nlh)
5096 		return -EMSGSIZE;
5097 
5098 	if (args->netnsid >= 0 &&
5099 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5100 		nlmsg_cancel(skb, nlh);
5101 		return -EMSGSIZE;
5102 	}
5103 
5104 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5105 	if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5106 	    put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
5107 			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5108 		nlmsg_cancel(skb, nlh);
5109 		return -EMSGSIZE;
5110 	}
5111 
5112 	nlmsg_end(skb, nlh);
5113 	return 0;
5114 }
5115 
5116 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
5117 			       struct inet6_fill_args *args)
5118 {
5119 	struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5120 	int ifindex = dev ? dev->ifindex : 1;
5121 	struct nlmsghdr  *nlh;
5122 	u8 scope = RT_SCOPE_UNIVERSE;
5123 
5124 	if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5125 		scope = RT_SCOPE_SITE;
5126 
5127 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5128 			sizeof(struct ifaddrmsg), args->flags);
5129 	if (!nlh)
5130 		return -EMSGSIZE;
5131 
5132 	if (args->netnsid >= 0 &&
5133 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5134 		nlmsg_cancel(skb, nlh);
5135 		return -EMSGSIZE;
5136 	}
5137 
5138 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5139 	if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5140 	    put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
5141 			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5142 		nlmsg_cancel(skb, nlh);
5143 		return -EMSGSIZE;
5144 	}
5145 
5146 	nlmsg_end(skb, nlh);
5147 	return 0;
5148 }
5149 
5150 /* called with rcu_read_lock() */
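/* Dump the unicast, multicast or anycast addresses of @idev (according to
 * fillargs->type) into @skb; the index of the next address to dump is saved
 * in cb->args[2] so an interrupted dump can be resumed.
 */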
5151 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
5152 			  struct netlink_callback *cb, int s_ip_idx,
5153 			  struct inet6_fill_args *fillargs)
5154 {
5155 	struct ifmcaddr6 *ifmca;
5156 	struct ifacaddr6 *ifaca;
5157 	int ip_idx = 0;
5158 	int err = 1;
5159 
5160 	read_lock_bh(&idev->lock);
5161 	switch (fillargs->type) {
5162 	case UNICAST_ADDR: {
5163 		struct inet6_ifaddr *ifa;
5164 		fillargs->event = RTM_NEWADDR;
5165 
5166 		/* unicast address incl. temp addr */
5167 		list_for_each_entry(ifa, &idev->addr_list, if_list) {
5168 			if (ip_idx < s_ip_idx)
5169 				goto next;
5170 			err = inet6_fill_ifaddr(skb, ifa, fillargs);
5171 			if (err < 0)
5172 				break;
5173 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5174 next:
5175 			ip_idx++;
5176 		}
5177 		break;
5178 	}
5179 	case MULTICAST_ADDR:
5180 		read_unlock_bh(&idev->lock);
5181 		fillargs->event = RTM_GETMULTICAST;
5182 
5183 		/* multicast address */
5184 		for (ifmca = rtnl_dereference(idev->mc_list);
5185 		     ifmca;
5186 		     ifmca = rtnl_dereference(ifmca->next), ip_idx++) {
5187 			if (ip_idx < s_ip_idx)
5188 				continue;
5189 			err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5190 			if (err < 0)
5191 				break;
5192 		}
5193 		read_lock_bh(&idev->lock);
5194 		break;
5195 	case ANYCAST_ADDR:
5196 		fillargs->event = RTM_GETANYCAST;
5197 		/* anycast address */
5198 		for (ifaca = idev->ac_list; ifaca;
5199 		     ifaca = ifaca->aca_next, ip_idx++) {
5200 			if (ip_idx < s_ip_idx)
5201 				continue;
5202 			err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5203 			if (err < 0)
5204 				break;
5205 		}
5206 		break;
5207 	default:
5208 		break;
5209 	}
5210 	read_unlock_bh(&idev->lock);
5211 	cb->args[2] = ip_idx;
5212 	return err;
5213 }
5214 
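/* Strict validation of an address dump request: reject unexpected header
 * fields and attributes, note a requested ifindex in @fillargs, and resolve
 * an optional IFA_TARGET_NETNSID into @tgt_net.
 */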
5215 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5216 				       struct inet6_fill_args *fillargs,
5217 				       struct net **tgt_net, struct sock *sk,
5218 				       struct netlink_callback *cb)
5219 {
5220 	struct netlink_ext_ack *extack = cb->extack;
5221 	struct nlattr *tb[IFA_MAX+1];
5222 	struct ifaddrmsg *ifm;
5223 	int err, i;
5224 
5225 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5226 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5227 		return -EINVAL;
5228 	}
5229 
5230 	ifm = nlmsg_data(nlh);
5231 	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5232 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5233 		return -EINVAL;
5234 	}
5235 
5236 	fillargs->ifindex = ifm->ifa_index;
5237 	if (fillargs->ifindex) {
5238 		cb->answer_flags |= NLM_F_DUMP_FILTERED;
5239 		fillargs->flags |= NLM_F_DUMP_FILTERED;
5240 	}
5241 
5242 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5243 					    ifa_ipv6_policy, extack);
5244 	if (err < 0)
5245 		return err;
5246 
5247 	for (i = 0; i <= IFA_MAX; ++i) {
5248 		if (!tb[i])
5249 			continue;
5250 
5251 		if (i == IFA_TARGET_NETNSID) {
5252 			struct net *net;
5253 
5254 			fillargs->netnsid = nla_get_s32(tb[i]);
5255 			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5256 			if (IS_ERR(net)) {
5257 				fillargs->netnsid = -1;
5258 				NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5259 				return PTR_ERR(net);
5260 			}
5261 			*tgt_net = net;
5262 		} else {
5263 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
5264 			return -EINVAL;
5265 		}
5266 	}
5267 
5268 	return 0;
5269 }
5270 
5271 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5272 			   enum addr_type_t type)
5273 {
5274 	const struct nlmsghdr *nlh = cb->nlh;
5275 	struct inet6_fill_args fillargs = {
5276 		.portid = NETLINK_CB(cb->skb).portid,
5277 		.seq = cb->nlh->nlmsg_seq,
5278 		.flags = NLM_F_MULTI,
5279 		.netnsid = -1,
5280 		.type = type,
5281 	};
5282 	struct net *tgt_net = sock_net(skb->sk);
5283 	int idx, s_idx, s_ip_idx;
5284 	int h, s_h;
5285 	struct net_device *dev;
5286 	struct inet6_dev *idev;
5287 	struct hlist_head *head;
5288 	int err = 0;
5289 
5290 	s_h = cb->args[0];
5291 	s_idx = idx = cb->args[1];
5292 	s_ip_idx = cb->args[2];
5293 
5294 	if (cb->strict_check) {
5295 		err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5296 						  skb->sk, cb);
5297 		if (err < 0)
5298 			goto put_tgt_net;
5299 
5300 		err = 0;
5301 		if (fillargs.ifindex) {
5302 			dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
5303 			if (!dev) {
5304 				err = -ENODEV;
5305 				goto put_tgt_net;
5306 			}
5307 			idev = __in6_dev_get(dev);
5308 			if (idev) {
5309 				err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5310 						     &fillargs);
5311 				if (err > 0)
5312 					err = 0;
5313 			}
5314 			goto put_tgt_net;
5315 		}
5316 	}
5317 
5318 	rcu_read_lock();
5319 	cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
5320 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5321 		idx = 0;
5322 		head = &tgt_net->dev_index_head[h];
5323 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
5324 			if (idx < s_idx)
5325 				goto cont;
5326 			if (h > s_h || idx > s_idx)
5327 				s_ip_idx = 0;
5328 			idev = __in6_dev_get(dev);
5329 			if (!idev)
5330 				goto cont;
5331 
5332 			if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
5333 					   &fillargs) < 0)
5334 				goto done;
5335 cont:
5336 			idx++;
5337 		}
5338 	}
5339 done:
5340 	rcu_read_unlock();
5341 	cb->args[0] = h;
5342 	cb->args[1] = idx;
5343 put_tgt_net:
5344 	if (fillargs.netnsid >= 0)
5345 		put_net(tgt_net);
5346 
5347 	return skb->len ? : err;
5348 }
5349 
5350 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5351 {
5352 	enum addr_type_t type = UNICAST_ADDR;
5353 
5354 	return inet6_dump_addr(skb, cb, type);
5355 }
5356 
5357 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5358 {
5359 	enum addr_type_t type = MULTICAST_ADDR;
5360 
5361 	return inet6_dump_addr(skb, cb, type);
5362 }
5363 
5364 
5365 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5366 {
5367 	enum addr_type_t type = ANYCAST_ADDR;
5368 
5369 	return inet6_dump_addr(skb, cb, type);
5370 }
5371 
5372 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5373 				       const struct nlmsghdr *nlh,
5374 				       struct nlattr **tb,
5375 				       struct netlink_ext_ack *extack)
5376 {
5377 	struct ifaddrmsg *ifm;
5378 	int i, err;
5379 
5380 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5381 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5382 		return -EINVAL;
5383 	}
5384 
5385 	if (!netlink_strict_get_check(skb))
5386 		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5387 					      ifa_ipv6_policy, extack);
5388 
5389 	ifm = nlmsg_data(nlh);
5390 	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5391 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5392 		return -EINVAL;
5393 	}
5394 
5395 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5396 					    ifa_ipv6_policy, extack);
5397 	if (err)
5398 		return err;
5399 
5400 	for (i = 0; i <= IFA_MAX; i++) {
5401 		if (!tb[i])
5402 			continue;
5403 
5404 		switch (i) {
5405 		case IFA_TARGET_NETNSID:
5406 		case IFA_ADDRESS:
5407 		case IFA_LOCAL:
5408 			break;
5409 		default:
5410 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
5411 			return -EINVAL;
5412 		}
5413 	}
5414 
5415 	return 0;
5416 }
5417 
5418 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5419 			     struct netlink_ext_ack *extack)
5420 {
5421 	struct net *tgt_net = sock_net(in_skb->sk);
5422 	struct inet6_fill_args fillargs = {
5423 		.portid = NETLINK_CB(in_skb).portid,
5424 		.seq = nlh->nlmsg_seq,
5425 		.event = RTM_NEWADDR,
5426 		.flags = 0,
5427 		.netnsid = -1,
5428 	};
5429 	struct ifaddrmsg *ifm;
5430 	struct nlattr *tb[IFA_MAX+1];
5431 	struct in6_addr *addr = NULL, *peer;
5432 	struct net_device *dev = NULL;
5433 	struct inet6_ifaddr *ifa;
5434 	struct sk_buff *skb;
5435 	int err;
5436 
5437 	err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5438 	if (err < 0)
5439 		return err;
5440 
5441 	if (tb[IFA_TARGET_NETNSID]) {
5442 		fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5443 
5444 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5445 						  fillargs.netnsid);
5446 		if (IS_ERR(tgt_net))
5447 			return PTR_ERR(tgt_net);
5448 	}
5449 
5450 	addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5451 	if (!addr)
5452 		return -EINVAL;
5453 
5454 	ifm = nlmsg_data(nlh);
5455 	if (ifm->ifa_index)
5456 		dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5457 
5458 	ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5459 	if (!ifa) {
5460 		err = -EADDRNOTAVAIL;
5461 		goto errout;
5462 	}
5463 
5464 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5465 	if (!skb) {
5466 		err = -ENOBUFS;
5467 		goto errout_ifa;
5468 	}
5469 
5470 	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5471 	if (err < 0) {
5472 		/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5473 		WARN_ON(err == -EMSGSIZE);
5474 		kfree_skb(skb);
5475 		goto errout_ifa;
5476 	}
5477 	err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5478 errout_ifa:
5479 	in6_ifa_put(ifa);
5480 errout:
5481 	dev_put(dev);
5482 	if (fillargs.netnsid >= 0)
5483 		put_net(tgt_net);
5484 
5485 	return err;
5486 }
5487 
5488 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5489 {
5490 	struct sk_buff *skb;
5491 	struct net *net = dev_net(ifa->idev->dev);
5492 	struct inet6_fill_args fillargs = {
5493 		.portid = 0,
5494 		.seq = 0,
5495 		.event = event,
5496 		.flags = 0,
5497 		.netnsid = -1,
5498 	};
5499 	int err = -ENOBUFS;
5500 
5501 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5502 	if (!skb)
5503 		goto errout;
5504 
5505 	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5506 	if (err < 0) {
5507 		/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5508 		WARN_ON(err == -EMSGSIZE);
5509 		kfree_skb(skb);
5510 		goto errout;
5511 	}
5512 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5513 	return;
5514 errout:
5515 	if (err < 0)
5516 		rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5517 }
5518 
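/* Flatten the devconf settings into an __s32 array indexed by the DEVCONF_*
 * constants, as carried in the IFLA_INET6_CONF attribute; values kept in
 * jiffies are exported in milliseconds.
 */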
5519 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5520 				__s32 *array, int bytes)
5521 {
5522 	BUG_ON(bytes < (DEVCONF_MAX * 4));
5523 
5524 	memset(array, 0, bytes);
5525 	array[DEVCONF_FORWARDING] = cnf->forwarding;
5526 	array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5527 	array[DEVCONF_MTU6] = cnf->mtu6;
5528 	array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5529 	array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5530 	array[DEVCONF_AUTOCONF] = cnf->autoconf;
5531 	array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5532 	array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5533 	array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5534 		jiffies_to_msecs(cnf->rtr_solicit_interval);
5535 	array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5536 		jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5537 	array[DEVCONF_RTR_SOLICIT_DELAY] =
5538 		jiffies_to_msecs(cnf->rtr_solicit_delay);
5539 	array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5540 	array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5541 		jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5542 	array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5543 		jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5544 	array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5545 	array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5546 	array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5547 	array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5548 	array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5549 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5550 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5551 	array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
5552 	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5553 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5554 #ifdef CONFIG_IPV6_ROUTER_PREF
5555 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5556 	array[DEVCONF_RTR_PROBE_INTERVAL] =
5557 		jiffies_to_msecs(cnf->rtr_probe_interval);
5558 #ifdef CONFIG_IPV6_ROUTE_INFO
5559 	array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5560 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5561 #endif
5562 #endif
5563 	array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5564 	array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5565 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5566 	array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5567 	array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5568 #endif
5569 #ifdef CONFIG_IPV6_MROUTE
5570 	array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
5571 #endif
5572 	array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5573 	array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5574 	array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5575 	array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5576 	array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5577 	array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5578 	array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5579 	array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5580 	/* we omit DEVCONF_STABLE_SECRET for now */
5581 	array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5582 	array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5583 	array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5584 	array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5585 	array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5586 #ifdef CONFIG_IPV6_SEG6_HMAC
5587 	array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5588 #endif
5589 	array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5590 	array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5591 	array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5592 	array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5593 	array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
5594 	array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
5595 	array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
5596 	array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
5597 	array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
5598 	array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
5599 }
5600 
5601 static inline size_t inet6_ifla6_size(void)
5602 {
5603 	return nla_total_size(4) /* IFLA_INET6_FLAGS */
5604 	     + nla_total_size(sizeof(struct ifla_cacheinfo))
5605 	     + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5606 	     + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5607 	     + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5608 	     + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5609 	     + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5610 	     + nla_total_size(4) /* IFLA_INET6_RA_MTU */
5611 	     + 0;
5612 }
5613 
5614 static inline size_t inet6_if_nlmsg_size(void)
5615 {
5616 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5617 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5618 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5619 	       + nla_total_size(4) /* IFLA_MTU */
5620 	       + nla_total_size(4) /* IFLA_LINK */
5621 	       + nla_total_size(1) /* IFLA_OPERSTATE */
5622 	       + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5623 }
5624 
5625 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5626 					int bytes)
5627 {
5628 	int i;
5629 	int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5630 	BUG_ON(pad < 0);
5631 
5632 	/* Use put_unaligned() because stats may not be aligned for u64. */
5633 	put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5634 	for (i = 1; i < ICMP6_MIB_MAX; i++)
5635 		put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5636 
5637 	memset(&stats[ICMP6_MIB_MAX], 0, pad);
5638 }
5639 
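/* Aggregate the per-CPU IPSTATS MIB counters into @stats; slot 0 carries
 * the number of exported counters and any remaining space is zeroed.
 */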
5640 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5641 					int bytes, size_t syncpoff)
5642 {
5643 	int i, c;
5644 	u64 buff[IPSTATS_MIB_MAX];
5645 	int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5646 
5647 	BUG_ON(pad < 0);
5648 
5649 	memset(buff, 0, sizeof(buff));
5650 	buff[0] = IPSTATS_MIB_MAX;
5651 
5652 	for_each_possible_cpu(c) {
5653 		for (i = 1; i < IPSTATS_MIB_MAX; i++)
5654 			buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5655 	}
5656 
5657 	memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5658 	memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5659 }
5660 
5661 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5662 			     int bytes)
5663 {
5664 	switch (attrtype) {
5665 	case IFLA_INET6_STATS:
5666 		__snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5667 				     offsetof(struct ipstats_mib, syncp));
5668 		break;
5669 	case IFLA_INET6_ICMP6STATS:
5670 		__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5671 		break;
5672 	}
5673 }
5674 
5675 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5676 				  u32 ext_filter_mask)
5677 {
5678 	struct nlattr *nla;
5679 	struct ifla_cacheinfo ci;
5680 
5681 	if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5682 		goto nla_put_failure;
5683 	ci.max_reasm_len = IPV6_MAXPLEN;
5684 	ci.tstamp = cstamp_delta(idev->tstamp);
5685 	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5686 	ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5687 	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5688 		goto nla_put_failure;
5689 	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5690 	if (!nla)
5691 		goto nla_put_failure;
5692 	ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5693 
5694 	/* XXX - MC not implemented */
5695 
5696 	if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5697 		return 0;
5698 
5699 	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5700 	if (!nla)
5701 		goto nla_put_failure;
5702 	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5703 
5704 	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5705 	if (!nla)
5706 		goto nla_put_failure;
5707 	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5708 
5709 	nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5710 	if (!nla)
5711 		goto nla_put_failure;
5712 	read_lock_bh(&idev->lock);
5713 	memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5714 	read_unlock_bh(&idev->lock);
5715 
5716 	if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5717 		goto nla_put_failure;
5718 
5719 	if (idev->ra_mtu &&
5720 	    nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
5721 		goto nla_put_failure;
5722 
5723 	return 0;
5724 
5725 nla_put_failure:
5726 	return -EMSGSIZE;
5727 }
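
/* Illustrative note on the pattern above (this file's convention, not a
 * new API): fixed-size payloads are reserved and then written in place,
 * e.g.
 *
 *	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
 *	if (!nla)
 *		goto nla_put_failure;
 *	ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
 *
 * while scalars go through nla_put_u8()/nla_put_u32().  Any failure
 * means the skb ran out of tailroom and the caller sees -EMSGSIZE.
 */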
5728 
5729 static size_t inet6_get_link_af_size(const struct net_device *dev,
5730 				     u32 ext_filter_mask)
5731 {
5732 	if (!__in6_dev_get(dev))
5733 		return 0;
5734 
5735 	return inet6_ifla6_size();
5736 }
5737 
5738 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5739 			      u32 ext_filter_mask)
5740 {
5741 	struct inet6_dev *idev = __in6_dev_get(dev);
5742 
5743 	if (!idev)
5744 		return -ENODATA;
5745 
5746 	if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5747 		return -EMSGSIZE;
5748 
5749 	return 0;
5750 }
5751 
5752 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
5753 			     struct netlink_ext_ack *extack)
5754 {
5755 	struct inet6_ifaddr *ifp;
5756 	struct net_device *dev = idev->dev;
5757 	bool clear_token, update_rs = false;
5758 	struct in6_addr ll_addr;
5759 
5760 	ASSERT_RTNL();
5761 
5762 	if (!token)
5763 		return -EINVAL;
5764 
5765 	if (dev->flags & IFF_LOOPBACK) {
5766 		NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
5767 		return -EINVAL;
5768 	}
5769 
5770 	if (dev->flags & IFF_NOARP) {
5771 		NL_SET_ERR_MSG_MOD(extack,
5772 				   "Device does not do neighbour discovery");
5773 		return -EINVAL;
5774 	}
5775 
5776 	if (!ipv6_accept_ra(idev)) {
5777 		NL_SET_ERR_MSG_MOD(extack,
5778 				   "Router advertisement is disabled on device");
5779 		return -EINVAL;
5780 	}
5781 
5782 	if (idev->cnf.rtr_solicits == 0) {
5783 		NL_SET_ERR_MSG(extack,
5784 			       "Router solicitation is disabled on device");
5785 		return -EINVAL;
5786 	}
5787 
5788 	write_lock_bh(&idev->lock);
5789 
5790 	BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5791 	memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5792 
5793 	write_unlock_bh(&idev->lock);
5794 
5795 	clear_token = ipv6_addr_any(token);
5796 	if (clear_token)
5797 		goto update_lft;
5798 
5799 	if (!idev->dead && (idev->if_flags & IF_READY) &&
5800 	    !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5801 			     IFA_F_OPTIMISTIC)) {
5802 		/* If the interface were not yet IF_READY, normal ifup would
5803 		 * send the RS for us; it is ready here, so send it now.
5804 		 */
5805 		ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5806 		update_rs = true;
5807 	}
5808 
5809 update_lft:
5810 	write_lock_bh(&idev->lock);
5811 
5812 	if (update_rs) {
5813 		idev->if_flags |= IF_RS_SENT;
5814 		idev->rs_interval = rfc3315_s14_backoff_init(
5815 			idev->cnf.rtr_solicit_interval);
5816 		idev->rs_probes = 1;
5817 		addrconf_mod_rs_timer(idev, idev->rs_interval);
5818 	}
5819 
5820 	/* Expire tokenized addresses so they get regenerated with the new token. */
5821 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
5822 		spin_lock(&ifp->lock);
5823 		if (ifp->tokenized) {
5824 			ifp->valid_lft = 0;
5825 			ifp->prefered_lft = 0;
5826 		}
5827 		spin_unlock(&ifp->lock);
5828 	}
5829 
5830 	write_unlock_bh(&idev->lock);
5831 	inet6_ifinfo_notify(RTM_NEWLINK, idev);
5832 	addrconf_verify_rtnl(dev_net(dev));
5833 	return 0;
5834 }
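
/* Userspace example (assumes an iproute2 build with token support):
 * the tokenized interface identifier handled above is what backs
 *
 *	# ip token set ::0123:4567:89ab:cdef dev eth0
 *	# ip token get dev eth0
 *
 * Only the low 64 bits of the token are kept; an all-zero token
 * (clear_token above) removes it.  Existing tokenized addresses have
 * their lifetimes zeroed above and are re-created from the next
 * router advertisement.
 */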
5835 
5836 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5837 	[IFLA_INET6_ADDR_GEN_MODE]	= { .type = NLA_U8 },
5838 	[IFLA_INET6_TOKEN]		= { .len = sizeof(struct in6_addr) },
5839 	[IFLA_INET6_RA_MTU]		= { .type = NLA_REJECT,
5840 					    .reject_message =
5841 						"IFLA_INET6_RA_MTU can not be set" },
5842 };
5843 
5844 static int check_addr_gen_mode(int mode)
5845 {
5846 	if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5847 	    mode != IN6_ADDR_GEN_MODE_NONE &&
5848 	    mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5849 	    mode != IN6_ADDR_GEN_MODE_RANDOM)
5850 		return -EINVAL;
5851 	return 1;
5852 }
5853 
5854 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5855 				int mode)
5856 {
5857 	if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5858 	    !idev->cnf.stable_secret.initialized &&
5859 	    !net->ipv6.devconf_dflt->stable_secret.initialized)
5860 		return -EINVAL;
5861 	return 1;
5862 }
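
/* Both checks above return 1 on success and -EINVAL on failure; callers
 * only test for a negative result, so the "1" simply means "no error".
 */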
5863 
5864 static int inet6_validate_link_af(const struct net_device *dev,
5865 				  const struct nlattr *nla,
5866 				  struct netlink_ext_ack *extack)
5867 {
5868 	struct nlattr *tb[IFLA_INET6_MAX + 1];
5869 	struct inet6_dev *idev = NULL;
5870 	int err;
5871 
5872 	if (dev) {
5873 		idev = __in6_dev_get(dev);
5874 		if (!idev)
5875 			return -EAFNOSUPPORT;
5876 	}
5877 
5878 	err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
5879 					  inet6_af_policy, extack);
5880 	if (err)
5881 		return err;
5882 
5883 	if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
5884 		return -EINVAL;
5885 
5886 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5887 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5888 
5889 		if (check_addr_gen_mode(mode) < 0)
5890 			return -EINVAL;
5891 		if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
5892 			return -EINVAL;
5893 	}
5894 
5895 	return 0;
5896 }
5897 
5898 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
5899 			     struct netlink_ext_ack *extack)
5900 {
5901 	struct inet6_dev *idev = __in6_dev_get(dev);
5902 	struct nlattr *tb[IFLA_INET6_MAX + 1];
5903 	int err;
5904 
5905 	if (!idev)
5906 		return -EAFNOSUPPORT;
5907 
5908 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5909 		return -EINVAL;
5910 
5911 	if (tb[IFLA_INET6_TOKEN]) {
5912 		err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
5913 					extack);
5914 		if (err)
5915 			return err;
5916 	}
5917 
5918 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5919 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5920 
5921 		idev->cnf.addr_gen_mode = mode;
5922 	}
5923 
5924 	return 0;
5925 }
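
/* These .validate_link_af/.set_link_af handlers receive the AF_INET6
 * blob nested inside IFLA_AF_SPEC on an RTM_NEWLINK/RTM_SETLINK
 * request.  With iproute2 (illustrative; option names assumed current):
 *
 *	# ip link set dev eth0 addrgenmode random
 *
 * arrives here as IFLA_INET6_ADDR_GEN_MODE, while "ip token set ..."
 * arrives as IFLA_INET6_TOKEN and is handled by inet6_set_iftoken().
 */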
5926 
5927 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5928 			     u32 portid, u32 seq, int event, unsigned int flags)
5929 {
5930 	struct net_device *dev = idev->dev;
5931 	struct ifinfomsg *hdr;
5932 	struct nlmsghdr *nlh;
5933 	void *protoinfo;
5934 
5935 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5936 	if (!nlh)
5937 		return -EMSGSIZE;
5938 
5939 	hdr = nlmsg_data(nlh);
5940 	hdr->ifi_family = AF_INET6;
5941 	hdr->__ifi_pad = 0;
5942 	hdr->ifi_type = dev->type;
5943 	hdr->ifi_index = dev->ifindex;
5944 	hdr->ifi_flags = dev_get_flags(dev);
5945 	hdr->ifi_change = 0;
5946 
5947 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5948 	    (dev->addr_len &&
5949 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5950 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5951 	    (dev->ifindex != dev_get_iflink(dev) &&
5952 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5953 	    nla_put_u8(skb, IFLA_OPERSTATE,
5954 		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5955 		goto nla_put_failure;
5956 	protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
5957 	if (!protoinfo)
5958 		goto nla_put_failure;
5959 
5960 	if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5961 		goto nla_put_failure;
5962 
5963 	nla_nest_end(skb, protoinfo);
5964 	nlmsg_end(skb, nlh);
5965 	return 0;
5966 
5967 nla_put_failure:
5968 	nlmsg_cancel(skb, nlh);
5969 	return -EMSGSIZE;
5970 }
5971 
5972 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
5973 				   struct netlink_ext_ack *extack)
5974 {
5975 	struct ifinfomsg *ifm;
5976 
5977 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5978 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
5979 		return -EINVAL;
5980 	}
5981 
5982 	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
5983 		NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
5984 		return -EINVAL;
5985 	}
5986 
5987 	ifm = nlmsg_data(nlh);
5988 	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5989 	    ifm->ifi_change || ifm->ifi_index) {
5990 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
5991 		return -EINVAL;
5992 	}
5993 
5994 	return 0;
5995 }
5996 
5997 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5998 {
5999 	struct net *net = sock_net(skb->sk);
6000 	int h, s_h;
6001 	int idx = 0, s_idx;
6002 	struct net_device *dev;
6003 	struct inet6_dev *idev;
6004 	struct hlist_head *head;
6005 
6006 	/* only requests using strict checking can pass data to
6007 	 * influence the dump
6008 	 */
6009 	if (cb->strict_check) {
6010 		int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
6011 
6012 		if (err < 0)
6013 			return err;
6014 	}
6015 
6016 	s_h = cb->args[0];
6017 	s_idx = cb->args[1];
6018 
6019 	rcu_read_lock();
6020 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
6021 		idx = 0;
6022 		head = &net->dev_index_head[h];
6023 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
6024 			if (idx < s_idx)
6025 				goto cont;
6026 			idev = __in6_dev_get(dev);
6027 			if (!idev)
6028 				goto cont;
6029 			if (inet6_fill_ifinfo(skb, idev,
6030 					      NETLINK_CB(cb->skb).portid,
6031 					      cb->nlh->nlmsg_seq,
6032 					      RTM_NEWLINK, NLM_F_MULTI) < 0)
6033 				goto out;
6034 cont:
6035 			idx++;
6036 		}
6037 	}
6038 out:
6039 	rcu_read_unlock();
6040 	cb->args[1] = idx;
6041 	cb->args[0] = h;
6042 
6043 	return skb->len;
6044 }
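
/* Dump resumption: cb->args[0]/args[1] record the device hash bucket
 * and the index within it where the previous skb filled up, so the
 * next netlink dump callback continues from that device instead of
 * starting over.
 */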
6045 
6046 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
6047 {
6048 	struct sk_buff *skb;
6049 	struct net *net = dev_net(idev->dev);
6050 	int err = -ENOBUFS;
6051 
6052 	skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
6053 	if (!skb)
6054 		goto errout;
6055 
6056 	err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
6057 	if (err < 0) {
6058 		/* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6059 		WARN_ON(err == -EMSGSIZE);
6060 		kfree_skb(skb);
6061 		goto errout;
6062 	}
6063 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6064 	return;
6065 errout:
6066 	if (err < 0)
6067 		rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6068 }
6069 
6070 static inline size_t inet6_prefix_nlmsg_size(void)
6071 {
6072 	return NLMSG_ALIGN(sizeof(struct prefixmsg))
6073 	       + nla_total_size(sizeof(struct in6_addr))
6074 	       + nla_total_size(sizeof(struct prefix_cacheinfo));
6075 }
6076 
6077 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
6078 			     struct prefix_info *pinfo, u32 portid, u32 seq,
6079 			     int event, unsigned int flags)
6080 {
6081 	struct prefixmsg *pmsg;
6082 	struct nlmsghdr *nlh;
6083 	struct prefix_cacheinfo	ci;
6084 
6085 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
6086 	if (!nlh)
6087 		return -EMSGSIZE;
6088 
6089 	pmsg = nlmsg_data(nlh);
6090 	pmsg->prefix_family = AF_INET6;
6091 	pmsg->prefix_pad1 = 0;
6092 	pmsg->prefix_pad2 = 0;
6093 	pmsg->prefix_ifindex = idev->dev->ifindex;
6094 	pmsg->prefix_len = pinfo->prefix_len;
6095 	pmsg->prefix_type = pinfo->type;
6096 	pmsg->prefix_pad3 = 0;
6097 	pmsg->prefix_flags = 0;
6098 	if (pinfo->onlink)
6099 		pmsg->prefix_flags |= IF_PREFIX_ONLINK;
6100 	if (pinfo->autoconf)
6101 		pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
6102 
6103 	if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
6104 		goto nla_put_failure;
6105 	ci.preferred_time = ntohl(pinfo->prefered);
6106 	ci.valid_time = ntohl(pinfo->valid);
6107 	if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
6108 		goto nla_put_failure;
6109 	nlmsg_end(skb, nlh);
6110 	return 0;
6111 
6112 nla_put_failure:
6113 	nlmsg_cancel(skb, nlh);
6114 	return -EMSGSIZE;
6115 }
6116 
6117 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6118 			 struct prefix_info *pinfo)
6119 {
6120 	struct sk_buff *skb;
6121 	struct net *net = dev_net(idev->dev);
6122 	int err = -ENOBUFS;
6123 
6124 	skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6125 	if (!skb)
6126 		goto errout;
6127 
6128 	err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6129 	if (err < 0) {
6130 		/* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6131 		WARN_ON(err == -EMSGSIZE);
6132 		kfree_skb(skb);
6133 		goto errout;
6134 	}
6135 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6136 	return;
6137 errout:
6138 	if (err < 0)
6139 		rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
6140 }
6141 
6142 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6143 {
6144 	struct net *net = dev_net(ifp->idev->dev);
6145 
6146 	if (event)
6147 		ASSERT_RTNL();
6148 
6149 	inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6150 
6151 	switch (event) {
6152 	case RTM_NEWADDR:
6153 		/*
6154 		 * If the address was optimistic we inserted the route at the
6155 		 * start of our DAD process, so we don't need to do it again.
6156 		 * If the device was taken down in the middle of the DAD
6157 		 * cycle there is a race where we could get here without a
6158 		 * host route, so nothing to insert. That will be fixed when
6159 		 * the device is brought up.
6160 		 */
6161 		if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6162 			ip6_ins_rt(net, ifp->rt);
6163 		} else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6164 			pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6165 				&ifp->addr, ifp->idev->dev->name);
6166 		}
6167 
6168 		if (ifp->idev->cnf.forwarding)
6169 			addrconf_join_anycast(ifp);
6170 		if (!ipv6_addr_any(&ifp->peer_addr))
6171 			addrconf_prefix_route(&ifp->peer_addr, 128,
6172 					      ifp->rt_priority, ifp->idev->dev,
6173 					      0, 0, GFP_ATOMIC);
6174 		break;
6175 	case RTM_DELADDR:
6176 		if (ifp->idev->cnf.forwarding)
6177 			addrconf_leave_anycast(ifp);
6178 		addrconf_leave_solict(ifp->idev, &ifp->addr);
6179 		if (!ipv6_addr_any(&ifp->peer_addr)) {
6180 			struct fib6_info *rt;
6181 
6182 			rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6183 						       ifp->idev->dev, 0, 0,
6184 						       false);
6185 			if (rt)
6186 				ip6_del_rt(net, rt, false);
6187 		}
6188 		if (ifp->rt) {
6189 			ip6_del_rt(net, ifp->rt, false);
6190 			ifp->rt = NULL;
6191 		}
6192 		rt_genid_bump_ipv6(net);
6193 		break;
6194 	}
6195 	atomic_inc(&net->ipv6.dev_addr_genid);
6196 }
6197 
6198 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6199 {
6200 	if (likely(ifp->idev->dead == 0))
6201 		__ipv6_ifa_notify(event, ifp);
6202 }
6203 
6204 #ifdef CONFIG_SYSCTL
6205 
6206 static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
6207 		void *buffer, size_t *lenp, loff_t *ppos)
6208 {
6209 	int *valp = ctl->data;
6210 	int val = *valp;
6211 	loff_t pos = *ppos;
6212 	struct ctl_table lctl;
6213 	int ret;
6214 
6215 	/*
6216 	 * ctl->data points to idev->cnf.forwarding; it must not be
6217 	 * modified until the RTNL lock is held.
6218 	 */
6219 	lctl = *ctl;
6220 	lctl.data = &val;
6221 
6222 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6223 
6224 	if (write)
6225 		ret = addrconf_fixup_forwarding(ctl, valp, val);
6226 	if (ret)
6227 		*ppos = pos;
6228 	return ret;
6229 }
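
/* Pattern used by the sysctl handlers in this block (a sketch of this
 * file's own convention, not a generic API): copy the ctl_table, point
 * .data at an on-stack value, let proc_dointvec() parse into that copy,
 * and only then apply the change under RTNL (addrconf_fixup_forwarding()
 * and friends use rtnl_trylock() and return restart_syscall() when it
 * is contended).  On failure *ppos is rewound so the write is retried
 * with the same data.
 */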
6230 
6231 static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
6232 		void *buffer, size_t *lenp, loff_t *ppos)
6233 {
6234 	struct inet6_dev *idev = ctl->extra1;
6235 	int min_mtu = IPV6_MIN_MTU;
6236 	struct ctl_table lctl;
6237 
6238 	lctl = *ctl;
6239 	lctl.extra1 = &min_mtu;
6240 	lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6241 
6242 	return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6243 }
6244 
6245 static void dev_disable_change(struct inet6_dev *idev)
6246 {
6247 	struct netdev_notifier_info info;
6248 
6249 	if (!idev || !idev->dev)
6250 		return;
6251 
6252 	netdev_notifier_info_init(&info, idev->dev);
6253 	if (idev->cnf.disable_ipv6)
6254 		addrconf_notify(NULL, NETDEV_DOWN, &info);
6255 	else
6256 		addrconf_notify(NULL, NETDEV_UP, &info);
6257 }
6258 
6259 static void addrconf_disable_change(struct net *net, __s32 newf)
6260 {
6261 	struct net_device *dev;
6262 	struct inet6_dev *idev;
6263 
6264 	for_each_netdev(net, dev) {
6265 		idev = __in6_dev_get(dev);
6266 		if (idev) {
6267 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6268 			idev->cnf.disable_ipv6 = newf;
6269 			if (changed)
6270 				dev_disable_change(idev);
6271 		}
6272 	}
6273 }
6274 
6275 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
6276 {
6277 	struct net *net;
6278 	int old;
6279 
6280 	if (!rtnl_trylock())
6281 		return restart_syscall();
6282 
6283 	net = (struct net *)table->extra2;
6284 	old = *p;
6285 	*p = newf;
6286 
6287 	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6288 		rtnl_unlock();
6289 		return 0;
6290 	}
6291 
6292 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
6293 		net->ipv6.devconf_dflt->disable_ipv6 = newf;
6294 		addrconf_disable_change(net, newf);
6295 	} else if ((!newf) ^ (!old))
6296 		dev_disable_change((struct inet6_dev *)table->extra1);
6297 
6298 	rtnl_unlock();
6299 	return 0;
6300 }
6301 
6302 static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
6303 		void *buffer, size_t *lenp, loff_t *ppos)
6304 {
6305 	int *valp = ctl->data;
6306 	int val = *valp;
6307 	loff_t pos = *ppos;
6308 	struct ctl_table lctl;
6309 	int ret;
6310 
6311 	/*
6312 	 * ctl->data points to idev->cnf.disable_ipv6; it must not be
6313 	 * modified until the RTNL lock is held.
6314 	 */
6315 	lctl = *ctl;
6316 	lctl.data = &val;
6317 
6318 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6319 
6320 	if (write)
6321 		ret = addrconf_disable_ipv6(ctl, valp, val);
6322 	if (ret)
6323 		*ppos = pos;
6324 	return ret;
6325 }
6326 
6327 static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
6328 		void *buffer, size_t *lenp, loff_t *ppos)
6329 {
6330 	int *valp = ctl->data;
6331 	int ret;
6332 	int old, new;
6333 
6334 	old = *valp;
6335 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6336 	new = *valp;
6337 
6338 	if (write && old != new) {
6339 		struct net *net = ctl->extra2;
6340 
6341 		if (!rtnl_trylock())
6342 			return restart_syscall();
6343 
6344 		if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
6345 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6346 						     NETCONFA_PROXY_NEIGH,
6347 						     NETCONFA_IFINDEX_DEFAULT,
6348 						     net->ipv6.devconf_dflt);
6349 		else if (valp == &net->ipv6.devconf_all->proxy_ndp)
6350 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6351 						     NETCONFA_PROXY_NEIGH,
6352 						     NETCONFA_IFINDEX_ALL,
6353 						     net->ipv6.devconf_all);
6354 		else {
6355 			struct inet6_dev *idev = ctl->extra1;
6356 
6357 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6358 						     NETCONFA_PROXY_NEIGH,
6359 						     idev->dev->ifindex,
6360 						     &idev->cnf);
6361 		}
6362 		rtnl_unlock();
6363 	}
6364 
6365 	return ret;
6366 }
6367 
6368 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
6369 					 void *buffer, size_t *lenp,
6370 					 loff_t *ppos)
6371 {
6372 	int ret = 0;
6373 	u32 new_val;
6374 	struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6375 	struct net *net = (struct net *)ctl->extra2;
6376 	struct ctl_table tmp = {
6377 		.data = &new_val,
6378 		.maxlen = sizeof(new_val),
6379 		.mode = ctl->mode,
6380 	};
6381 
6382 	if (!rtnl_trylock())
6383 		return restart_syscall();
6384 
6385 	new_val = *((u32 *)ctl->data);
6386 
6387 	ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6388 	if (ret != 0)
6389 		goto out;
6390 
6391 	if (write) {
6392 		if (check_addr_gen_mode(new_val) < 0) {
6393 			ret = -EINVAL;
6394 			goto out;
6395 		}
6396 
6397 		if (idev) {
6398 			if (check_stable_privacy(idev, net, new_val) < 0) {
6399 				ret = -EINVAL;
6400 				goto out;
6401 			}
6402 
6403 			if (idev->cnf.addr_gen_mode != new_val) {
6404 				idev->cnf.addr_gen_mode = new_val;
6405 				addrconf_init_auto_addrs(idev->dev);
6406 			}
6407 		} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6408 			struct net_device *dev;
6409 
6410 			net->ipv6.devconf_dflt->addr_gen_mode = new_val;
6411 			for_each_netdev(net, dev) {
6412 				idev = __in6_dev_get(dev);
6413 				if (idev &&
6414 				    idev->cnf.addr_gen_mode != new_val) {
6415 					idev->cnf.addr_gen_mode = new_val;
6416 					addrconf_init_auto_addrs(idev->dev);
6417 				}
6418 			}
6419 		}
6420 
6421 		*((u32 *)ctl->data) = new_val;
6422 	}
6423 
6424 out:
6425 	rtnl_unlock();
6426 
6427 	return ret;
6428 }
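
/* Example (values taken from the IN6_ADDR_GEN_MODE_* uapi enum):
 * switching one interface to fully random interface identifiers
 *
 *	# sysctl -w net.ipv6.conf.eth0.addr_gen_mode=3
 *
 * Writing net.ipv6.conf.all.addr_gen_mode instead propagates the new
 * mode to the default template and to every interface, regenerating
 * addresses via addrconf_init_auto_addrs() as done above.
 */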
6429 
6430 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
6431 					 void *buffer, size_t *lenp,
6432 					 loff_t *ppos)
6433 {
6434 	int err;
6435 	struct in6_addr addr;
6436 	char str[IPV6_MAX_STRLEN];
6437 	struct ctl_table lctl = *ctl;
6438 	struct net *net = ctl->extra2;
6439 	struct ipv6_stable_secret *secret = ctl->data;
6440 
6441 	if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6442 		return -EIO;
6443 
6444 	lctl.maxlen = IPV6_MAX_STRLEN;
6445 	lctl.data = str;
6446 
6447 	if (!rtnl_trylock())
6448 		return restart_syscall();
6449 
6450 	if (!write && !secret->initialized) {
6451 		err = -EIO;
6452 		goto out;
6453 	}
6454 
6455 	err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6456 	if (err >= sizeof(str)) {
6457 		err = -EIO;
6458 		goto out;
6459 	}
6460 
6461 	err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6462 	if (err || !write)
6463 		goto out;
6464 
6465 	if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6466 		err = -EIO;
6467 		goto out;
6468 	}
6469 
6470 	secret->initialized = true;
6471 	secret->secret = addr;
6472 
6473 	if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6474 		struct net_device *dev;
6475 
6476 		for_each_netdev(net, dev) {
6477 			struct inet6_dev *idev = __in6_dev_get(dev);
6478 
6479 			if (idev) {
6480 				idev->cnf.addr_gen_mode =
6481 					IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6482 			}
6483 		}
6484 	} else {
6485 		struct inet6_dev *idev = ctl->extra1;
6486 
6487 		idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6488 	}
6489 
6490 out:
6491 	rtnl_unlock();
6492 
6493 	return err;
6494 }
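
/* Usage sketch (path as registered below for "stable_secret"): the
 * 128-bit secret is written and read in IPv6-address notation, e.g.
 *
 *	# echo 2001:db8:0:1:0:1:2:3 > /proc/sys/net/ipv6/conf/eth0/stable_secret
 *
 * Reading before a secret has been set fails with EIO, conf/all is
 * rejected outright above, and a successful write switches the affected
 * interfaces to IN6_ADDR_GEN_MODE_STABLE_PRIVACY (RFC 7217).
 */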
6495 
6496 static
6497 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
6498 						int write, void *buffer,
6499 						size_t *lenp,
6500 						loff_t *ppos)
6501 {
6502 	int *valp = ctl->data;
6503 	int val = *valp;
6504 	loff_t pos = *ppos;
6505 	struct ctl_table lctl;
6506 	int ret;
6507 
6508 	/* ctl->data points to idev->cnf.ignore_routes_when_linkdown;
6509 	 * it must not be modified until the RTNL lock is held.
6510 	 */
6511 	lctl = *ctl;
6512 	lctl.data = &val;
6513 
6514 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6515 
6516 	if (write)
6517 		ret = addrconf_fixup_linkdown(ctl, valp, val);
6518 	if (ret)
6519 		*ppos = pos;
6520 	return ret;
6521 }
6522 
6523 static
6524 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6525 {
6526 	if (rt) {
6527 		if (action)
6528 			rt->dst.flags |= DST_NOPOLICY;
6529 		else
6530 			rt->dst.flags &= ~DST_NOPOLICY;
6531 	}
6532 }
6533 
6534 static
6535 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6536 {
6537 	struct inet6_ifaddr *ifa;
6538 
6539 	read_lock_bh(&idev->lock);
6540 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
6541 		spin_lock(&ifa->lock);
6542 		if (ifa->rt) {
6543 			/* host routes only use builtin fib6_nh */
6544 			struct fib6_nh *nh = ifa->rt->fib6_nh;
6545 			int cpu;
6546 
6547 			rcu_read_lock();
6548 			ifa->rt->dst_nopolicy = val ? true : false;
6549 			if (nh->rt6i_pcpu) {
6550 				for_each_possible_cpu(cpu) {
6551 					struct rt6_info **rtp;
6552 
6553 					rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6554 					addrconf_set_nopolicy(*rtp, val);
6555 				}
6556 			}
6557 			rcu_read_unlock();
6558 		}
6559 		spin_unlock(&ifa->lock);
6560 	}
6561 	read_unlock_bh(&idev->lock);
6562 }
6563 
6564 static
6565 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
6566 {
6567 	struct inet6_dev *idev;
6568 	struct net *net;
6569 
6570 	if (!rtnl_trylock())
6571 		return restart_syscall();
6572 
6573 	*valp = val;
6574 
6575 	net = (struct net *)ctl->extra2;
6576 	if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6577 		rtnl_unlock();
6578 		return 0;
6579 	}
6580 
6581 	if (valp == &net->ipv6.devconf_all->disable_policy)  {
6582 		struct net_device *dev;
6583 
6584 		for_each_netdev(net, dev) {
6585 			idev = __in6_dev_get(dev);
6586 			if (idev)
6587 				addrconf_disable_policy_idev(idev, val);
6588 		}
6589 	} else {
6590 		idev = (struct inet6_dev *)ctl->extra1;
6591 		addrconf_disable_policy_idev(idev, val);
6592 	}
6593 
6594 	rtnl_unlock();
6595 	return 0;
6596 }
6597 
6598 static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
6599 				   void *buffer, size_t *lenp, loff_t *ppos)
6600 {
6601 	int *valp = ctl->data;
6602 	int val = *valp;
6603 	loff_t pos = *ppos;
6604 	struct ctl_table lctl;
6605 	int ret;
6606 
6607 	lctl = *ctl;
6608 	lctl.data = &val;
6609 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6610 
6611 	if (write && (*valp != val))
6612 		ret = addrconf_disable_policy(ctl, valp, val);
6613 
6614 	if (ret)
6615 		*ppos = pos;
6616 
6617 	return ret;
6618 }
6619 
6620 static int minus_one = -1;
6621 static const int two_five_five = 255;
6622 static u32 ioam6_if_id_max = U16_MAX;
6623 
6624 static const struct ctl_table addrconf_sysctl[] = {
6625 	{
6626 		.procname	= "forwarding",
6627 		.data		= &ipv6_devconf.forwarding,
6628 		.maxlen		= sizeof(int),
6629 		.mode		= 0644,
6630 		.proc_handler	= addrconf_sysctl_forward,
6631 	},
6632 	{
6633 		.procname	= "hop_limit",
6634 		.data		= &ipv6_devconf.hop_limit,
6635 		.maxlen		= sizeof(int),
6636 		.mode		= 0644,
6637 		.proc_handler	= proc_dointvec_minmax,
6638 		.extra1		= (void *)SYSCTL_ONE,
6639 		.extra2		= (void *)&two_five_five,
6640 	},
6641 	{
6642 		.procname	= "mtu",
6643 		.data		= &ipv6_devconf.mtu6,
6644 		.maxlen		= sizeof(int),
6645 		.mode		= 0644,
6646 		.proc_handler	= addrconf_sysctl_mtu,
6647 	},
6648 	{
6649 		.procname	= "accept_ra",
6650 		.data		= &ipv6_devconf.accept_ra,
6651 		.maxlen		= sizeof(int),
6652 		.mode		= 0644,
6653 		.proc_handler	= proc_dointvec,
6654 	},
6655 	{
6656 		.procname	= "accept_redirects",
6657 		.data		= &ipv6_devconf.accept_redirects,
6658 		.maxlen		= sizeof(int),
6659 		.mode		= 0644,
6660 		.proc_handler	= proc_dointvec,
6661 	},
6662 	{
6663 		.procname	= "autoconf",
6664 		.data		= &ipv6_devconf.autoconf,
6665 		.maxlen		= sizeof(int),
6666 		.mode		= 0644,
6667 		.proc_handler	= proc_dointvec,
6668 	},
6669 	{
6670 		.procname	= "dad_transmits",
6671 		.data		= &ipv6_devconf.dad_transmits,
6672 		.maxlen		= sizeof(int),
6673 		.mode		= 0644,
6674 		.proc_handler	= proc_dointvec,
6675 	},
6676 	{
6677 		.procname	= "router_solicitations",
6678 		.data		= &ipv6_devconf.rtr_solicits,
6679 		.maxlen		= sizeof(int),
6680 		.mode		= 0644,
6681 		.proc_handler	= proc_dointvec_minmax,
6682 		.extra1		= &minus_one,
6683 	},
6684 	{
6685 		.procname	= "router_solicitation_interval",
6686 		.data		= &ipv6_devconf.rtr_solicit_interval,
6687 		.maxlen		= sizeof(int),
6688 		.mode		= 0644,
6689 		.proc_handler	= proc_dointvec_jiffies,
6690 	},
6691 	{
6692 		.procname	= "router_solicitation_max_interval",
6693 		.data		= &ipv6_devconf.rtr_solicit_max_interval,
6694 		.maxlen		= sizeof(int),
6695 		.mode		= 0644,
6696 		.proc_handler	= proc_dointvec_jiffies,
6697 	},
6698 	{
6699 		.procname	= "router_solicitation_delay",
6700 		.data		= &ipv6_devconf.rtr_solicit_delay,
6701 		.maxlen		= sizeof(int),
6702 		.mode		= 0644,
6703 		.proc_handler	= proc_dointvec_jiffies,
6704 	},
6705 	{
6706 		.procname	= "force_mld_version",
6707 		.data		= &ipv6_devconf.force_mld_version,
6708 		.maxlen		= sizeof(int),
6709 		.mode		= 0644,
6710 		.proc_handler	= proc_dointvec,
6711 	},
6712 	{
6713 		.procname	= "mldv1_unsolicited_report_interval",
6714 		.data		=
6715 			&ipv6_devconf.mldv1_unsolicited_report_interval,
6716 		.maxlen		= sizeof(int),
6717 		.mode		= 0644,
6718 		.proc_handler	= proc_dointvec_ms_jiffies,
6719 	},
6720 	{
6721 		.procname	= "mldv2_unsolicited_report_interval",
6722 		.data		=
6723 			&ipv6_devconf.mldv2_unsolicited_report_interval,
6724 		.maxlen		= sizeof(int),
6725 		.mode		= 0644,
6726 		.proc_handler	= proc_dointvec_ms_jiffies,
6727 	},
6728 	{
6729 		.procname	= "use_tempaddr",
6730 		.data		= &ipv6_devconf.use_tempaddr,
6731 		.maxlen		= sizeof(int),
6732 		.mode		= 0644,
6733 		.proc_handler	= proc_dointvec,
6734 	},
6735 	{
6736 		.procname	= "temp_valid_lft",
6737 		.data		= &ipv6_devconf.temp_valid_lft,
6738 		.maxlen		= sizeof(int),
6739 		.mode		= 0644,
6740 		.proc_handler	= proc_dointvec,
6741 	},
6742 	{
6743 		.procname	= "temp_prefered_lft",
6744 		.data		= &ipv6_devconf.temp_prefered_lft,
6745 		.maxlen		= sizeof(int),
6746 		.mode		= 0644,
6747 		.proc_handler	= proc_dointvec,
6748 	},
6749 	{
6750 		.procname	= "regen_max_retry",
6751 		.data		= &ipv6_devconf.regen_max_retry,
6752 		.maxlen		= sizeof(int),
6753 		.mode		= 0644,
6754 		.proc_handler	= proc_dointvec,
6755 	},
6756 	{
6757 		.procname	= "max_desync_factor",
6758 		.data		= &ipv6_devconf.max_desync_factor,
6759 		.maxlen		= sizeof(int),
6760 		.mode		= 0644,
6761 		.proc_handler	= proc_dointvec,
6762 	},
6763 	{
6764 		.procname	= "max_addresses",
6765 		.data		= &ipv6_devconf.max_addresses,
6766 		.maxlen		= sizeof(int),
6767 		.mode		= 0644,
6768 		.proc_handler	= proc_dointvec,
6769 	},
6770 	{
6771 		.procname	= "accept_ra_defrtr",
6772 		.data		= &ipv6_devconf.accept_ra_defrtr,
6773 		.maxlen		= sizeof(int),
6774 		.mode		= 0644,
6775 		.proc_handler	= proc_dointvec,
6776 	},
6777 	{
6778 		.procname	= "ra_defrtr_metric",
6779 		.data		= &ipv6_devconf.ra_defrtr_metric,
6780 		.maxlen		= sizeof(u32),
6781 		.mode		= 0644,
6782 		.proc_handler	= proc_douintvec_minmax,
6783 		.extra1		= (void *)SYSCTL_ONE,
6784 	},
6785 	{
6786 		.procname	= "accept_ra_min_hop_limit",
6787 		.data		= &ipv6_devconf.accept_ra_min_hop_limit,
6788 		.maxlen		= sizeof(int),
6789 		.mode		= 0644,
6790 		.proc_handler	= proc_dointvec,
6791 	},
6792 	{
6793 		.procname	= "accept_ra_pinfo",
6794 		.data		= &ipv6_devconf.accept_ra_pinfo,
6795 		.maxlen		= sizeof(int),
6796 		.mode		= 0644,
6797 		.proc_handler	= proc_dointvec,
6798 	},
6799 #ifdef CONFIG_IPV6_ROUTER_PREF
6800 	{
6801 		.procname	= "accept_ra_rtr_pref",
6802 		.data		= &ipv6_devconf.accept_ra_rtr_pref,
6803 		.maxlen		= sizeof(int),
6804 		.mode		= 0644,
6805 		.proc_handler	= proc_dointvec,
6806 	},
6807 	{
6808 		.procname	= "router_probe_interval",
6809 		.data		= &ipv6_devconf.rtr_probe_interval,
6810 		.maxlen		= sizeof(int),
6811 		.mode		= 0644,
6812 		.proc_handler	= proc_dointvec_jiffies,
6813 	},
6814 #ifdef CONFIG_IPV6_ROUTE_INFO
6815 	{
6816 		.procname	= "accept_ra_rt_info_min_plen",
6817 		.data		= &ipv6_devconf.accept_ra_rt_info_min_plen,
6818 		.maxlen		= sizeof(int),
6819 		.mode		= 0644,
6820 		.proc_handler	= proc_dointvec,
6821 	},
6822 	{
6823 		.procname	= "accept_ra_rt_info_max_plen",
6824 		.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
6825 		.maxlen		= sizeof(int),
6826 		.mode		= 0644,
6827 		.proc_handler	= proc_dointvec,
6828 	},
6829 #endif
6830 #endif
6831 	{
6832 		.procname	= "proxy_ndp",
6833 		.data		= &ipv6_devconf.proxy_ndp,
6834 		.maxlen		= sizeof(int),
6835 		.mode		= 0644,
6836 		.proc_handler	= addrconf_sysctl_proxy_ndp,
6837 	},
6838 	{
6839 		.procname	= "accept_source_route",
6840 		.data		= &ipv6_devconf.accept_source_route,
6841 		.maxlen		= sizeof(int),
6842 		.mode		= 0644,
6843 		.proc_handler	= proc_dointvec,
6844 	},
6845 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6846 	{
6847 		.procname	= "optimistic_dad",
6848 		.data		= &ipv6_devconf.optimistic_dad,
6849 		.maxlen		= sizeof(int),
6850 		.mode		= 0644,
6851 		.proc_handler   = proc_dointvec,
6852 	},
6853 	{
6854 		.procname	= "use_optimistic",
6855 		.data		= &ipv6_devconf.use_optimistic,
6856 		.maxlen		= sizeof(int),
6857 		.mode		= 0644,
6858 		.proc_handler	= proc_dointvec,
6859 	},
6860 #endif
6861 #ifdef CONFIG_IPV6_MROUTE
6862 	{
6863 		.procname	= "mc_forwarding",
6864 		.data		= &ipv6_devconf.mc_forwarding,
6865 		.maxlen		= sizeof(int),
6866 		.mode		= 0444,
6867 		.proc_handler	= proc_dointvec,
6868 	},
6869 #endif
6870 	{
6871 		.procname	= "disable_ipv6",
6872 		.data		= &ipv6_devconf.disable_ipv6,
6873 		.maxlen		= sizeof(int),
6874 		.mode		= 0644,
6875 		.proc_handler	= addrconf_sysctl_disable,
6876 	},
6877 	{
6878 		.procname	= "accept_dad",
6879 		.data		= &ipv6_devconf.accept_dad,
6880 		.maxlen		= sizeof(int),
6881 		.mode		= 0644,
6882 		.proc_handler	= proc_dointvec,
6883 	},
6884 	{
6885 		.procname	= "force_tllao",
6886 		.data		= &ipv6_devconf.force_tllao,
6887 		.maxlen		= sizeof(int),
6888 		.mode		= 0644,
6889 		.proc_handler	= proc_dointvec
6890 	},
6891 	{
6892 		.procname	= "ndisc_notify",
6893 		.data		= &ipv6_devconf.ndisc_notify,
6894 		.maxlen		= sizeof(int),
6895 		.mode		= 0644,
6896 		.proc_handler	= proc_dointvec
6897 	},
6898 	{
6899 		.procname	= "suppress_frag_ndisc",
6900 		.data		= &ipv6_devconf.suppress_frag_ndisc,
6901 		.maxlen		= sizeof(int),
6902 		.mode		= 0644,
6903 		.proc_handler	= proc_dointvec
6904 	},
6905 	{
6906 		.procname	= "accept_ra_from_local",
6907 		.data		= &ipv6_devconf.accept_ra_from_local,
6908 		.maxlen		= sizeof(int),
6909 		.mode		= 0644,
6910 		.proc_handler	= proc_dointvec,
6911 	},
6912 	{
6913 		.procname	= "accept_ra_mtu",
6914 		.data		= &ipv6_devconf.accept_ra_mtu,
6915 		.maxlen		= sizeof(int),
6916 		.mode		= 0644,
6917 		.proc_handler	= proc_dointvec,
6918 	},
6919 	{
6920 		.procname	= "stable_secret",
6921 		.data		= &ipv6_devconf.stable_secret,
6922 		.maxlen		= IPV6_MAX_STRLEN,
6923 		.mode		= 0600,
6924 		.proc_handler	= addrconf_sysctl_stable_secret,
6925 	},
6926 	{
6927 		.procname	= "use_oif_addrs_only",
6928 		.data		= &ipv6_devconf.use_oif_addrs_only,
6929 		.maxlen		= sizeof(int),
6930 		.mode		= 0644,
6931 		.proc_handler	= proc_dointvec,
6932 	},
6933 	{
6934 		.procname	= "ignore_routes_with_linkdown",
6935 		.data		= &ipv6_devconf.ignore_routes_with_linkdown,
6936 		.maxlen		= sizeof(int),
6937 		.mode		= 0644,
6938 		.proc_handler	= addrconf_sysctl_ignore_routes_with_linkdown,
6939 	},
6940 	{
6941 		.procname	= "drop_unicast_in_l2_multicast",
6942 		.data		= &ipv6_devconf.drop_unicast_in_l2_multicast,
6943 		.maxlen		= sizeof(int),
6944 		.mode		= 0644,
6945 		.proc_handler	= proc_dointvec,
6946 	},
6947 	{
6948 		.procname	= "drop_unsolicited_na",
6949 		.data		= &ipv6_devconf.drop_unsolicited_na,
6950 		.maxlen		= sizeof(int),
6951 		.mode		= 0644,
6952 		.proc_handler	= proc_dointvec,
6953 	},
6954 	{
6955 		.procname	= "keep_addr_on_down",
6956 		.data		= &ipv6_devconf.keep_addr_on_down,
6957 		.maxlen		= sizeof(int),
6958 		.mode		= 0644,
6959 		.proc_handler	= proc_dointvec,
6960 
6961 	},
6962 	{
6963 		.procname	= "seg6_enabled",
6964 		.data		= &ipv6_devconf.seg6_enabled,
6965 		.maxlen		= sizeof(int),
6966 		.mode		= 0644,
6967 		.proc_handler	= proc_dointvec,
6968 	},
6969 #ifdef CONFIG_IPV6_SEG6_HMAC
6970 	{
6971 		.procname	= "seg6_require_hmac",
6972 		.data		= &ipv6_devconf.seg6_require_hmac,
6973 		.maxlen		= sizeof(int),
6974 		.mode		= 0644,
6975 		.proc_handler	= proc_dointvec,
6976 	},
6977 #endif
6978 	{
6979 		.procname       = "enhanced_dad",
6980 		.data           = &ipv6_devconf.enhanced_dad,
6981 		.maxlen         = sizeof(int),
6982 		.mode           = 0644,
6983 		.proc_handler   = proc_dointvec,
6984 	},
6985 	{
6986 		.procname	= "addr_gen_mode",
6987 		.data		= &ipv6_devconf.addr_gen_mode,
6988 		.maxlen		= sizeof(int),
6989 		.mode		= 0644,
6990 		.proc_handler	= addrconf_sysctl_addr_gen_mode,
6991 	},
6992 	{
6993 		.procname       = "disable_policy",
6994 		.data           = &ipv6_devconf.disable_policy,
6995 		.maxlen         = sizeof(int),
6996 		.mode           = 0644,
6997 		.proc_handler   = addrconf_sysctl_disable_policy,
6998 	},
6999 	{
7000 		.procname	= "ndisc_tclass",
7001 		.data		= &ipv6_devconf.ndisc_tclass,
7002 		.maxlen		= sizeof(int),
7003 		.mode		= 0644,
7004 		.proc_handler	= proc_dointvec_minmax,
7005 		.extra1		= (void *)SYSCTL_ZERO,
7006 		.extra2		= (void *)&two_five_five,
7007 	},
7008 	{
7009 		.procname	= "rpl_seg_enabled",
7010 		.data		= &ipv6_devconf.rpl_seg_enabled,
7011 		.maxlen		= sizeof(int),
7012 		.mode		= 0644,
7013 		.proc_handler	= proc_dointvec,
7014 	},
7015 	{
7016 		.procname	= "ioam6_enabled",
7017 		.data		= &ipv6_devconf.ioam6_enabled,
7018 		.maxlen		= sizeof(u8),
7019 		.mode		= 0644,
7020 		.proc_handler	= proc_dou8vec_minmax,
7021 		.extra1		= (void *)SYSCTL_ZERO,
7022 		.extra2		= (void *)SYSCTL_ONE,
7023 	},
7024 	{
7025 		.procname	= "ioam6_id",
7026 		.data		= &ipv6_devconf.ioam6_id,
7027 		.maxlen		= sizeof(u32),
7028 		.mode		= 0644,
7029 		.proc_handler	= proc_douintvec_minmax,
7030 		.extra1		= (void *)SYSCTL_ZERO,
7031 		.extra2		= (void *)&ioam6_if_id_max,
7032 	},
7033 	{
7034 		.procname	= "ioam6_id_wide",
7035 		.data		= &ipv6_devconf.ioam6_id_wide,
7036 		.maxlen		= sizeof(u32),
7037 		.mode		= 0644,
7038 		.proc_handler	= proc_douintvec,
7039 	},
7040 	{
7041 		.procname	= "ndisc_evict_nocarrier",
7042 		.data		= &ipv6_devconf.ndisc_evict_nocarrier,
7043 		.maxlen		= sizeof(u8),
7044 		.mode		= 0644,
7045 		.proc_handler	= proc_dou8vec_minmax,
7046 		.extra1		= (void *)SYSCTL_ZERO,
7047 		.extra2		= (void *)SYSCTL_ONE,
7048 	},
7049 	{
7050 		.procname	= "accept_untracked_na",
7051 		.data		= &ipv6_devconf.accept_untracked_na,
7052 		.maxlen		= sizeof(int),
7053 		.mode		= 0644,
7054 		.proc_handler	= proc_dointvec_minmax,
7055 		.extra1		= SYSCTL_ZERO,
7056 		.extra2		= SYSCTL_TWO,
7057 	},
7058 	{
7059 		/* sentinel */
7060 	}
7061 };
7062 
7063 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
7064 		struct inet6_dev *idev, struct ipv6_devconf *p)
7065 {
7066 	int i, ifindex;
7067 	struct ctl_table *table;
7068 	char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
7069 
7070 	table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
7071 	if (!table)
7072 		goto out;
7073 
7074 	for (i = 0; table[i].data; i++) {
7075 		table[i].data += (char *)p - (char *)&ipv6_devconf;
7076 		/* Entries that already use extra1/extra2 as min/max bounds
7077 		 * (e.g. proc_dointvec_minmax) must keep them; only install the
7078 		 * idev/net back-pointers when both slots are free.
7079 		 */
7080 		if (!table[i].extra1 && !table[i].extra2) {
7081 			table[i].extra1 = idev; /* embedded; no ref */
7082 			table[i].extra2 = net;
7083 		}
7084 	}
7085 
7086 	snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
7087 
7088 	p->sysctl_header = register_net_sysctl(net, path, table);
7089 	if (!p->sysctl_header)
7090 		goto free;
7091 
7092 	if (!strcmp(dev_name, "all"))
7093 		ifindex = NETCONFA_IFINDEX_ALL;
7094 	else if (!strcmp(dev_name, "default"))
7095 		ifindex = NETCONFA_IFINDEX_DEFAULT;
7096 	else
7097 		ifindex = idev->dev->ifindex;
7098 	inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
7099 				     ifindex, p);
7100 	return 0;
7101 
7102 free:
7103 	kfree(table);
7104 out:
7105 	return -ENOBUFS;
7106 }
7107 
7108 static void __addrconf_sysctl_unregister(struct net *net,
7109 					 struct ipv6_devconf *p, int ifindex)
7110 {
7111 	struct ctl_table *table;
7112 
7113 	if (!p->sysctl_header)
7114 		return;
7115 
7116 	table = p->sysctl_header->ctl_table_arg;
7117 	unregister_net_sysctl_table(p->sysctl_header);
7118 	p->sysctl_header = NULL;
7119 	kfree(table);
7120 
7121 	inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7122 }
7123 
7124 static int addrconf_sysctl_register(struct inet6_dev *idev)
7125 {
7126 	int err;
7127 
7128 	if (!sysctl_dev_name_is_allowed(idev->dev->name))
7129 		return -EINVAL;
7130 
7131 	err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7132 				    &ndisc_ifinfo_sysctl_change);
7133 	if (err)
7134 		return err;
7135 	err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7136 					 idev, &idev->cnf);
7137 	if (err)
7138 		neigh_sysctl_unregister(idev->nd_parms);
7139 
7140 	return err;
7141 }
7142 
7143 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
7144 {
7145 	__addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
7146 				     idev->dev->ifindex);
7147 	neigh_sysctl_unregister(idev->nd_parms);
7148 }
7149 
7150 
7151 #endif
7152 
7153 static int __net_init addrconf_init_net(struct net *net)
7154 {
7155 	int err = -ENOMEM;
7156 	struct ipv6_devconf *all, *dflt;
7157 
7158 	spin_lock_init(&net->ipv6.addrconf_hash_lock);
7159 	INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work);
7160 	net->ipv6.inet6_addr_lst = kcalloc(IN6_ADDR_HSIZE,
7161 					   sizeof(struct hlist_head),
7162 					   GFP_KERNEL);
7163 	if (!net->ipv6.inet6_addr_lst)
7164 		goto err_alloc_addr;
7165 
7166 	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
7167 	if (!all)
7168 		goto err_alloc_all;
7169 
7170 	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
7171 	if (!dflt)
7172 		goto err_alloc_dflt;
7173 
7174 	if (!net_eq(net, &init_net)) {
7175 		switch (net_inherit_devconf()) {
7176 		case 1:  /* copy from init_net */
7177 			memcpy(all, init_net.ipv6.devconf_all,
7178 			       sizeof(ipv6_devconf));
7179 			memcpy(dflt, init_net.ipv6.devconf_dflt,
7180 			       sizeof(ipv6_devconf_dflt));
7181 			break;
7182 		case 3: /* copy from the current netns */
7183 			memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
7184 			       sizeof(ipv6_devconf));
7185 			memcpy(dflt,
7186 			       current->nsproxy->net_ns->ipv6.devconf_dflt,
7187 			       sizeof(ipv6_devconf_dflt));
7188 			break;
7189 		case 0:
7190 		case 2:
7191 			/* use compiled values */
7192 			break;
7193 		}
7194 	}
7195 
7196 	/* these will be inherited by all namespaces */
7197 	dflt->autoconf = ipv6_defaults.autoconf;
7198 	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
7199 
7200 	dflt->stable_secret.initialized = false;
7201 	all->stable_secret.initialized = false;
7202 
7203 	net->ipv6.devconf_all = all;
7204 	net->ipv6.devconf_dflt = dflt;
7205 
7206 #ifdef CONFIG_SYSCTL
7207 	err = __addrconf_sysctl_register(net, "all", NULL, all);
7208 	if (err < 0)
7209 		goto err_reg_all;
7210 
7211 	err = __addrconf_sysctl_register(net, "default", NULL, dflt);
7212 	if (err < 0)
7213 		goto err_reg_dflt;
7214 #endif
7215 	return 0;
7216 
7217 #ifdef CONFIG_SYSCTL
7218 err_reg_dflt:
7219 	__addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
7220 err_reg_all:
7221 	kfree(dflt);
7222 	net->ipv6.devconf_dflt = NULL;
7223 #endif
7224 err_alloc_dflt:
7225 	kfree(all);
7226 	net->ipv6.devconf_all = NULL;
7227 err_alloc_all:
7228 	kfree(net->ipv6.inet6_addr_lst);
7229 err_alloc_addr:
7230 	return err;
7231 }
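
/* The inheritance switch above follows the net.core.devconf_inherit_init_net
 * sysctl (assumed to back net_inherit_devconf()): for IPv6, 0 and 2 keep
 * the compiled-in defaults, 1 copies the "all"/"default" settings from
 * init_net, and 3 copies them from the namespace creating this one.
 */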
7232 
7233 static void __net_exit addrconf_exit_net(struct net *net)
7234 {
7235 	int i;
7236 
7237 #ifdef CONFIG_SYSCTL
7238 	__addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
7239 				     NETCONFA_IFINDEX_DEFAULT);
7240 	__addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
7241 				     NETCONFA_IFINDEX_ALL);
7242 #endif
7243 	kfree(net->ipv6.devconf_dflt);
7244 	net->ipv6.devconf_dflt = NULL;
7245 	kfree(net->ipv6.devconf_all);
7246 	net->ipv6.devconf_all = NULL;
7247 
7248 	cancel_delayed_work_sync(&net->ipv6.addr_chk_work);
7249 	/*
7250 	 *	Verify the address hash table is empty, then free it.
7251 	 */
7252 	for (i = 0; i < IN6_ADDR_HSIZE; i++)
7253 		WARN_ON_ONCE(!hlist_empty(&net->ipv6.inet6_addr_lst[i]));
7254 
7255 	kfree(net->ipv6.inet6_addr_lst);
7256 	net->ipv6.inet6_addr_lst = NULL;
7257 }
7258 
7259 static struct pernet_operations addrconf_ops = {
7260 	.init = addrconf_init_net,
7261 	.exit = addrconf_exit_net,
7262 };
7263 
7264 static struct rtnl_af_ops inet6_ops __read_mostly = {
7265 	.family		  = AF_INET6,
7266 	.fill_link_af	  = inet6_fill_link_af,
7267 	.get_link_af_size = inet6_get_link_af_size,
7268 	.validate_link_af = inet6_validate_link_af,
7269 	.set_link_af	  = inet6_set_link_af,
7270 };
7271 
7272 /*
7273  *	Init / cleanup code
7274  */
7275 
7276 int __init addrconf_init(void)
7277 {
7278 	struct inet6_dev *idev;
7279 	int err;
7280 
7281 	err = ipv6_addr_label_init();
7282 	if (err < 0) {
7283 		pr_crit("%s: cannot initialize default policy table: %d\n",
7284 			__func__, err);
7285 		goto out;
7286 	}
7287 
7288 	err = register_pernet_subsys(&addrconf_ops);
7289 	if (err < 0)
7290 		goto out_addrlabel;
7291 
7292 	addrconf_wq = create_workqueue("ipv6_addrconf");
7293 	if (!addrconf_wq) {
7294 		err = -ENOMEM;
7295 		goto out_nowq;
7296 	}
7297 
7298 	rtnl_lock();
7299 	idev = ipv6_add_dev(blackhole_netdev);
7300 	rtnl_unlock();
7301 	if (IS_ERR(idev)) {
7302 		err = PTR_ERR(idev);
7303 		goto errlo;
7304 	}
7305 
7306 	ip6_route_init_special_entries();
7307 
7308 	register_netdevice_notifier(&ipv6_dev_notf);
7309 
7310 	addrconf_verify(&init_net);
7311 
7312 	rtnl_af_register(&inet6_ops);
7313 
7314 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
7315 				   NULL, inet6_dump_ifinfo, 0);
7316 	if (err < 0)
7317 		goto errout;
7318 
7319 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
7320 				   inet6_rtm_newaddr, NULL, 0);
7321 	if (err < 0)
7322 		goto errout;
7323 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
7324 				   inet6_rtm_deladdr, NULL, 0);
7325 	if (err < 0)
7326 		goto errout;
7327 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
7328 				   inet6_rtm_getaddr, inet6_dump_ifaddr,
7329 				   RTNL_FLAG_DOIT_UNLOCKED);
7330 	if (err < 0)
7331 		goto errout;
7332 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
7333 				   NULL, inet6_dump_ifmcaddr, 0);
7334 	if (err < 0)
7335 		goto errout;
7336 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
7337 				   NULL, inet6_dump_ifacaddr, 0);
7338 	if (err < 0)
7339 		goto errout;
7340 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
7341 				   inet6_netconf_get_devconf,
7342 				   inet6_netconf_dump_devconf,
7343 				   RTNL_FLAG_DOIT_UNLOCKED);
7344 	if (err < 0)
7345 		goto errout;
7346 	err = ipv6_addr_label_rtnl_register();
7347 	if (err < 0)
7348 		goto errout;
7349 
7350 	return 0;
7351 errout:
7352 	rtnl_unregister_all(PF_INET6);
7353 	rtnl_af_unregister(&inet6_ops);
7354 	unregister_netdevice_notifier(&ipv6_dev_notf);
7355 errlo:
7356 	destroy_workqueue(addrconf_wq);
7357 out_nowq:
7358 	unregister_pernet_subsys(&addrconf_ops);
7359 out_addrlabel:
7360 	ipv6_addr_label_cleanup();
7361 out:
7362 	return err;
7363 }
7364 
7365 void addrconf_cleanup(void)
7366 {
7367 	struct net_device *dev;
7368 
7369 	unregister_netdevice_notifier(&ipv6_dev_notf);
7370 	unregister_pernet_subsys(&addrconf_ops);
7371 	ipv6_addr_label_cleanup();
7372 
7373 	rtnl_af_unregister(&inet6_ops);
7374 
7375 	rtnl_lock();
7376 
7377 	/* clean dev list */
7378 	for_each_netdev(&init_net, dev) {
7379 		if (__in6_dev_get(dev) == NULL)
7380 			continue;
7381 		addrconf_ifdown(dev, true);
7382 	}
7383 	addrconf_ifdown(init_net.loopback_dev, true);
7384 
7385 	rtnl_unlock();
7386 
7387 	destroy_workqueue(addrconf_wq);
7388 }
7389