xref: /openbmc/linux/net/ipv4/devinet.c (revision 1ee5ef31)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	NET3	IP device support routines.
4  *
5  *	Derived from the IP parts of dev.c 1.0.19
6  * 		Authors:	Ross Biro
7  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
9  *
10  *	Additional Authors:
11  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  *	Changes:
15  *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
16  *					lists.
17  *		Cyrus Durgin:		updated for kmod
18  *		Matthias Andree:	in devinet_ioctl, compare label and
19  *					address (4.4BSD alias style support),
20  *					fall back to comparing just the label
21  *					if no match found.
22  */
23 
24 
25 #include <linux/uaccess.h>
26 #include <linux/bitops.h>
27 #include <linux/capability.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched/signal.h>
32 #include <linux/string.h>
33 #include <linux/mm.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/in.h>
37 #include <linux/errno.h>
38 #include <linux/interrupt.h>
39 #include <linux/if_addr.h>
40 #include <linux/if_ether.h>
41 #include <linux/inet.h>
42 #include <linux/netdevice.h>
43 #include <linux/etherdevice.h>
44 #include <linux/skbuff.h>
45 #include <linux/init.h>
46 #include <linux/notifier.h>
47 #include <linux/inetdevice.h>
48 #include <linux/igmp.h>
49 #include <linux/slab.h>
50 #include <linux/hash.h>
51 #ifdef CONFIG_SYSCTL
52 #include <linux/sysctl.h>
53 #endif
54 #include <linux/kmod.h>
55 #include <linux/netconf.h>
56 
57 #include <net/arp.h>
58 #include <net/ip.h>
59 #include <net/route.h>
60 #include <net/ip_fib.h>
61 #include <net/rtnetlink.h>
62 #include <net/net_namespace.h>
63 #include <net/addrconf.h>
64 
65 #define IPV6ONLY_FLAGS	\
66 		(IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
67 		 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
68 		 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
69 
70 static struct ipv4_devconf ipv4_devconf = {
71 	.data = {
72 		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
73 		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
74 		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
75 		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
76 		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
77 		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
78 		[IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1,
79 	},
80 };
81 
82 static struct ipv4_devconf ipv4_devconf_dflt = {
83 	.data = {
84 		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
85 		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
86 		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
87 		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
88 		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
89 		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
90 		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
91 		[IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1,
92 	},
93 };
94 
95 #define IPV4_DEVCONF_DFLT(net, attr) \
96 	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
97 
98 static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
99 	[IFA_LOCAL]     	= { .type = NLA_U32 },
100 	[IFA_ADDRESS]   	= { .type = NLA_U32 },
101 	[IFA_BROADCAST] 	= { .type = NLA_U32 },
102 	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
103 	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
104 	[IFA_FLAGS]		= { .type = NLA_U32 },
105 	[IFA_RT_PRIORITY]	= { .type = NLA_U32 },
106 	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
107 	[IFA_PROTO]		= { .type = NLA_U8 },
108 };
109 
110 struct inet_fill_args {
111 	u32 portid;
112 	u32 seq;
113 	int event;
114 	unsigned int flags;
115 	int netnsid;
116 	int ifindex;
117 };
118 
119 #define IN4_ADDR_HSIZE_SHIFT	8
120 #define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
121 
122 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
123 
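/* All configured IPv4 addresses are kept in a single global hash table,
 * keyed on (local address, network namespace), so lookups such as
 * inet_lookup_ifaddr_rcu() do not have to walk every device.  Entries are
 * added and removed under RTNL and traversed under RCU.
 */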
124 static u32 inet_addr_hash(const struct net *net, __be32 addr)
125 {
126 	u32 val = (__force u32) addr ^ net_hash_mix(net);
127 
128 	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
129 }
130 
131 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
132 {
133 	u32 hash = inet_addr_hash(net, ifa->ifa_local);
134 
135 	ASSERT_RTNL();
136 	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
137 }
138 
139 static void inet_hash_remove(struct in_ifaddr *ifa)
140 {
141 	ASSERT_RTNL();
142 	hlist_del_init_rcu(&ifa->hash);
143 }
144 
145 /**
146  * __ip_dev_find - find the first device with a given source address.
147  * @net: the net namespace
148  * @addr: the source address
149  * @devref: if true, take a reference on the found device
150  *
151  * If a caller uses devref=false, it should be protected by RCU, or RTNL
152  */
153 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
154 {
155 	struct net_device *result = NULL;
156 	struct in_ifaddr *ifa;
157 
158 	rcu_read_lock();
159 	ifa = inet_lookup_ifaddr_rcu(net, addr);
160 	if (!ifa) {
161 		struct flowi4 fl4 = { .daddr = addr };
162 		struct fib_result res = { 0 };
163 		struct fib_table *local;
164 
165 		/* Fall back to the FIB local table so that communication
166 		 * over loopback subnets works.
167 		 */
168 		local = fib_get_table(net, RT_TABLE_LOCAL);
169 		if (local &&
170 		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
171 		    res.type == RTN_LOCAL)
172 			result = FIB_RES_DEV(res);
173 	} else {
174 		result = ifa->ifa_dev->dev;
175 	}
176 	if (result && devref)
177 		dev_hold(result);
178 	rcu_read_unlock();
179 	return result;
180 }
181 EXPORT_SYMBOL(__ip_dev_find);
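/* Minimal usage sketch (hypothetical caller): look up the device owning a
 * local address without taking a device reference, which requires staying
 * inside an RCU read-side section for as long as the pointer is used:
 *
 *	rcu_read_lock();
 *	dev = __ip_dev_find(net, addr, false);
 *	if (dev)
 *		... use dev only within this RCU section ...
 *	rcu_read_unlock();
 */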
182 
183 /* called under RCU lock */
184 struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
185 {
186 	u32 hash = inet_addr_hash(net, addr);
187 	struct in_ifaddr *ifa;
188 
189 	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash)
190 		if (ifa->ifa_local == addr &&
191 		    net_eq(dev_net(ifa->ifa_dev->dev), net))
192 			return ifa;
193 
194 	return NULL;
195 }
196 
197 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
198 
199 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
200 static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
201 static void inet_del_ifa(struct in_device *in_dev,
202 			 struct in_ifaddr __rcu **ifap,
203 			 int destroy);
204 #ifdef CONFIG_SYSCTL
205 static int devinet_sysctl_register(struct in_device *idev);
206 static void devinet_sysctl_unregister(struct in_device *idev);
207 #else
208 static int devinet_sysctl_register(struct in_device *idev)
209 {
210 	return 0;
211 }
212 static void devinet_sysctl_unregister(struct in_device *idev)
213 {
214 }
215 #endif
216 
217 /* Locks all the inet devices. */
218 
219 static struct in_ifaddr *inet_alloc_ifa(void)
220 {
221 	return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL_ACCOUNT);
222 }
223 
224 static void inet_rcu_free_ifa(struct rcu_head *head)
225 {
226 	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
227 	if (ifa->ifa_dev)
228 		in_dev_put(ifa->ifa_dev);
229 	kfree(ifa);
230 }
231 
232 static void inet_free_ifa(struct in_ifaddr *ifa)
233 {
234 	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
235 }
236 
237 static void in_dev_free_rcu(struct rcu_head *head)
238 {
239 	struct in_device *idev = container_of(head, struct in_device, rcu_head);
240 
241 	kfree(rcu_dereference_protected(idev->mc_hash, 1));
242 	kfree(idev);
243 }
244 
245 void in_dev_finish_destroy(struct in_device *idev)
246 {
247 	struct net_device *dev = idev->dev;
248 
249 	WARN_ON(idev->ifa_list);
250 	WARN_ON(idev->mc_list);
251 #ifdef NET_REFCNT_DEBUG
252 	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
253 #endif
254 	netdev_put(dev, &idev->dev_tracker);
255 	if (!idev->dead)
256 		pr_err("Freeing alive in_device %p\n", idev);
257 	else
258 		call_rcu(&idev->rcu_head, in_dev_free_rcu);
259 }
260 EXPORT_SYMBOL(in_dev_finish_destroy);
261 
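/* Allocate and attach the per-device IPv4 state (struct in_device): copy the
 * namespace default devconf, allocate ARP neigh_parms, register sysctls and
 * multicast state (except for the blackhole device), and only then publish
 * the result via dev->ip_ptr, so the receive path never sees a half-built
 * in_device.
 */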
262 static struct in_device *inetdev_init(struct net_device *dev)
263 {
264 	struct in_device *in_dev;
265 	int err = -ENOMEM;
266 
267 	ASSERT_RTNL();
268 
269 	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
270 	if (!in_dev)
271 		goto out;
272 	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
273 			sizeof(in_dev->cnf));
274 	in_dev->cnf.sysctl = NULL;
275 	in_dev->dev = dev;
276 	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
277 	if (!in_dev->arp_parms)
278 		goto out_kfree;
279 	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
280 		dev_disable_lro(dev);
281 	/* Reference in_dev->dev */
282 	netdev_hold(dev, &in_dev->dev_tracker, GFP_KERNEL);
283 	/* Account for reference dev->ip_ptr (below) */
284 	refcount_set(&in_dev->refcnt, 1);
285 
286 	if (dev != blackhole_netdev) {
287 		err = devinet_sysctl_register(in_dev);
288 		if (err) {
289 			in_dev->dead = 1;
290 			neigh_parms_release(&arp_tbl, in_dev->arp_parms);
291 			in_dev_put(in_dev);
292 			in_dev = NULL;
293 			goto out;
294 		}
295 		ip_mc_init_dev(in_dev);
296 		if (dev->flags & IFF_UP)
297 			ip_mc_up(in_dev);
298 	}
299 
300 	/* we can receive as soon as ip_ptr is set -- do this last */
301 	rcu_assign_pointer(dev->ip_ptr, in_dev);
302 out:
303 	return in_dev ?: ERR_PTR(err);
304 out_kfree:
305 	kfree(in_dev);
306 	in_dev = NULL;
307 	goto out;
308 }
309 
310 static void inetdev_destroy(struct in_device *in_dev)
311 {
312 	struct net_device *dev;
313 	struct in_ifaddr *ifa;
314 
315 	ASSERT_RTNL();
316 
317 	dev = in_dev->dev;
318 
319 	in_dev->dead = 1;
320 
321 	ip_mc_destroy_dev(in_dev);
322 
323 	while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) {
324 		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
325 		inet_free_ifa(ifa);
326 	}
327 
328 	RCU_INIT_POINTER(dev->ip_ptr, NULL);
329 
330 	devinet_sysctl_unregister(in_dev);
331 	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
332 	arp_ifdown(dev);
333 
334 	in_dev_put(in_dev);
335 }
336 
337 static int __init inet_blackhole_dev_init(void)
338 {
339 	int err = 0;
340 
341 	rtnl_lock();
342 	if (!inetdev_init(blackhole_netdev))
343 		err = -ENOMEM;
344 	rtnl_unlock();
345 
346 	return err;
347 }
348 late_initcall(inet_blackhole_dev_init);
349 
350 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
351 {
352 	const struct in_ifaddr *ifa;
353 
354 	rcu_read_lock();
355 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
356 		if (inet_ifa_match(a, ifa)) {
357 			if (!b || inet_ifa_match(b, ifa)) {
358 				rcu_read_unlock();
359 				return 1;
360 			}
361 		}
362 	}
363 	rcu_read_unlock();
364 	return 0;
365 }
366 
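/* Delete one ifaddr from a device.  If a primary address is removed, its
 * secondaries are either deleted as well or, when promote_secondaries is
 * enabled, one secondary is promoted to primary and the routes of the
 * remaining secondaries are re-added with the new preferred source address.
 */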
367 static void __inet_del_ifa(struct in_device *in_dev,
368 			   struct in_ifaddr __rcu **ifap,
369 			   int destroy, struct nlmsghdr *nlh, u32 portid)
370 {
371 	struct in_ifaddr *promote = NULL;
372 	struct in_ifaddr *ifa, *ifa1;
373 	struct in_ifaddr __rcu **last_prim;
374 	struct in_ifaddr *prev_prom = NULL;
375 	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
376 
377 	ASSERT_RTNL();
378 
379 	ifa1 = rtnl_dereference(*ifap);
380 	last_prim = ifap;
381 	if (in_dev->dead)
382 		goto no_promotions;
383 
384 	/* 1. Deleting the primary ifaddr forces deletion of all secondaries,
385 	 * unless alias promotion is set.
386 	 */
387 
388 	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
389 		struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next;
390 
391 		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
392 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
393 			    ifa1->ifa_scope <= ifa->ifa_scope)
394 				last_prim = &ifa->ifa_next;
395 
396 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
397 			    ifa1->ifa_mask != ifa->ifa_mask ||
398 			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
399 				ifap1 = &ifa->ifa_next;
400 				prev_prom = ifa;
401 				continue;
402 			}
403 
404 			if (!do_promote) {
405 				inet_hash_remove(ifa);
406 				*ifap1 = ifa->ifa_next;
407 
408 				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
409 				blocking_notifier_call_chain(&inetaddr_chain,
410 						NETDEV_DOWN, ifa);
411 				inet_free_ifa(ifa);
412 			} else {
413 				promote = ifa;
414 				break;
415 			}
416 		}
417 	}
418 
419 	/* On promotion, all secondaries from the subnet change their
420 	 * primary IP; we must remove all their routes silently and
421 	 * later add them back with the new prefsrc. Do this while all
422 	 * addresses are still on the device list.
423 	 */
424 	for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) {
425 		if (ifa1->ifa_mask == ifa->ifa_mask &&
426 		    inet_ifa_match(ifa1->ifa_address, ifa))
427 			fib_del_ifaddr(ifa, ifa1);
428 	}
429 
430 no_promotions:
431 	/* 2. Unlink it */
432 
433 	*ifap = ifa1->ifa_next;
434 	inet_hash_remove(ifa1);
435 
436 	/* 3. Announce address deletion */
437 
438 	/* Send the message first, then call the notifier.
439 	   At first sight, the FIB update triggered by the notifier
440 	   will refer to an already deleted ifaddr, which could confuse
441 	   netlink listeners. It is not true: gated sees that the route
442 	   was deleted and, if it still thinks the ifaddr is valid, it
443 	   will try to restore the deleted routes... Grr.
444 	   So this order is correct.
445 	 */
446 	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
447 	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
448 
449 	if (promote) {
450 		struct in_ifaddr *next_sec;
451 
452 		next_sec = rtnl_dereference(promote->ifa_next);
453 		if (prev_prom) {
454 			struct in_ifaddr *last_sec;
455 
456 			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
457 
458 			last_sec = rtnl_dereference(*last_prim);
459 			rcu_assign_pointer(promote->ifa_next, last_sec);
460 			rcu_assign_pointer(*last_prim, promote);
461 		}
462 
463 		promote->ifa_flags &= ~IFA_F_SECONDARY;
464 		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
465 		blocking_notifier_call_chain(&inetaddr_chain,
466 				NETDEV_UP, promote);
467 		for (ifa = next_sec; ifa;
468 		     ifa = rtnl_dereference(ifa->ifa_next)) {
469 			if (ifa1->ifa_mask != ifa->ifa_mask ||
470 			    !inet_ifa_match(ifa1->ifa_address, ifa))
471 					continue;
472 			fib_add_ifaddr(ifa);
473 		}
474 
475 	}
476 	if (destroy)
477 		inet_free_ifa(ifa1);
478 }
479 
480 static void inet_del_ifa(struct in_device *in_dev,
481 			 struct in_ifaddr __rcu **ifap,
482 			 int destroy)
483 {
484 	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
485 }
486 
487 static void check_lifetime(struct work_struct *work);
488 
489 static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
490 
491 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
492 			     u32 portid, struct netlink_ext_ack *extack)
493 {
494 	struct in_ifaddr __rcu **last_primary, **ifap;
495 	struct in_device *in_dev = ifa->ifa_dev;
496 	struct in_validator_info ivi;
497 	struct in_ifaddr *ifa1;
498 	int ret;
499 
500 	ASSERT_RTNL();
501 
502 	if (!ifa->ifa_local) {
503 		inet_free_ifa(ifa);
504 		return 0;
505 	}
506 
507 	ifa->ifa_flags &= ~IFA_F_SECONDARY;
508 	last_primary = &in_dev->ifa_list;
509 
510 	/* Don't set IPv6-only flags on IPv4 addresses */
511 	ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
512 
513 	ifap = &in_dev->ifa_list;
514 	ifa1 = rtnl_dereference(*ifap);
515 
516 	while (ifa1) {
517 		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
518 		    ifa->ifa_scope <= ifa1->ifa_scope)
519 			last_primary = &ifa1->ifa_next;
520 		if (ifa1->ifa_mask == ifa->ifa_mask &&
521 		    inet_ifa_match(ifa1->ifa_address, ifa)) {
522 			if (ifa1->ifa_local == ifa->ifa_local) {
523 				inet_free_ifa(ifa);
524 				return -EEXIST;
525 			}
526 			if (ifa1->ifa_scope != ifa->ifa_scope) {
527 				NL_SET_ERR_MSG(extack, "ipv4: Invalid scope value");
528 				inet_free_ifa(ifa);
529 				return -EINVAL;
530 			}
531 			ifa->ifa_flags |= IFA_F_SECONDARY;
532 		}
533 
534 		ifap = &ifa1->ifa_next;
535 		ifa1 = rtnl_dereference(*ifap);
536 	}
537 
538 	/* Allow any devices that wish to register ifaddr validators to weigh
539 	 * in now, before changes are committed.  The rtnl lock is serializing
540 	 * access here, so the state should not change between a validator call
541 	 * and a final notify on commit.  This isn't invoked on promotion under
542 	 * the assumption that validators are checking the address itself, and
543 	 * not the flags.
544 	 */
545 	ivi.ivi_addr = ifa->ifa_address;
546 	ivi.ivi_dev = ifa->ifa_dev;
547 	ivi.extack = extack;
548 	ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
549 					   NETDEV_UP, &ivi);
550 	ret = notifier_to_errno(ret);
551 	if (ret) {
552 		inet_free_ifa(ifa);
553 		return ret;
554 	}
555 
556 	if (!(ifa->ifa_flags & IFA_F_SECONDARY))
557 		ifap = last_primary;
558 
559 	rcu_assign_pointer(ifa->ifa_next, *ifap);
560 	rcu_assign_pointer(*ifap, ifa);
561 
562 	inet_hash_insert(dev_net(in_dev->dev), ifa);
563 
564 	cancel_delayed_work(&check_lifetime_work);
565 	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
566 
567 	/* Send the message first, then call the notifier.
568 	   The notifier will trigger the FIB update, so that
569 	   netlink listeners will know about the new ifaddr. */
570 	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
571 	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
572 
573 	return 0;
574 }
575 
576 static int inet_insert_ifa(struct in_ifaddr *ifa)
577 {
578 	return __inet_insert_ifa(ifa, NULL, 0, NULL);
579 }
580 
581 static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
582 {
583 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
584 
585 	ASSERT_RTNL();
586 
587 	ipv4_devconf_setall(in_dev);
588 	neigh_parms_data_state_setall(in_dev->arp_parms);
589 	if (ifa->ifa_dev != in_dev) {
590 		WARN_ON(ifa->ifa_dev);
591 		in_dev_hold(in_dev);
592 		ifa->ifa_dev = in_dev;
593 	}
594 	if (ipv4_is_loopback(ifa->ifa_local))
595 		ifa->ifa_scope = RT_SCOPE_HOST;
596 	return inet_insert_ifa(ifa);
597 }
598 
599 /* Caller must hold RCU or RTNL:
600  * We don't take a reference on the found in_device
601  */
602 struct in_device *inetdev_by_index(struct net *net, int ifindex)
603 {
604 	struct net_device *dev;
605 	struct in_device *in_dev = NULL;
606 
607 	rcu_read_lock();
608 	dev = dev_get_by_index_rcu(net, ifindex);
609 	if (dev)
610 		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
611 	rcu_read_unlock();
612 	return in_dev;
613 }
614 EXPORT_SYMBOL(inetdev_by_index);
615 
616 /* Called only with the RTNL lock held. No other locks taken. */
617 
618 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
619 				    __be32 mask)
620 {
621 	struct in_ifaddr *ifa;
622 
623 	ASSERT_RTNL();
624 
625 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
626 		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
627 			return ifa;
628 	}
629 	return NULL;
630 }
631 
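/* Join or leave the multicast group for an address flagged IFA_F_MCAUTOJOIN,
 * using the per-namespace kernel socket (net->ipv4.mc_autojoin_sk).  Returns
 * -EOPNOTSUPP when CONFIG_IP_MULTICAST is not enabled.
 */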
632 static int ip_mc_autojoin_config(struct net *net, bool join,
633 				 const struct in_ifaddr *ifa)
634 {
635 #if defined(CONFIG_IP_MULTICAST)
636 	struct ip_mreqn mreq = {
637 		.imr_multiaddr.s_addr = ifa->ifa_address,
638 		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
639 	};
640 	struct sock *sk = net->ipv4.mc_autojoin_sk;
641 	int ret;
642 
643 	ASSERT_RTNL();
644 
645 	lock_sock(sk);
646 	if (join)
647 		ret = ip_mc_join_group(sk, &mreq);
648 	else
649 		ret = ip_mc_leave_group(sk, &mreq);
650 	release_sock(sk);
651 
652 	return ret;
653 #else
654 	return -EOPNOTSUPP;
655 #endif
656 }
657 
658 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
659 			    struct netlink_ext_ack *extack)
660 {
661 	struct net *net = sock_net(skb->sk);
662 	struct in_ifaddr __rcu **ifap;
663 	struct nlattr *tb[IFA_MAX+1];
664 	struct in_device *in_dev;
665 	struct ifaddrmsg *ifm;
666 	struct in_ifaddr *ifa;
667 	int err;
668 
669 	ASSERT_RTNL();
670 
671 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
672 				     ifa_ipv4_policy, extack);
673 	if (err < 0)
674 		goto errout;
675 
676 	ifm = nlmsg_data(nlh);
677 	in_dev = inetdev_by_index(net, ifm->ifa_index);
678 	if (!in_dev) {
679 		NL_SET_ERR_MSG(extack, "ipv4: Device not found");
680 		err = -ENODEV;
681 		goto errout;
682 	}
683 
684 	for (ifap = &in_dev->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL;
685 	     ifap = &ifa->ifa_next) {
686 		if (tb[IFA_LOCAL] &&
687 		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
688 			continue;
689 
690 		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
691 			continue;
692 
693 		if (tb[IFA_ADDRESS] &&
694 		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
695 		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
696 			continue;
697 
698 		if (ipv4_is_multicast(ifa->ifa_address))
699 			ip_mc_autojoin_config(net, false, ifa);
700 		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
701 		return 0;
702 	}
703 
704 	NL_SET_ERR_MSG(extack, "ipv4: Address not found");
705 	err = -EADDRNOTAVAIL;
706 errout:
707 	return err;
708 }
709 
710 #define INFINITY_LIFE_TIME	0xFFFFFFFF
711 
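/* Periodic worker that expires address lifetimes.  Each hash bucket is first
 * scanned locklessly under RCU to see whether anything needs to change; only
 * buckets with expiring entries are then re-walked under RTNL, where expired
 * addresses are deleted and addresses past their preferred lifetime are
 * marked IFA_F_DEPRECATED.  The work is then re-queued for the nearest
 * deadline.
 */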
712 static void check_lifetime(struct work_struct *work)
713 {
714 	unsigned long now, next, next_sec, next_sched;
715 	struct in_ifaddr *ifa;
716 	struct hlist_node *n;
717 	int i;
718 
719 	now = jiffies;
720 	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
721 
722 	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
723 		bool change_needed = false;
724 
725 		rcu_read_lock();
726 		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
727 			unsigned long age;
728 
729 			if (ifa->ifa_flags & IFA_F_PERMANENT)
730 				continue;
731 
732 			/* We try to batch several events at once. */
733 			age = (now - ifa->ifa_tstamp +
734 			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
735 
736 			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
737 			    age >= ifa->ifa_valid_lft) {
738 				change_needed = true;
739 			} else if (ifa->ifa_preferred_lft ==
740 				   INFINITY_LIFE_TIME) {
741 				continue;
742 			} else if (age >= ifa->ifa_preferred_lft) {
743 				if (time_before(ifa->ifa_tstamp +
744 						ifa->ifa_valid_lft * HZ, next))
745 					next = ifa->ifa_tstamp +
746 					       ifa->ifa_valid_lft * HZ;
747 
748 				if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
749 					change_needed = true;
750 			} else if (time_before(ifa->ifa_tstamp +
751 					       ifa->ifa_preferred_lft * HZ,
752 					       next)) {
753 				next = ifa->ifa_tstamp +
754 				       ifa->ifa_preferred_lft * HZ;
755 			}
756 		}
757 		rcu_read_unlock();
758 		if (!change_needed)
759 			continue;
760 		rtnl_lock();
761 		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
762 			unsigned long age;
763 
764 			if (ifa->ifa_flags & IFA_F_PERMANENT)
765 				continue;
766 
767 			/* We try to batch several events at once. */
768 			age = (now - ifa->ifa_tstamp +
769 			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
770 
771 			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
772 			    age >= ifa->ifa_valid_lft) {
773 				struct in_ifaddr __rcu **ifap;
774 				struct in_ifaddr *tmp;
775 
776 				ifap = &ifa->ifa_dev->ifa_list;
777 				tmp = rtnl_dereference(*ifap);
778 				while (tmp) {
779 					if (tmp == ifa) {
780 						inet_del_ifa(ifa->ifa_dev,
781 							     ifap, 1);
782 						break;
783 					}
784 					ifap = &tmp->ifa_next;
785 					tmp = rtnl_dereference(*ifap);
786 				}
787 			} else if (ifa->ifa_preferred_lft !=
788 				   INFINITY_LIFE_TIME &&
789 				   age >= ifa->ifa_preferred_lft &&
790 				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
791 				ifa->ifa_flags |= IFA_F_DEPRECATED;
792 				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
793 			}
794 		}
795 		rtnl_unlock();
796 	}
797 
798 	next_sec = round_jiffies_up(next);
799 	next_sched = next;
800 
801 	/* If rounded timeout is accurate enough, accept it. */
802 	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
803 		next_sched = next_sec;
804 
805 	now = jiffies;
806 	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
807 	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
808 		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
809 
810 	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
811 			next_sched - now);
812 }
813 
814 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
815 			     __u32 prefered_lft)
816 {
817 	unsigned long timeout;
818 
819 	ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
820 
821 	timeout = addrconf_timeout_fixup(valid_lft, HZ);
822 	if (addrconf_finite_timeout(timeout))
823 		ifa->ifa_valid_lft = timeout;
824 	else
825 		ifa->ifa_flags |= IFA_F_PERMANENT;
826 
827 	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
828 	if (addrconf_finite_timeout(timeout)) {
829 		if (timeout == 0)
830 			ifa->ifa_flags |= IFA_F_DEPRECATED;
831 		ifa->ifa_preferred_lft = timeout;
832 	}
833 	ifa->ifa_tstamp = jiffies;
834 	if (!ifa->ifa_cstamp)
835 		ifa->ifa_cstamp = ifa->ifa_tstamp;
836 }
837 
838 static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
839 				       __u32 *pvalid_lft, __u32 *pprefered_lft,
840 				       struct netlink_ext_ack *extack)
841 {
842 	struct nlattr *tb[IFA_MAX+1];
843 	struct in_ifaddr *ifa;
844 	struct ifaddrmsg *ifm;
845 	struct net_device *dev;
846 	struct in_device *in_dev;
847 	int err;
848 
849 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
850 				     ifa_ipv4_policy, extack);
851 	if (err < 0)
852 		goto errout;
853 
854 	ifm = nlmsg_data(nlh);
855 	err = -EINVAL;
856 
857 	if (ifm->ifa_prefixlen > 32) {
858 		NL_SET_ERR_MSG(extack, "ipv4: Invalid prefix length");
859 		goto errout;
860 	}
861 
862 	if (!tb[IFA_LOCAL]) {
863 		NL_SET_ERR_MSG(extack, "ipv4: Local address is not supplied");
864 		goto errout;
865 	}
866 
867 	dev = __dev_get_by_index(net, ifm->ifa_index);
868 	err = -ENODEV;
869 	if (!dev) {
870 		NL_SET_ERR_MSG(extack, "ipv4: Device not found");
871 		goto errout;
872 	}
873 
874 	in_dev = __in_dev_get_rtnl(dev);
875 	err = -ENOBUFS;
876 	if (!in_dev)
877 		goto errout;
878 
879 	ifa = inet_alloc_ifa();
880 	if (!ifa)
881 		/*
882 		 * A potential indev allocation can be left alive; it stays
883 		 * assigned to its device and is destroyed with it.
884 		 */
885 		goto errout;
886 
887 	ipv4_devconf_setall(in_dev);
888 	neigh_parms_data_state_setall(in_dev->arp_parms);
889 	in_dev_hold(in_dev);
890 
891 	if (!tb[IFA_ADDRESS])
892 		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
893 
894 	INIT_HLIST_NODE(&ifa->hash);
895 	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
896 	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
897 	ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
898 					 ifm->ifa_flags;
899 	ifa->ifa_scope = ifm->ifa_scope;
900 	ifa->ifa_dev = in_dev;
901 
902 	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
903 	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
904 
905 	if (tb[IFA_BROADCAST])
906 		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
907 
908 	if (tb[IFA_LABEL])
909 		nla_strscpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
910 	else
911 		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
912 
913 	if (tb[IFA_RT_PRIORITY])
914 		ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
915 
916 	if (tb[IFA_PROTO])
917 		ifa->ifa_proto = nla_get_u8(tb[IFA_PROTO]);
918 
919 	if (tb[IFA_CACHEINFO]) {
920 		struct ifa_cacheinfo *ci;
921 
922 		ci = nla_data(tb[IFA_CACHEINFO]);
923 		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
924 			NL_SET_ERR_MSG(extack, "ipv4: address lifetime invalid");
925 			err = -EINVAL;
926 			goto errout_free;
927 		}
928 		*pvalid_lft = ci->ifa_valid;
929 		*pprefered_lft = ci->ifa_prefered;
930 	}
931 
932 	return ifa;
933 
934 errout_free:
935 	inet_free_ifa(ifa);
936 errout:
937 	return ERR_PTR(err);
938 }
939 
940 static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
941 {
942 	struct in_device *in_dev = ifa->ifa_dev;
943 	struct in_ifaddr *ifa1;
944 
945 	if (!ifa->ifa_local)
946 		return NULL;
947 
948 	in_dev_for_each_ifa_rtnl(ifa1, in_dev) {
949 		if (ifa1->ifa_mask == ifa->ifa_mask &&
950 		    inet_ifa_match(ifa1->ifa_address, ifa) &&
951 		    ifa1->ifa_local == ifa->ifa_local)
952 			return ifa1;
953 	}
954 	return NULL;
955 }
956 
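/* RTM_NEWADDR handler.  If no matching address exists it is created (netlink
 * ABI quirk: NLM_F_CREATE is not required); if one already exists, NLM_F_EXCL
 * or a missing NLM_F_REPLACE yields -EEXIST, otherwise the existing entry's
 * metric, protocol and lifetimes are updated in place.
 */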
957 static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
958 			    struct netlink_ext_ack *extack)
959 {
960 	struct net *net = sock_net(skb->sk);
961 	struct in_ifaddr *ifa;
962 	struct in_ifaddr *ifa_existing;
963 	__u32 valid_lft = INFINITY_LIFE_TIME;
964 	__u32 prefered_lft = INFINITY_LIFE_TIME;
965 
966 	ASSERT_RTNL();
967 
968 	ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack);
969 	if (IS_ERR(ifa))
970 		return PTR_ERR(ifa);
971 
972 	ifa_existing = find_matching_ifa(ifa);
973 	if (!ifa_existing) {
974 		/* It would be best to check for !NLM_F_CREATE here but
975 		 * userspace already relies on not having to provide this.
976 		 */
977 		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
978 		if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
979 			int ret = ip_mc_autojoin_config(net, true, ifa);
980 
981 			if (ret < 0) {
982 				NL_SET_ERR_MSG(extack, "ipv4: Multicast auto join failed");
983 				inet_free_ifa(ifa);
984 				return ret;
985 			}
986 		}
987 		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid,
988 					 extack);
989 	} else {
990 		u32 new_metric = ifa->ifa_rt_priority;
991 		u8 new_proto = ifa->ifa_proto;
992 
993 		inet_free_ifa(ifa);
994 
995 		if (nlh->nlmsg_flags & NLM_F_EXCL ||
996 		    !(nlh->nlmsg_flags & NLM_F_REPLACE)) {
997 			NL_SET_ERR_MSG(extack, "ipv4: Address already assigned");
998 			return -EEXIST;
999 		}
1000 		ifa = ifa_existing;
1001 
1002 		if (ifa->ifa_rt_priority != new_metric) {
1003 			fib_modify_prefix_metric(ifa, new_metric);
1004 			ifa->ifa_rt_priority = new_metric;
1005 		}
1006 
1007 		ifa->ifa_proto = new_proto;
1008 
1009 		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
1010 		cancel_delayed_work(&check_lifetime_work);
1011 		queue_delayed_work(system_power_efficient_wq,
1012 				&check_lifetime_work, 0);
1013 		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
1014 	}
1015 	return 0;
1016 }
1017 
1018 /*
1019  *	Determine a default network mask, based on the IP address.
1020  */
1021 
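/* Classful defaults, e.g. (illustrative):
 *	10.1.2.3    -> class A -> 8-bit prefix
 *	172.16.0.1  -> class B -> 16-bit prefix
 *	192.168.0.1 -> class C -> 24-bit prefix
 * Zeronet and limited-broadcast addresses yield 0; multicast yields -1.
 */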
1022 static int inet_abc_len(__be32 addr)
1023 {
1024 	int rc = -1;	/* Something else, probably a multicast. */
1025 
1026 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
1027 		rc = 0;
1028 	else {
1029 		__u32 haddr = ntohl(addr);
1030 		if (IN_CLASSA(haddr))
1031 			rc = 8;
1032 		else if (IN_CLASSB(haddr))
1033 			rc = 16;
1034 		else if (IN_CLASSC(haddr))
1035 			rc = 24;
1036 		else if (IN_CLASSE(haddr))
1037 			rc = 32;
1038 	}
1039 
1040 	return rc;
1041 }
1042 
1043 
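/* Legacy SIOC*IF* ioctl handler.  For the get/set address ioctls the target
 * ifaddr is found by matching label and address (4.4BSD alias style) when the
 * caller supplied an AF_INET address, falling back to matching the label
 * alone; see the changelog entry for Matthias Andree above.
 */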
1044 int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
1045 {
1046 	struct sockaddr_in sin_orig;
1047 	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
1048 	struct in_ifaddr __rcu **ifap = NULL;
1049 	struct in_device *in_dev;
1050 	struct in_ifaddr *ifa = NULL;
1051 	struct net_device *dev;
1052 	char *colon;
1053 	int ret = -EFAULT;
1054 	int tryaddrmatch = 0;
1055 
1056 	ifr->ifr_name[IFNAMSIZ - 1] = 0;
1057 
1058 	/* save original address for comparison */
1059 	memcpy(&sin_orig, sin, sizeof(*sin));
1060 
1061 	colon = strchr(ifr->ifr_name, ':');
1062 	if (colon)
1063 		*colon = 0;
1064 
1065 	dev_load(net, ifr->ifr_name);
1066 
1067 	switch (cmd) {
1068 	case SIOCGIFADDR:	/* Get interface address */
1069 	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1070 	case SIOCGIFDSTADDR:	/* Get the destination address */
1071 	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1072 		/* Note that these ioctls will not sleep,
1073 		   so that we do not impose a lock.
1074 		   One day we will be forced to put shlock here (I mean SMP)
1075 		 */
1076 		tryaddrmatch = (sin_orig.sin_family == AF_INET);
1077 		memset(sin, 0, sizeof(*sin));
1078 		sin->sin_family = AF_INET;
1079 		break;
1080 
1081 	case SIOCSIFFLAGS:
1082 		ret = -EPERM;
1083 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1084 			goto out;
1085 		break;
1086 	case SIOCSIFADDR:	/* Set interface address (and family) */
1087 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1088 	case SIOCSIFDSTADDR:	/* Set the destination address */
1089 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1090 		ret = -EPERM;
1091 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1092 			goto out;
1093 		ret = -EINVAL;
1094 		if (sin->sin_family != AF_INET)
1095 			goto out;
1096 		break;
1097 	default:
1098 		ret = -EINVAL;
1099 		goto out;
1100 	}
1101 
1102 	rtnl_lock();
1103 
1104 	ret = -ENODEV;
1105 	dev = __dev_get_by_name(net, ifr->ifr_name);
1106 	if (!dev)
1107 		goto done;
1108 
1109 	if (colon)
1110 		*colon = ':';
1111 
1112 	in_dev = __in_dev_get_rtnl(dev);
1113 	if (in_dev) {
1114 		if (tryaddrmatch) {
1115 			/* Matthias Andree */
1116 			/* compare label and address (4.4BSD style) */
1117 			/* note: we only do this for a limited set of ioctls
1118 			   and only if the original address family was AF_INET.
1119 			   This is checked above. */
1120 
1121 			for (ifap = &in_dev->ifa_list;
1122 			     (ifa = rtnl_dereference(*ifap)) != NULL;
1123 			     ifap = &ifa->ifa_next) {
1124 				if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
1125 				    sin_orig.sin_addr.s_addr ==
1126 							ifa->ifa_local) {
1127 					break; /* found */
1128 				}
1129 			}
1130 		}
1131 		/* we didn't get a match, maybe the application is
1132 		   4.3BSD-style and passed in junk so we fall back to
1133 		   comparing just the label */
1134 		if (!ifa) {
1135 			for (ifap = &in_dev->ifa_list;
1136 			     (ifa = rtnl_dereference(*ifap)) != NULL;
1137 			     ifap = &ifa->ifa_next)
1138 				if (!strcmp(ifr->ifr_name, ifa->ifa_label))
1139 					break;
1140 		}
1141 	}
1142 
1143 	ret = -EADDRNOTAVAIL;
1144 	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1145 		goto done;
1146 
1147 	switch (cmd) {
1148 	case SIOCGIFADDR:	/* Get interface address */
1149 		ret = 0;
1150 		sin->sin_addr.s_addr = ifa->ifa_local;
1151 		break;
1152 
1153 	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1154 		ret = 0;
1155 		sin->sin_addr.s_addr = ifa->ifa_broadcast;
1156 		break;
1157 
1158 	case SIOCGIFDSTADDR:	/* Get the destination address */
1159 		ret = 0;
1160 		sin->sin_addr.s_addr = ifa->ifa_address;
1161 		break;
1162 
1163 	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1164 		ret = 0;
1165 		sin->sin_addr.s_addr = ifa->ifa_mask;
1166 		break;
1167 
1168 	case SIOCSIFFLAGS:
1169 		if (colon) {
1170 			ret = -EADDRNOTAVAIL;
1171 			if (!ifa)
1172 				break;
1173 			ret = 0;
1174 			if (!(ifr->ifr_flags & IFF_UP))
1175 				inet_del_ifa(in_dev, ifap, 1);
1176 			break;
1177 		}
1178 		ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
1179 		break;
1180 
1181 	case SIOCSIFADDR:	/* Set interface address (and family) */
1182 		ret = -EINVAL;
1183 		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1184 			break;
1185 
1186 		if (!ifa) {
1187 			ret = -ENOBUFS;
1188 			if (!in_dev)
1189 				break;
1190 			ifa = inet_alloc_ifa();
1191 			if (!ifa)
1192 				break;
1193 			INIT_HLIST_NODE(&ifa->hash);
1194 			if (colon)
1195 				memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
1196 			else
1197 				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1198 		} else {
1199 			ret = 0;
1200 			if (ifa->ifa_local == sin->sin_addr.s_addr)
1201 				break;
1202 			inet_del_ifa(in_dev, ifap, 0);
1203 			ifa->ifa_broadcast = 0;
1204 			ifa->ifa_scope = 0;
1205 		}
1206 
1207 		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
1208 
1209 		if (!(dev->flags & IFF_POINTOPOINT)) {
1210 			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1211 			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1212 			if ((dev->flags & IFF_BROADCAST) &&
1213 			    ifa->ifa_prefixlen < 31)
1214 				ifa->ifa_broadcast = ifa->ifa_address |
1215 						     ~ifa->ifa_mask;
1216 		} else {
1217 			ifa->ifa_prefixlen = 32;
1218 			ifa->ifa_mask = inet_make_mask(32);
1219 		}
1220 		set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1221 		ret = inet_set_ifa(dev, ifa);
1222 		break;
1223 
1224 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1225 		ret = 0;
1226 		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1227 			inet_del_ifa(in_dev, ifap, 0);
1228 			ifa->ifa_broadcast = sin->sin_addr.s_addr;
1229 			inet_insert_ifa(ifa);
1230 		}
1231 		break;
1232 
1233 	case SIOCSIFDSTADDR:	/* Set the destination address */
1234 		ret = 0;
1235 		if (ifa->ifa_address == sin->sin_addr.s_addr)
1236 			break;
1237 		ret = -EINVAL;
1238 		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1239 			break;
1240 		ret = 0;
1241 		inet_del_ifa(in_dev, ifap, 0);
1242 		ifa->ifa_address = sin->sin_addr.s_addr;
1243 		inet_insert_ifa(ifa);
1244 		break;
1245 
1246 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1247 
1248 		/*
1249 		 *	The mask we set must be legal.
1250 		 */
1251 		ret = -EINVAL;
1252 		if (bad_mask(sin->sin_addr.s_addr, 0))
1253 			break;
1254 		ret = 0;
1255 		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1256 			__be32 old_mask = ifa->ifa_mask;
1257 			inet_del_ifa(in_dev, ifap, 0);
1258 			ifa->ifa_mask = sin->sin_addr.s_addr;
1259 			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1260 
1261 			/* If the current broadcast address matches
1262 			 * the old netmask, recalculate the broadcast
1263 			 * address. Otherwise it's a funny address,
1264 			 * so don't touch it since the user seems to
1265 			 * know what (s)he's doing...
1266 			 */
1267 			if ((dev->flags & IFF_BROADCAST) &&
1268 			    (ifa->ifa_prefixlen < 31) &&
1269 			    (ifa->ifa_broadcast ==
1270 			     (ifa->ifa_local|~old_mask))) {
1271 				ifa->ifa_broadcast = (ifa->ifa_local |
1272 						      ~sin->sin_addr.s_addr);
1273 			}
1274 			inet_insert_ifa(ifa);
1275 		}
1276 		break;
1277 	}
1278 done:
1279 	rtnl_unlock();
1280 out:
1281 	return ret;
1282 }
1283 
1284 int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
1285 {
1286 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1287 	const struct in_ifaddr *ifa;
1288 	struct ifreq ifr;
1289 	int done = 0;
1290 
1291 	if (WARN_ON(size > sizeof(struct ifreq)))
1292 		goto out;
1293 
1294 	if (!in_dev)
1295 		goto out;
1296 
1297 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1298 		if (!buf) {
1299 			done += size;
1300 			continue;
1301 		}
1302 		if (len < size)
1303 			break;
1304 		memset(&ifr, 0, sizeof(struct ifreq));
1305 		strcpy(ifr.ifr_name, ifa->ifa_label);
1306 
1307 		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1308 		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1309 								ifa->ifa_local;
1310 
1311 		if (copy_to_user(buf + done, &ifr, size)) {
1312 			done = -EFAULT;
1313 			break;
1314 		}
1315 		len  -= size;
1316 		done += size;
1317 	}
1318 out:
1319 	return done;
1320 }
1321 
1322 static __be32 in_dev_select_addr(const struct in_device *in_dev,
1323 				 int scope)
1324 {
1325 	const struct in_ifaddr *ifa;
1326 
1327 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1328 		if (ifa->ifa_flags & IFA_F_SECONDARY)
1329 			continue;
1330 		if (ifa->ifa_scope != RT_SCOPE_LINK &&
1331 		    ifa->ifa_scope <= scope)
1332 			return ifa->ifa_local;
1333 	}
1334 
1335 	return 0;
1336 }
1337 
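/* Pick a source address on @dev suitable for talking to @dst within @scope:
 * prefer a primary address on the same subnet as @dst, otherwise any primary
 * address of acceptable scope.  If the device has none, fall back to its
 * L3 master (VRF) device and then to the other devices in the namespace.
 */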
1338 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1339 {
1340 	const struct in_ifaddr *ifa;
1341 	__be32 addr = 0;
1342 	unsigned char localnet_scope = RT_SCOPE_HOST;
1343 	struct in_device *in_dev;
1344 	struct net *net = dev_net(dev);
1345 	int master_idx;
1346 
1347 	rcu_read_lock();
1348 	in_dev = __in_dev_get_rcu(dev);
1349 	if (!in_dev)
1350 		goto no_in_dev;
1351 
1352 	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1353 		localnet_scope = RT_SCOPE_LINK;
1354 
1355 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1356 		if (ifa->ifa_flags & IFA_F_SECONDARY)
1357 			continue;
1358 		if (min(ifa->ifa_scope, localnet_scope) > scope)
1359 			continue;
1360 		if (!dst || inet_ifa_match(dst, ifa)) {
1361 			addr = ifa->ifa_local;
1362 			break;
1363 		}
1364 		if (!addr)
1365 			addr = ifa->ifa_local;
1366 	}
1367 
1368 	if (addr)
1369 		goto out_unlock;
1370 no_in_dev:
1371 	master_idx = l3mdev_master_ifindex_rcu(dev);
1372 
1373 	/* For VRFs, the VRF device takes the place of the loopback device,
1374 	 * with addresses on it being preferred.  Note in such cases the
1375 	 * loopback device will be among the devices that fail the master_idx
1376 	 * equality check in the loop below.
1377 	 */
1378 	if (master_idx &&
1379 	    (dev = dev_get_by_index_rcu(net, master_idx)) &&
1380 	    (in_dev = __in_dev_get_rcu(dev))) {
1381 		addr = in_dev_select_addr(in_dev, scope);
1382 		if (addr)
1383 			goto out_unlock;
1384 	}
1385 
1386 	/* Non-loopback addresses on the loopback device should be
1387 	   preferred in this case. It is important that lo is the first
1388 	   interface in the dev_base list.
1389 	 */
1390 	for_each_netdev_rcu(net, dev) {
1391 		if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1392 			continue;
1393 
1394 		in_dev = __in_dev_get_rcu(dev);
1395 		if (!in_dev)
1396 			continue;
1397 
1398 		addr = in_dev_select_addr(in_dev, scope);
1399 		if (addr)
1400 			goto out_unlock;
1401 	}
1402 out_unlock:
1403 	rcu_read_unlock();
1404 	return addr;
1405 }
1406 EXPORT_SYMBOL(inet_select_addr);
1407 
1408 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1409 			      __be32 local, int scope)
1410 {
1411 	unsigned char localnet_scope = RT_SCOPE_HOST;
1412 	const struct in_ifaddr *ifa;
1413 	__be32 addr = 0;
1414 	int same = 0;
1415 
1416 	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1417 		localnet_scope = RT_SCOPE_LINK;
1418 
1419 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1420 		unsigned char min_scope = min(ifa->ifa_scope, localnet_scope);
1421 
1422 		if (!addr &&
1423 		    (local == ifa->ifa_local || !local) &&
1424 		    min_scope <= scope) {
1425 			addr = ifa->ifa_local;
1426 			if (same)
1427 				break;
1428 		}
1429 		if (!same) {
1430 			same = (!local || inet_ifa_match(local, ifa)) &&
1431 				(!dst || inet_ifa_match(dst, ifa));
1432 			if (same && addr) {
1433 				if (local || !dst)
1434 					break;
1435 				/* Is the selected addr in the dst subnet? */
1436 				if (inet_ifa_match(addr, ifa))
1437 					break;
1438 				/* No, then can we use new local src? */
1439 				if (min_scope <= scope) {
1440 					addr = ifa->ifa_local;
1441 					break;
1442 				}
1443 				/* search for large dst subnet for addr */
1444 				same = 0;
1445 			}
1446 		}
1447 	}
1448 
1449 	return same ? addr : 0;
1450 }
1451 
1452 /*
1453  * Confirm that local IP address exists using wildcards:
1454  * - net: netns to check, cannot be NULL
1455  * - in_dev: only on this interface, NULL=any interface
1456  * - dst: only in the same subnet as dst, 0=any dst
1457  * - local: address, 0=autoselect the local address
1458  * - scope: maximum allowed scope value for the local address
1459  */
1460 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1461 			 __be32 dst, __be32 local, int scope)
1462 {
1463 	__be32 addr = 0;
1464 	struct net_device *dev;
1465 
1466 	if (in_dev)
1467 		return confirm_addr_indev(in_dev, dst, local, scope);
1468 
1469 	rcu_read_lock();
1470 	for_each_netdev_rcu(net, dev) {
1471 		in_dev = __in_dev_get_rcu(dev);
1472 		if (in_dev) {
1473 			addr = confirm_addr_indev(in_dev, dst, local, scope);
1474 			if (addr)
1475 				break;
1476 		}
1477 	}
1478 	rcu_read_unlock();
1479 
1480 	return addr;
1481 }
1482 EXPORT_SYMBOL(inet_confirm_addr);
1483 
1484 /*
1485  *	Device notifier
1486  */
1487 
1488 int register_inetaddr_notifier(struct notifier_block *nb)
1489 {
1490 	return blocking_notifier_chain_register(&inetaddr_chain, nb);
1491 }
1492 EXPORT_SYMBOL(register_inetaddr_notifier);
1493 
1494 int unregister_inetaddr_notifier(struct notifier_block *nb)
1495 {
1496 	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1497 }
1498 EXPORT_SYMBOL(unregister_inetaddr_notifier);
1499 
1500 int register_inetaddr_validator_notifier(struct notifier_block *nb)
1501 {
1502 	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
1503 }
1504 EXPORT_SYMBOL(register_inetaddr_validator_notifier);
1505 
1506 int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
1507 {
1508 	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
1509 	    nb);
1510 }
1511 EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
1512 
1513 /* Rename ifa_labels for a device name change. Make some effort to preserve
1514  * existing alias numbering and to create unique labels if possible.
1515 */
1516 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1517 {
1518 	struct in_ifaddr *ifa;
1519 	int named = 0;
1520 
1521 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1522 		char old[IFNAMSIZ], *dot;
1523 
1524 		memcpy(old, ifa->ifa_label, IFNAMSIZ);
1525 		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1526 		if (named++ == 0)
1527 			goto skip;
1528 		dot = strchr(old, ':');
1529 		if (!dot) {
1530 			sprintf(old, ":%d", named);
1531 			dot = old;
1532 		}
1533 		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1534 			strcat(ifa->ifa_label, dot);
1535 		else
1536 			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1537 skip:
1538 		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1539 	}
1540 }
1541 
1542 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1543 					struct in_device *in_dev)
1544 
1545 {
1546 	const struct in_ifaddr *ifa;
1547 
1548 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1549 		arp_send(ARPOP_REQUEST, ETH_P_ARP,
1550 			 ifa->ifa_local, dev,
1551 			 ifa->ifa_local, NULL,
1552 			 dev->dev_addr, NULL);
1553 	}
1554 }
1555 
1556 /* Called only under RTNL semaphore */
1557 
1558 static int inetdev_event(struct notifier_block *this, unsigned long event,
1559 			 void *ptr)
1560 {
1561 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1562 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1563 
1564 	ASSERT_RTNL();
1565 
1566 	if (!in_dev) {
1567 		if (event == NETDEV_REGISTER) {
1568 			in_dev = inetdev_init(dev);
1569 			if (IS_ERR(in_dev))
1570 				return notifier_from_errno(PTR_ERR(in_dev));
1571 			if (dev->flags & IFF_LOOPBACK) {
1572 				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1573 				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1574 			}
1575 		} else if (event == NETDEV_CHANGEMTU) {
1576 			/* Re-enabling IP */
1577 			if (inetdev_valid_mtu(dev->mtu))
1578 				in_dev = inetdev_init(dev);
1579 		}
1580 		goto out;
1581 	}
1582 
1583 	switch (event) {
1584 	case NETDEV_REGISTER:
1585 		pr_debug("%s: bug\n", __func__);
1586 		RCU_INIT_POINTER(dev->ip_ptr, NULL);
1587 		break;
1588 	case NETDEV_UP:
1589 		if (!inetdev_valid_mtu(dev->mtu))
1590 			break;
1591 		if (dev->flags & IFF_LOOPBACK) {
1592 			struct in_ifaddr *ifa = inet_alloc_ifa();
1593 
1594 			if (ifa) {
1595 				INIT_HLIST_NODE(&ifa->hash);
1596 				ifa->ifa_local =
1597 				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
1598 				ifa->ifa_prefixlen = 8;
1599 				ifa->ifa_mask = inet_make_mask(8);
1600 				in_dev_hold(in_dev);
1601 				ifa->ifa_dev = in_dev;
1602 				ifa->ifa_scope = RT_SCOPE_HOST;
1603 				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1604 				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1605 						 INFINITY_LIFE_TIME);
1606 				ipv4_devconf_setall(in_dev);
1607 				neigh_parms_data_state_setall(in_dev->arp_parms);
1608 				inet_insert_ifa(ifa);
1609 			}
1610 		}
1611 		ip_mc_up(in_dev);
1612 		fallthrough;
1613 	case NETDEV_CHANGEADDR:
1614 		if (!IN_DEV_ARP_NOTIFY(in_dev))
1615 			break;
1616 		fallthrough;
1617 	case NETDEV_NOTIFY_PEERS:
1618 		/* Send gratuitous ARP to notify of link change */
1619 		inetdev_send_gratuitous_arp(dev, in_dev);
1620 		break;
1621 	case NETDEV_DOWN:
1622 		ip_mc_down(in_dev);
1623 		break;
1624 	case NETDEV_PRE_TYPE_CHANGE:
1625 		ip_mc_unmap(in_dev);
1626 		break;
1627 	case NETDEV_POST_TYPE_CHANGE:
1628 		ip_mc_remap(in_dev);
1629 		break;
1630 	case NETDEV_CHANGEMTU:
1631 		if (inetdev_valid_mtu(dev->mtu))
1632 			break;
1633 		/* disable IP when MTU is not enough */
1634 		fallthrough;
1635 	case NETDEV_UNREGISTER:
1636 		inetdev_destroy(in_dev);
1637 		break;
1638 	case NETDEV_CHANGENAME:
1639 		/* Do not notify about the label change; this event is
1640 		 * not interesting to applications using netlink.
1641 		 */
1642 		inetdev_changename(dev, in_dev);
1643 
1644 		devinet_sysctl_unregister(in_dev);
1645 		devinet_sysctl_register(in_dev);
1646 		break;
1647 	}
1648 out:
1649 	return NOTIFY_DONE;
1650 }
1651 
1652 static struct notifier_block ip_netdev_notifier = {
1653 	.notifier_call = inetdev_event,
1654 };
1655 
1656 static size_t inet_nlmsg_size(void)
1657 {
1658 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1659 	       + nla_total_size(4) /* IFA_ADDRESS */
1660 	       + nla_total_size(4) /* IFA_LOCAL */
1661 	       + nla_total_size(4) /* IFA_BROADCAST */
1662 	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1663 	       + nla_total_size(4)  /* IFA_FLAGS */
1664 	       + nla_total_size(1)  /* IFA_PROTO */
1665 	       + nla_total_size(4)  /* IFA_RT_PRIORITY */
1666 	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1667 }
1668 
1669 static inline u32 cstamp_delta(unsigned long cstamp)
1670 {
1671 	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1672 }
1673 
1674 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1675 			 unsigned long tstamp, u32 preferred, u32 valid)
1676 {
1677 	struct ifa_cacheinfo ci;
1678 
1679 	ci.cstamp = cstamp_delta(cstamp);
1680 	ci.tstamp = cstamp_delta(tstamp);
1681 	ci.ifa_prefered = preferred;
1682 	ci.ifa_valid = valid;
1683 
1684 	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1685 }
1686 
1687 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1688 			    struct inet_fill_args *args)
1689 {
1690 	struct ifaddrmsg *ifm;
1691 	struct nlmsghdr  *nlh;
1692 	u32 preferred, valid;
1693 
1694 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
1695 			args->flags);
1696 	if (!nlh)
1697 		return -EMSGSIZE;
1698 
1699 	ifm = nlmsg_data(nlh);
1700 	ifm->ifa_family = AF_INET;
1701 	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1702 	ifm->ifa_flags = ifa->ifa_flags;
1703 	ifm->ifa_scope = ifa->ifa_scope;
1704 	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1705 
1706 	if (args->netnsid >= 0 &&
1707 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
1708 		goto nla_put_failure;
1709 
1710 	if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1711 		preferred = ifa->ifa_preferred_lft;
1712 		valid = ifa->ifa_valid_lft;
1713 		if (preferred != INFINITY_LIFE_TIME) {
1714 			long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1715 
1716 			if (preferred > tval)
1717 				preferred -= tval;
1718 			else
1719 				preferred = 0;
1720 			if (valid != INFINITY_LIFE_TIME) {
1721 				if (valid > tval)
1722 					valid -= tval;
1723 				else
1724 					valid = 0;
1725 			}
1726 		}
1727 	} else {
1728 		preferred = INFINITY_LIFE_TIME;
1729 		valid = INFINITY_LIFE_TIME;
1730 	}
1731 	if ((ifa->ifa_address &&
1732 	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1733 	    (ifa->ifa_local &&
1734 	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1735 	    (ifa->ifa_broadcast &&
1736 	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1737 	    (ifa->ifa_label[0] &&
1738 	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1739 	    (ifa->ifa_proto &&
1740 	     nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
1741 	    nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1742 	    (ifa->ifa_rt_priority &&
1743 	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
1744 	    put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1745 			  preferred, valid))
1746 		goto nla_put_failure;
1747 
1748 	nlmsg_end(skb, nlh);
1749 	return 0;
1750 
1751 nla_put_failure:
1752 	nlmsg_cancel(skb, nlh);
1753 	return -EMSGSIZE;
1754 }
1755 
1756 static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
1757 				      struct inet_fill_args *fillargs,
1758 				      struct net **tgt_net, struct sock *sk,
1759 				      struct netlink_callback *cb)
1760 {
1761 	struct netlink_ext_ack *extack = cb->extack;
1762 	struct nlattr *tb[IFA_MAX+1];
1763 	struct ifaddrmsg *ifm;
1764 	int err, i;
1765 
1766 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
1767 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
1768 		return -EINVAL;
1769 	}
1770 
1771 	ifm = nlmsg_data(nlh);
1772 	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
1773 		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
1774 		return -EINVAL;
1775 	}
1776 
1777 	fillargs->ifindex = ifm->ifa_index;
1778 	if (fillargs->ifindex) {
1779 		cb->answer_flags |= NLM_F_DUMP_FILTERED;
1780 		fillargs->flags |= NLM_F_DUMP_FILTERED;
1781 	}
1782 
1783 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
1784 					    ifa_ipv4_policy, extack);
1785 	if (err < 0)
1786 		return err;
1787 
1788 	for (i = 0; i <= IFA_MAX; ++i) {
1789 		if (!tb[i])
1790 			continue;
1791 
1792 		if (i == IFA_TARGET_NETNSID) {
1793 			struct net *net;
1794 
1795 			fillargs->netnsid = nla_get_s32(tb[i]);
1796 
1797 			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
1798 			if (IS_ERR(net)) {
1799 				fillargs->netnsid = -1;
1800 				NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
1801 				return PTR_ERR(net);
1802 			}
1803 			*tgt_net = net;
1804 		} else {
1805 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
1806 			return -EINVAL;
1807 		}
1808 	}
1809 
1810 	return 0;
1811 }
1812 
1813 static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
1814 			    struct netlink_callback *cb, int s_ip_idx,
1815 			    struct inet_fill_args *fillargs)
1816 {
1817 	struct in_ifaddr *ifa;
1818 	int ip_idx = 0;
1819 	int err;
1820 
1821 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1822 		if (ip_idx < s_ip_idx) {
1823 			ip_idx++;
1824 			continue;
1825 		}
1826 		err = inet_fill_ifaddr(skb, ifa, fillargs);
1827 		if (err < 0)
1828 			goto done;
1829 
1830 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1831 		ip_idx++;
1832 	}
1833 	err = 0;
1834 
1835 done:
1836 	cb->args[2] = ip_idx;
1837 
1838 	return err;
1839 }
1840 
1841 /* Combine dev_addr_genid and dev_base_seq to detect changes.
1842  */
1843 static u32 inet_base_seq(const struct net *net)
1844 {
1845 	u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
1846 		  net->dev_base_seq;
1847 
1848 	/* Must not return 0 (see nl_dump_check_consistent()).
1849 	 * Choose a value far away from 0.
1850 	 */
1851 	if (!res)
1852 		res = 0x80000000;
1853 	return res;
1854 }
1855 
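/* RTM_GETADDR dump handler (the request behind e.g. "ip -4 addr show"):
 * walks the per-namespace device hash table, resuming from cb->args[],
 * or dumps a single device when a strict request filtered on ifa_index.
 * cb->seq is primed with inet_base_seq() so nl_dump_check_consistent()
 * can flag the dump as interrupted if addresses change in between.
 */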
1856 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1857 {
1858 	const struct nlmsghdr *nlh = cb->nlh;
1859 	struct inet_fill_args fillargs = {
1860 		.portid = NETLINK_CB(cb->skb).portid,
1861 		.seq = nlh->nlmsg_seq,
1862 		.event = RTM_NEWADDR,
1863 		.flags = NLM_F_MULTI,
1864 		.netnsid = -1,
1865 	};
1866 	struct net *net = sock_net(skb->sk);
1867 	struct net *tgt_net = net;
1868 	int h, s_h;
1869 	int idx, s_idx;
1870 	int s_ip_idx;
1871 	struct net_device *dev;
1872 	struct in_device *in_dev;
1873 	struct hlist_head *head;
1874 	int err = 0;
1875 
1876 	s_h = cb->args[0];
1877 	s_idx = idx = cb->args[1];
1878 	s_ip_idx = cb->args[2];
1879 
1880 	if (cb->strict_check) {
1881 		err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
1882 						 skb->sk, cb);
1883 		if (err < 0)
1884 			goto put_tgt_net;
1885 
1886 		err = 0;
1887 		if (fillargs.ifindex) {
1888 			dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
1889 			if (!dev) {
1890 				err = -ENODEV;
1891 				goto put_tgt_net;
1892 			}
1893 
1894 			in_dev = __in_dev_get_rtnl(dev);
1895 			if (in_dev) {
1896 				err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
1897 						       &fillargs);
1898 			}
1899 			goto put_tgt_net;
1900 		}
1901 	}
1902 
1903 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1904 		idx = 0;
1905 		head = &tgt_net->dev_index_head[h];
1906 		rcu_read_lock();
1907 		cb->seq = inet_base_seq(tgt_net);
1908 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
1909 			if (idx < s_idx)
1910 				goto cont;
1911 			if (h > s_h || idx > s_idx)
1912 				s_ip_idx = 0;
1913 			in_dev = __in_dev_get_rcu(dev);
1914 			if (!in_dev)
1915 				goto cont;
1916 
1917 			err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
1918 					       &fillargs);
1919 			if (err < 0) {
1920 				rcu_read_unlock();
1921 				goto done;
1922 			}
1923 cont:
1924 			idx++;
1925 		}
1926 		rcu_read_unlock();
1927 	}
1928 
1929 done:
1930 	cb->args[0] = h;
1931 	cb->args[1] = idx;
1932 put_tgt_net:
1933 	if (fillargs.netnsid >= 0)
1934 		put_net(tgt_net);
1935 
1936 	return skb->len ? : err;
1937 }
1938 
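/* Notify RTNLGRP_IPV4_IFADDR listeners about an address change. On
 * allocation or fill failure the error is recorded with rtnl_set_sk_err()
 * so subscribers see ENOBUFS instead of silently missing the event.
 */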
1939 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1940 		      u32 portid)
1941 {
1942 	struct inet_fill_args fillargs = {
1943 		.portid = portid,
1944 		.seq = nlh ? nlh->nlmsg_seq : 0,
1945 		.event = event,
1946 		.flags = 0,
1947 		.netnsid = -1,
1948 	};
1949 	struct sk_buff *skb;
1950 	int err = -ENOBUFS;
1951 	struct net *net;
1952 
1953 	net = dev_net(ifa->ifa_dev->dev);
1954 	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1955 	if (!skb)
1956 		goto errout;
1957 
1958 	err = inet_fill_ifaddr(skb, ifa, &fillargs);
1959 	if (err < 0) {
1960 		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1961 		WARN_ON(err == -EMSGSIZE);
1962 		kfree_skb(skb);
1963 		goto errout;
1964 	}
1965 	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1966 	return;
1967 errout:
1968 	if (err < 0)
1969 		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1970 }
1971 
1972 static size_t inet_get_link_af_size(const struct net_device *dev,
1973 				    u32 ext_filter_mask)
1974 {
1975 	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1976 
1977 	if (!in_dev)
1978 		return 0;
1979 
1980 	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1981 }
1982 
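/* Fill IFLA_INET_CONF for an RTM_GETLINK reply: the whole per-device
 * ipv4_devconf array is copied out verbatim as u32 values, using the
 * space accounted for by inet_get_link_af_size() above.
 */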
1983 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1984 			     u32 ext_filter_mask)
1985 {
1986 	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1987 	struct nlattr *nla;
1988 	int i;
1989 
1990 	if (!in_dev)
1991 		return -ENODATA;
1992 
1993 	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1994 	if (!nla)
1995 		return -EMSGSIZE;
1996 
1997 	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1998 		((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
1999 
2000 	return 0;
2001 }
2002 
2003 static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
2004 	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
2005 };
2006 
2007 static int inet_validate_link_af(const struct net_device *dev,
2008 				 const struct nlattr *nla,
2009 				 struct netlink_ext_ack *extack)
2010 {
2011 	struct nlattr *a, *tb[IFLA_INET_MAX+1];
2012 	int err, rem;
2013 
2014 	if (dev && !__in_dev_get_rtnl(dev))
2015 		return -EAFNOSUPPORT;
2016 
2017 	err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
2018 					  inet_af_policy, extack);
2019 	if (err < 0)
2020 		return err;
2021 
2022 	if (tb[IFLA_INET_CONF]) {
2023 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
2024 			int cfgid = nla_type(a);
2025 
2026 			if (nla_len(a) < 4)
2027 				return -EINVAL;
2028 
2029 			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
2030 				return -EINVAL;
2031 		}
2032 	}
2033 
2034 	return 0;
2035 }
2036 
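/* Apply IFLA_INET_CONF values supplied via the IFLA_AF_SPEC path of an
 * RTM_NEWLINK/RTM_SETLINK request. The nested attributes are parsed
 * leniently here; they are expected to have passed
 * inet_validate_link_af() already.
 */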
2037 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
2038 			    struct netlink_ext_ack *extack)
2039 {
2040 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
2041 	struct nlattr *a, *tb[IFLA_INET_MAX+1];
2042 	int rem;
2043 
2044 	if (!in_dev)
2045 		return -EAFNOSUPPORT;
2046 
2047 	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
2048 		return -EINVAL;
2049 
2050 	if (tb[IFLA_INET_CONF]) {
2051 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
2052 			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
2053 	}
2054 
2055 	return 0;
2056 }
2057 
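/* Worst-case message size for a netconf notification: the netconfmsg
 * header, NETCONFA_IFINDEX, plus one 4-byte attribute for each knob that
 * will be emitted for this 'type' (all of them for NETCONFA_ALL).
 */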
2058 static int inet_netconf_msgsize_devconf(int type)
2059 {
2060 	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
2061 		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
2062 	bool all = false;
2063 
2064 	if (type == NETCONFA_ALL)
2065 		all = true;
2066 
2067 	if (all || type == NETCONFA_FORWARDING)
2068 		size += nla_total_size(4);
2069 	if (all || type == NETCONFA_RP_FILTER)
2070 		size += nla_total_size(4);
2071 	if (all || type == NETCONFA_MC_FORWARDING)
2072 		size += nla_total_size(4);
2073 	if (all || type == NETCONFA_BC_FORWARDING)
2074 		size += nla_total_size(4);
2075 	if (all || type == NETCONFA_PROXY_NEIGH)
2076 		size += nla_total_size(4);
2077 	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
2078 		size += nla_total_size(4);
2079 
2080 	return size;
2081 }
2082 
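/* Build one RTM_NEWNETCONF (or RTM_DELNETCONF) message for an
 * ipv4_devconf. With a NULL devconf only the ifindex is reported, as used
 * when a device's configuration is being torn down; otherwise each
 * requested value is emitted as a signed 32-bit attribute.
 */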
2083 static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
2084 				     struct ipv4_devconf *devconf, u32 portid,
2085 				     u32 seq, int event, unsigned int flags,
2086 				     int type)
2087 {
2088 	struct nlmsghdr  *nlh;
2089 	struct netconfmsg *ncm;
2090 	bool all = false;
2091 
2092 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
2093 			flags);
2094 	if (!nlh)
2095 		return -EMSGSIZE;
2096 
2097 	if (type == NETCONFA_ALL)
2098 		all = true;
2099 
2100 	ncm = nlmsg_data(nlh);
2101 	ncm->ncm_family = AF_INET;
2102 
2103 	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
2104 		goto nla_put_failure;
2105 
2106 	if (!devconf)
2107 		goto out;
2108 
2109 	if ((all || type == NETCONFA_FORWARDING) &&
2110 	    nla_put_s32(skb, NETCONFA_FORWARDING,
2111 			IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
2112 		goto nla_put_failure;
2113 	if ((all || type == NETCONFA_RP_FILTER) &&
2114 	    nla_put_s32(skb, NETCONFA_RP_FILTER,
2115 			IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
2116 		goto nla_put_failure;
2117 	if ((all || type == NETCONFA_MC_FORWARDING) &&
2118 	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
2119 			IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
2120 		goto nla_put_failure;
2121 	if ((all || type == NETCONFA_BC_FORWARDING) &&
2122 	    nla_put_s32(skb, NETCONFA_BC_FORWARDING,
2123 			IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
2124 		goto nla_put_failure;
2125 	if ((all || type == NETCONFA_PROXY_NEIGH) &&
2126 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
2127 			IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
2128 		goto nla_put_failure;
2129 	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
2130 	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2131 			IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
2132 		goto nla_put_failure;
2133 
2134 out:
2135 	nlmsg_end(skb, nlh);
2136 	return 0;
2137 
2138 nla_put_failure:
2139 	nlmsg_cancel(skb, nlh);
2140 	return -EMSGSIZE;
2141 }
2142 
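/* Broadcast a netconf change on RTNLGRP_IPV4_NETCONF, with the same
 * error handling as rtmsg_ifa() above.
 */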
2143 void inet_netconf_notify_devconf(struct net *net, int event, int type,
2144 				 int ifindex, struct ipv4_devconf *devconf)
2145 {
2146 	struct sk_buff *skb;
2147 	int err = -ENOBUFS;
2148 
2149 	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
2150 	if (!skb)
2151 		goto errout;
2152 
2153 	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
2154 					event, 0, type);
2155 	if (err < 0) {
2156 		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2157 		WARN_ON(err == -EMSGSIZE);
2158 		kfree_skb(skb);
2159 		goto errout;
2160 	}
2161 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
2162 	return;
2163 errout:
2164 	if (err < 0)
2165 		rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
2166 }
2167 
2168 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
2169 	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
2170 	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
2171 	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
2172 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
2173 	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
2174 };
2175 
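/* Validate an RTM_GETNETCONF get request: sockets that did not opt into
 * strict checking keep the lenient legacy parse, strict ones may supply
 * NETCONFA_IFINDEX and nothing else.
 */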
2176 static int inet_netconf_valid_get_req(struct sk_buff *skb,
2177 				      const struct nlmsghdr *nlh,
2178 				      struct nlattr **tb,
2179 				      struct netlink_ext_ack *extack)
2180 {
2181 	int i, err;
2182 
2183 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
2184 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
2185 		return -EINVAL;
2186 	}
2187 
2188 	if (!netlink_strict_get_check(skb))
2189 		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
2190 					      tb, NETCONFA_MAX,
2191 					      devconf_ipv4_policy, extack);
2192 
2193 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
2194 					    tb, NETCONFA_MAX,
2195 					    devconf_ipv4_policy, extack);
2196 	if (err)
2197 		return err;
2198 
2199 	for (i = 0; i <= NETCONFA_MAX; i++) {
2200 		if (!tb[i])
2201 			continue;
2202 
2203 		switch (i) {
2204 		case NETCONFA_IFINDEX:
2205 			break;
2206 		default:
2207 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
2208 			return -EINVAL;
2209 		}
2210 	}
2211 
2212 	return 0;
2213 }
2214 
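/* RTM_GETNETCONF doit handler: map the requested ifindex (including the
 * NETCONFA_IFINDEX_ALL and NETCONFA_IFINDEX_DEFAULT pseudo indices) to an
 * ipv4_devconf and unicast a full NETCONFA_ALL snapshot back to the
 * requester.
 */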
2215 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
2216 				    struct nlmsghdr *nlh,
2217 				    struct netlink_ext_ack *extack)
2218 {
2219 	struct net *net = sock_net(in_skb->sk);
2220 	struct nlattr *tb[NETCONFA_MAX+1];
2221 	struct sk_buff *skb;
2222 	struct ipv4_devconf *devconf;
2223 	struct in_device *in_dev;
2224 	struct net_device *dev;
2225 	int ifindex;
2226 	int err;
2227 
2228 	err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
2229 	if (err)
2230 		goto errout;
2231 
2232 	err = -EINVAL;
2233 	if (!tb[NETCONFA_IFINDEX])
2234 		goto errout;
2235 
2236 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
2237 	switch (ifindex) {
2238 	case NETCONFA_IFINDEX_ALL:
2239 		devconf = net->ipv4.devconf_all;
2240 		break;
2241 	case NETCONFA_IFINDEX_DEFAULT:
2242 		devconf = net->ipv4.devconf_dflt;
2243 		break;
2244 	default:
2245 		dev = __dev_get_by_index(net, ifindex);
2246 		if (!dev)
2247 			goto errout;
2248 		in_dev = __in_dev_get_rtnl(dev);
2249 		if (!in_dev)
2250 			goto errout;
2251 		devconf = &in_dev->cnf;
2252 		break;
2253 	}
2254 
2255 	err = -ENOBUFS;
2256 	skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
2257 	if (!skb)
2258 		goto errout;
2259 
2260 	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
2261 					NETLINK_CB(in_skb).portid,
2262 					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
2263 					NETCONFA_ALL);
2264 	if (err < 0) {
2265 		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2266 		WARN_ON(err == -EMSGSIZE);
2267 		kfree_skb(skb);
2268 		goto errout;
2269 	}
2270 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2271 errout:
2272 	return err;
2273 }
2274 
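/* RTM_GETNETCONF dump handler: one RTM_NEWNETCONF message per device,
 * followed by one for the "all" and one for the "default" configuration;
 * hash bucket values of NETDEV_HASHENTRIES and NETDEV_HASHENTRIES + 1 in
 * cb->args[0] mark how far those trailing entries got.
 */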
2275 static int inet_netconf_dump_devconf(struct sk_buff *skb,
2276 				     struct netlink_callback *cb)
2277 {
2278 	const struct nlmsghdr *nlh = cb->nlh;
2279 	struct net *net = sock_net(skb->sk);
2280 	int h, s_h;
2281 	int idx, s_idx;
2282 	struct net_device *dev;
2283 	struct in_device *in_dev;
2284 	struct hlist_head *head;
2285 
2286 	if (cb->strict_check) {
2287 		struct netlink_ext_ack *extack = cb->extack;
2288 		struct netconfmsg *ncm;
2289 
2290 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
2291 			NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
2292 			return -EINVAL;
2293 		}
2294 
2295 		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
2296 			NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
2297 			return -EINVAL;
2298 		}
2299 	}
2300 
2301 	s_h = cb->args[0];
2302 	s_idx = idx = cb->args[1];
2303 
2304 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2305 		idx = 0;
2306 		head = &net->dev_index_head[h];
2307 		rcu_read_lock();
2308 		cb->seq = inet_base_seq(net);
2309 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
2310 			if (idx < s_idx)
2311 				goto cont;
2312 			in_dev = __in_dev_get_rcu(dev);
2313 			if (!in_dev)
2314 				goto cont;
2315 
2316 			if (inet_netconf_fill_devconf(skb, dev->ifindex,
2317 						      &in_dev->cnf,
2318 						      NETLINK_CB(cb->skb).portid,
2319 						      nlh->nlmsg_seq,
2320 						      RTM_NEWNETCONF,
2321 						      NLM_F_MULTI,
2322 						      NETCONFA_ALL) < 0) {
2323 				rcu_read_unlock();
2324 				goto done;
2325 			}
2326 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2327 cont:
2328 			idx++;
2329 		}
2330 		rcu_read_unlock();
2331 	}
2332 	if (h == NETDEV_HASHENTRIES) {
2333 		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
2334 					      net->ipv4.devconf_all,
2335 					      NETLINK_CB(cb->skb).portid,
2336 					      nlh->nlmsg_seq,
2337 					      RTM_NEWNETCONF, NLM_F_MULTI,
2338 					      NETCONFA_ALL) < 0)
2339 			goto done;
2340 		else
2341 			h++;
2342 	}
2343 	if (h == NETDEV_HASHENTRIES + 1) {
2344 		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
2345 					      net->ipv4.devconf_dflt,
2346 					      NETLINK_CB(cb->skb).portid,
2347 					      nlh->nlmsg_seq,
2348 					      RTM_NEWNETCONF, NLM_F_MULTI,
2349 					      NETCONFA_ALL) < 0)
2350 			goto done;
2351 		else
2352 			h++;
2353 	}
2354 done:
2355 	cb->args[0] = h;
2356 	cb->args[1] = idx;
2357 
2358 	return skb->len;
2359 }
2360 
2361 #ifdef CONFIG_SYSCTL
2362 
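/* Propagate a changed "default" value to every in_device whose
 * corresponding knob has not been set explicitly yet (its bit in
 * cnf.state is still clear).
 */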
2363 static void devinet_copy_dflt_conf(struct net *net, int i)
2364 {
2365 	struct net_device *dev;
2366 
2367 	rcu_read_lock();
2368 	for_each_netdev_rcu(net, dev) {
2369 		struct in_device *in_dev;
2370 
2371 		in_dev = __in_dev_get_rcu(dev);
2372 		if (in_dev && !test_bit(i, in_dev->cnf.state))
2373 			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2374 	}
2375 	rcu_read_unlock();
2376 }
2377 
2378 /* called with RTNL locked */
2379 static void inet_forward_change(struct net *net)
2380 {
2381 	struct net_device *dev;
2382 	int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2383 
2384 	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2385 	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2386 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2387 				    NETCONFA_FORWARDING,
2388 				    NETCONFA_IFINDEX_ALL,
2389 				    net->ipv4.devconf_all);
2390 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2391 				    NETCONFA_FORWARDING,
2392 				    NETCONFA_IFINDEX_DEFAULT,
2393 				    net->ipv4.devconf_dflt);
2394 
2395 	for_each_netdev(net, dev) {
2396 		struct in_device *in_dev;
2397 
2398 		if (on)
2399 			dev_disable_lro(dev);
2400 
2401 		in_dev = __in_dev_get_rtnl(dev);
2402 		if (in_dev) {
2403 			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2404 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2405 						    NETCONFA_FORWARDING,
2406 						    dev->ifindex, &in_dev->cnf);
2407 		}
2408 	}
2409 }
2410 
2411 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2412 {
2413 	if (cnf == net->ipv4.devconf_dflt)
2414 		return NETCONFA_IFINDEX_DEFAULT;
2415 	else if (cnf == net->ipv4.devconf_all)
2416 		return NETCONFA_IFINDEX_ALL;
2417 	else {
2418 		struct in_device *idev
2419 			= container_of(cnf, struct in_device, cnf);
2420 		return idev->dev->ifindex;
2421 	}
2422 }
2423 
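/* proc handler shared by most per-device knobs: record that the value was
 * set explicitly (so default propagation skips it), flush the routing
 * cache for knobs whose old behaviour may be cached, and send a netconf
 * notification for the knobs that are mirrored over netlink.
 */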
2424 static int devinet_conf_proc(struct ctl_table *ctl, int write,
2425 			     void *buffer, size_t *lenp, loff_t *ppos)
2426 {
2427 	int old_value = *(int *)ctl->data;
2428 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2429 	int new_value = *(int *)ctl->data;
2430 
2431 	if (write) {
2432 		struct ipv4_devconf *cnf = ctl->extra1;
2433 		struct net *net = ctl->extra2;
2434 		int i = (int *)ctl->data - cnf->data;
2435 		int ifindex;
2436 
2437 		set_bit(i, cnf->state);
2438 
2439 		if (cnf == net->ipv4.devconf_dflt)
2440 			devinet_copy_dflt_conf(net, i);
2441 		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2442 		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2443 			if ((new_value == 0) && (old_value != 0))
2444 				rt_cache_flush(net);
2445 
2446 		if (i == IPV4_DEVCONF_BC_FORWARDING - 1 &&
2447 		    new_value != old_value)
2448 			rt_cache_flush(net);
2449 
2450 		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2451 		    new_value != old_value) {
2452 			ifindex = devinet_conf_ifindex(net, cnf);
2453 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2454 						    NETCONFA_RP_FILTER,
2455 						    ifindex, cnf);
2456 		}
2457 		if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2458 		    new_value != old_value) {
2459 			ifindex = devinet_conf_ifindex(net, cnf);
2460 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2461 						    NETCONFA_PROXY_NEIGH,
2462 						    ifindex, cnf);
2463 		}
2464 		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2465 		    new_value != old_value) {
2466 			ifindex = devinet_conf_ifindex(net, cnf);
2467 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2468 						    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2469 						    ifindex, cnf);
2470 		}
2471 	}
2472 
2473 	return ret;
2474 }
2475 
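/* proc handler for "forwarding": requires CAP_NET_ADMIN in the owning
 * user namespace, and any change other than to the per-namespace default
 * must run under RTNL (restarting the syscall if the lock is contended),
 * disable LRO when forwarding is switched on, and flush the route cache.
 */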
2476 static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2477 				  void *buffer, size_t *lenp, loff_t *ppos)
2478 {
2479 	int *valp = ctl->data;
2480 	int val = *valp;
2481 	loff_t pos = *ppos;
2482 	struct net *net = ctl->extra2;
2483 	int ret;
2484 
2485 	if (write && !ns_capable(net->user_ns, CAP_NET_ADMIN))
2486 		return -EPERM;
2487 
2488 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2489 
2490 	if (write && *valp != val) {
2491 		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2492 			if (!rtnl_trylock()) {
2493 				/* Restore the original values before restarting */
2494 				*valp = val;
2495 				*ppos = pos;
2496 				return restart_syscall();
2497 			}
2498 			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2499 				inet_forward_change(net);
2500 			} else {
2501 				struct ipv4_devconf *cnf = ctl->extra1;
2502 				struct in_device *idev =
2503 					container_of(cnf, struct in_device, cnf);
2504 				if (*valp)
2505 					dev_disable_lro(idev->dev);
2506 				inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2507 							    NETCONFA_FORWARDING,
2508 							    idev->dev->ifindex,
2509 							    cnf);
2510 			}
2511 			rtnl_unlock();
2512 			rt_cache_flush(net);
2513 		} else
2514 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2515 						    NETCONFA_FORWARDING,
2516 						    NETCONFA_IFINDEX_DEFAULT,
2517 						    net->ipv4.devconf_dflt);
2518 	}
2519 
2520 	return ret;
2521 }
2522 
2523 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2524 				void *buffer, size_t *lenp, loff_t *ppos)
2525 {
2526 	int *valp = ctl->data;
2527 	int val = *valp;
2528 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2529 	struct net *net = ctl->extra2;
2530 
2531 	if (write && *valp != val)
2532 		rt_cache_flush(net);
2533 
2534 	return ret;
2535 }
2536 
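/* Template for one row of the per-device sysctl table below. Note that
 * .data points into the global ipv4_devconf; __devinet_sysctl_register()
 * later rebases it onto the per-device (or per-namespace) copy.
 */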
2537 #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2538 	{ \
2539 		.procname	= name, \
2540 		.data		= ipv4_devconf.data + \
2541 				  IPV4_DEVCONF_ ## attr - 1, \
2542 		.maxlen		= sizeof(int), \
2543 		.mode		= mval, \
2544 		.proc_handler	= proc, \
2545 		.extra1		= &ipv4_devconf, \
2546 	}
2547 
2548 #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2549 	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
2550 
2551 #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2552 	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
2553 
2554 #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2555 	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
2556 
2557 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2558 	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2559 
2560 static struct devinet_sysctl_table {
2561 	struct ctl_table_header *sysctl_header;
2562 	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2563 } devinet_sysctl = {
2564 	.devinet_vars = {
2565 		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2566 					     devinet_sysctl_forward),
2567 		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2568 		DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"),
2569 
2570 		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2571 		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2572 		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2573 		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2574 		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2575 		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2576 					"accept_source_route"),
2577 		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2578 		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2579 		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2580 		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2581 		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2582 		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2583 		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2584 		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2585 		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2586 		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2587 		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2588 		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2589 		DEVINET_SYSCTL_RW_ENTRY(ARP_EVICT_NOCARRIER,
2590 					"arp_evict_nocarrier"),
2591 		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2592 		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2593 					"force_igmp_version"),
2594 		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2595 					"igmpv2_unsolicited_report_interval"),
2596 		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2597 					"igmpv3_unsolicited_report_interval"),
2598 		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2599 					"ignore_routes_with_linkdown"),
2600 		DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2601 					"drop_gratuitous_arp"),
2602 
2603 		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2604 		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2605 		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2606 					      "promote_secondaries"),
2607 		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2608 					      "route_localnet"),
2609 		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2610 					      "drop_unicast_in_l2_multicast"),
2611 	},
2612 };
2613 
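/* Register net/ipv4/conf/<dev_name>/ for one ipv4_devconf: duplicate the
 * template table, rebase every .data pointer from the global ipv4_devconf
 * onto 'p', and announce the initial state with RTM_NEWNETCONF.
 */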
2614 static int __devinet_sysctl_register(struct net *net, char *dev_name,
2615 				     int ifindex, struct ipv4_devconf *p)
2616 {
2617 	int i;
2618 	struct devinet_sysctl_table *t;
2619 	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2620 
2621 	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL_ACCOUNT);
2622 	if (!t)
2623 		goto out;
2624 
2625 	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2626 		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2627 		t->devinet_vars[i].extra1 = p;
2628 		t->devinet_vars[i].extra2 = net;
2629 	}
2630 
2631 	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2632 
2633 	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2634 	if (!t->sysctl_header)
2635 		goto free;
2636 
2637 	p->sysctl = t;
2638 
2639 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
2640 				    ifindex, p);
2641 	return 0;
2642 
2643 free:
2644 	kfree(t);
2645 out:
2646 	return -ENOMEM;
2647 }
2648 
2649 static void __devinet_sysctl_unregister(struct net *net,
2650 					struct ipv4_devconf *cnf, int ifindex)
2651 {
2652 	struct devinet_sysctl_table *t = cnf->sysctl;
2653 
2654 	if (t) {
2655 		cnf->sysctl = NULL;
2656 		unregister_net_sysctl_table(t->sysctl_header);
2657 		kfree(t);
2658 	}
2659 
2660 	inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
2661 }
2662 
2663 static int devinet_sysctl_register(struct in_device *idev)
2664 {
2665 	int err;
2666 
2667 	if (!sysctl_dev_name_is_allowed(idev->dev->name))
2668 		return -EINVAL;
2669 
2670 	err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2671 	if (err)
2672 		return err;
2673 	err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2674 					idev->dev->ifindex, &idev->cnf);
2675 	if (err)
2676 		neigh_sysctl_unregister(idev->arp_parms);
2677 	return err;
2678 }
2679 
2680 static void devinet_sysctl_unregister(struct in_device *idev)
2681 {
2682 	struct net *net = dev_net(idev->dev);
2683 
2684 	__devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex);
2685 	neigh_sysctl_unregister(idev->arp_parms);
2686 }
2687 
2688 static struct ctl_table ctl_forward_entry[] = {
2689 	{
2690 		.procname	= "ip_forward",
2691 		.data		= &ipv4_devconf.data[
2692 					IPV4_DEVCONF_FORWARDING - 1],
2693 		.maxlen		= sizeof(int),
2694 		.mode		= 0644,
2695 		.proc_handler	= devinet_sysctl_forward,
2696 		.extra1		= &ipv4_devconf,
2697 		.extra2		= &init_net,
2698 	},
2699 	{ },
2700 };
2701 #endif
2702 
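/* Per-namespace init: allocate this netns' "all" and "default" devconf,
 * seed them from init_net, from the creating namespace or from the
 * compiled-in defaults as selected by net_inherit_devconf(), then register
 * the matching sysctl trees plus the legacy net/ipv4/ip_forward entry.
 */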
2703 static __net_init int devinet_init_net(struct net *net)
2704 {
2705 	int err;
2706 	struct ipv4_devconf *all, *dflt;
2707 #ifdef CONFIG_SYSCTL
2708 	struct ctl_table *tbl;
2709 	struct ctl_table_header *forw_hdr;
2710 #endif
2711 
2712 	err = -ENOMEM;
2713 	all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
2714 	if (!all)
2715 		goto err_alloc_all;
2716 
2717 	dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2718 	if (!dflt)
2719 		goto err_alloc_dflt;
2720 
2721 #ifdef CONFIG_SYSCTL
2722 	tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
2723 	if (!tbl)
2724 		goto err_alloc_ctl;
2725 
2726 	tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2727 	tbl[0].extra1 = all;
2728 	tbl[0].extra2 = net;
2729 #endif
2730 
2731 	if (!net_eq(net, &init_net)) {
2732 		switch (net_inherit_devconf()) {
2733 		case 3:
2734 			/* copy from the current netns */
2735 			memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
2736 			       sizeof(ipv4_devconf));
2737 			memcpy(dflt,
2738 			       current->nsproxy->net_ns->ipv4.devconf_dflt,
2739 			       sizeof(ipv4_devconf_dflt));
2740 			break;
2741 		case 0:
2742 		case 1:
2743 			/* copy from init_net */
2744 			memcpy(all, init_net.ipv4.devconf_all,
2745 			       sizeof(ipv4_devconf));
2746 			memcpy(dflt, init_net.ipv4.devconf_dflt,
2747 			       sizeof(ipv4_devconf_dflt));
2748 			break;
2749 		case 2:
2750 			/* use compiled values */
2751 			break;
2752 		}
2753 	}
2754 
2755 #ifdef CONFIG_SYSCTL
2756 	err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2757 	if (err < 0)
2758 		goto err_reg_all;
2759 
2760 	err = __devinet_sysctl_register(net, "default",
2761 					NETCONFA_IFINDEX_DEFAULT, dflt);
2762 	if (err < 0)
2763 		goto err_reg_dflt;
2764 
2765 	err = -ENOMEM;
2766 	forw_hdr = register_net_sysctl_sz(net, "net/ipv4", tbl,
2767 					  ARRAY_SIZE(ctl_forward_entry));
2768 	if (!forw_hdr)
2769 		goto err_reg_ctl;
2770 	net->ipv4.forw_hdr = forw_hdr;
2771 #endif
2772 
2773 	net->ipv4.devconf_all = all;
2774 	net->ipv4.devconf_dflt = dflt;
2775 	return 0;
2776 
2777 #ifdef CONFIG_SYSCTL
2778 err_reg_ctl:
2779 	__devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT);
2780 err_reg_dflt:
2781 	__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
2782 err_reg_all:
2783 	kfree(tbl);
2784 err_alloc_ctl:
2785 #endif
2786 	kfree(dflt);
2787 err_alloc_dflt:
2788 	kfree(all);
2789 err_alloc_all:
2790 	return err;
2791 }
2792 
2793 static __net_exit void devinet_exit_net(struct net *net)
2794 {
2795 #ifdef CONFIG_SYSCTL
2796 	struct ctl_table *tbl;
2797 
2798 	tbl = net->ipv4.forw_hdr->ctl_table_arg;
2799 	unregister_net_sysctl_table(net->ipv4.forw_hdr);
2800 	__devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
2801 				    NETCONFA_IFINDEX_DEFAULT);
2802 	__devinet_sysctl_unregister(net, net->ipv4.devconf_all,
2803 				    NETCONFA_IFINDEX_ALL);
2804 	kfree(tbl);
2805 #endif
2806 	kfree(net->ipv4.devconf_dflt);
2807 	kfree(net->ipv4.devconf_all);
2808 }
2809 
2810 static __net_initdata struct pernet_operations devinet_ops = {
2811 	.init = devinet_init_net,
2812 	.exit = devinet_exit_net,
2813 };
2814 
2815 static struct rtnl_af_ops inet_af_ops __read_mostly = {
2816 	.family		  = AF_INET,
2817 	.fill_link_af	  = inet_fill_link_af,
2818 	.get_link_af_size = inet_get_link_af_size,
2819 	.validate_link_af = inet_validate_link_af,
2820 	.set_link_af	  = inet_set_link_af,
2821 };
2822 
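/* Boot-time initialisation: set up the inet address hash table, pernet
 * operations and netdevice notifier, kick off the address-lifetime expiry
 * work, and register the RTM_*ADDR and RTM_*NETCONF handlers with
 * rtnetlink.
 */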
2823 void __init devinet_init(void)
2824 {
2825 	int i;
2826 
2827 	for (i = 0; i < IN4_ADDR_HSIZE; i++)
2828 		INIT_HLIST_HEAD(&inet_addr_lst[i]);
2829 
2830 	register_pernet_subsys(&devinet_ops);
2831 	register_netdevice_notifier(&ip_netdev_notifier);
2832 
2833 	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2834 
2835 	rtnl_af_register(&inet_af_ops);
2836 
2837 	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
2838 	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
2839 	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
2840 	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2841 		      inet_netconf_dump_devconf, 0);
2842 }
2843