xref: /openbmc/linux/net/ipv4/fib_frontend.c (revision fa7f32422ea1ac276b45b96a540ed5981caaa61f)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		IPv4 Forwarding Information Base: FIB frontend.
7  *
8  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9  *
10  *		This program is free software; you can redistribute it and/or
11  *		modify it under the terms of the GNU General Public License
12  *		as published by the Free Software Foundation; either version
13  *		2 of the License, or (at your option) any later version.
14  */
15 
16 #include <linux/module.h>
17 #include <asm/uaccess.h>
18 #include <linux/bitops.h>
19 #include <linux/capability.h>
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/socket.h>
25 #include <linux/sockios.h>
26 #include <linux/errno.h>
27 #include <linux/in.h>
28 #include <linux/inet.h>
29 #include <linux/inetdevice.h>
30 #include <linux/netdevice.h>
31 #include <linux/if_addr.h>
32 #include <linux/if_arp.h>
33 #include <linux/skbuff.h>
34 #include <linux/cache.h>
35 #include <linux/init.h>
36 #include <linux/list.h>
37 #include <linux/slab.h>
38 
39 #include <net/ip.h>
40 #include <net/protocol.h>
41 #include <net/route.h>
42 #include <net/tcp.h>
43 #include <net/sock.h>
44 #include <net/arp.h>
45 #include <net/ip_fib.h>
46 #include <net/rtnetlink.h>
47 #include <net/xfrm.h>
48 #include <net/l3mdev.h>
49 #include <trace/events/fib.h>
50 
51 #ifndef CONFIG_IP_MULTIPLE_TABLES
52 
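/* Without CONFIG_IP_MULTIPLE_TABLES there are exactly two tables: LOCAL and
 * MAIN. Create both tries (LOCAL backed by MAIN as its alias) and hook them
 * into the per-netns table hash.
 */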
53 static int __net_init fib4_rules_init(struct net *net)
54 {
55 	struct fib_table *local_table, *main_table;
56 
57 	main_table  = fib_trie_table(RT_TABLE_MAIN, NULL);
58 	if (!main_table)
59 		return -ENOMEM;
60 
61 	local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
62 	if (!local_table)
63 		goto fail;
64 
65 	hlist_add_head_rcu(&local_table->tb_hlist,
66 				&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
67 	hlist_add_head_rcu(&main_table->tb_hlist,
68 				&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
69 	return 0;
70 
71 fail:
72 	fib_free_table(main_table);
73 	return -ENOMEM;
74 }
75 #else
76 
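/* Return the table with the given id, creating it if it does not exist yet.
 * An id of 0 is treated as RT_TABLE_MAIN, and a newly created LOCAL table is
 * backed by the MAIN table as its alias.
 *
 * Typical use, as in inet_rtm_newroute() below (sketch):
 *
 *	tb = fib_new_table(net, cfg.fc_table);
 *	if (!tb)
 *		return -ENOBUFS;
 *	err = fib_table_insert(net, tb, &cfg);
 */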
77 struct fib_table *fib_new_table(struct net *net, u32 id)
78 {
79 	struct fib_table *tb, *alias = NULL;
80 	unsigned int h;
81 
82 	if (id == 0)
83 		id = RT_TABLE_MAIN;
84 	tb = fib_get_table(net, id);
85 	if (tb)
86 		return tb;
87 
88 	if (id == RT_TABLE_LOCAL)
89 		alias = fib_new_table(net, RT_TABLE_MAIN);
90 
91 	tb = fib_trie_table(id, alias);
92 	if (!tb)
93 		return NULL;
94 
95 	switch (id) {
96 	case RT_TABLE_MAIN:
97 		rcu_assign_pointer(net->ipv4.fib_main, tb);
98 		break;
99 	case RT_TABLE_DEFAULT:
100 		rcu_assign_pointer(net->ipv4.fib_default, tb);
101 		break;
102 	default:
103 		break;
104 	}
105 
106 	h = id & (FIB_TABLE_HASHSZ - 1);
107 	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
108 	return tb;
109 }
110 EXPORT_SYMBOL_GPL(fib_new_table);
111 
112 /* caller must hold either the RTNL lock or the RCU read lock */
113 struct fib_table *fib_get_table(struct net *net, u32 id)
114 {
115 	struct fib_table *tb;
116 	struct hlist_head *head;
117 	unsigned int h;
118 
119 	if (id == 0)
120 		id = RT_TABLE_MAIN;
121 	h = id & (FIB_TABLE_HASHSZ - 1);
122 
123 	head = &net->ipv4.fib_table_hash[h];
124 	hlist_for_each_entry_rcu(tb, head, tb_hlist) {
125 		if (tb->tb_id == id)
126 			return tb;
127 	}
128 	return NULL;
129 }
130 #endif /* CONFIG_IP_MULTIPLE_TABLES */
131 
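/* Swap @old for @new in the per-netns table hash, updating the fib_main /
 * fib_default shortcuts when multiple tables are enabled.
 */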
132 static void fib_replace_table(struct net *net, struct fib_table *old,
133 			      struct fib_table *new)
134 {
135 #ifdef CONFIG_IP_MULTIPLE_TABLES
136 	switch (new->tb_id) {
137 	case RT_TABLE_MAIN:
138 		rcu_assign_pointer(net->ipv4.fib_main, new);
139 		break;
140 	case RT_TABLE_DEFAULT:
141 		rcu_assign_pointer(net->ipv4.fib_default, new);
142 		break;
143 	default:
144 		break;
145 	}
146 
147 #endif
148 	/* replace the old table in the hlist */
149 	hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist);
150 }
151 
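/* Undo the local/main table merge: rebuild a standalone LOCAL table from the
 * merged one and swap it into place. Callers rely on this, for example, when
 * custom FIB rules need a separate LOCAL table.
 */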
152 int fib_unmerge(struct net *net)
153 {
154 	struct fib_table *old, *new;
155 
156 	/* attempt to fetch the local table if it has been allocated */
157 	old = fib_get_table(net, RT_TABLE_LOCAL);
158 	if (!old)
159 		return 0;
160 
161 	new = fib_trie_unmerge(old);
162 	if (!new)
163 		return -ENOMEM;
164 
165 	/* replace the merged table with the clean, unmerged table */
166 	if (new != old) {
167 		fib_replace_table(net, old, new);
168 		fib_free_table(old);
169 	}
170 
171 	return 0;
172 }
173 
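/* Flush dead entries from every table in this netns and drop the route cache
 * if anything was actually removed.
 */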
174 static void fib_flush(struct net *net)
175 {
176 	int flushed = 0;
177 	unsigned int h;
178 
179 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
180 		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
181 		struct hlist_node *tmp;
182 		struct fib_table *tb;
183 
184 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
185 			flushed += fib_table_flush(net, tb);
186 	}
187 
188 	if (flushed)
189 		rt_cache_flush(net);
190 }
191 
192 /*
193  * Find the address type as if only "dev" were present in the system. If
194  * dev is NULL then all interfaces are taken into consideration.
195  */
196 static inline unsigned int __inet_dev_addr_type(struct net *net,
197 						const struct net_device *dev,
198 						__be32 addr, u32 tb_id)
199 {
200 	struct flowi4		fl4 = { .daddr = addr };
201 	struct fib_result	res;
202 	unsigned int ret = RTN_BROADCAST;
203 	struct fib_table *table;
204 
205 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
206 		return RTN_BROADCAST;
207 	if (ipv4_is_multicast(addr))
208 		return RTN_MULTICAST;
209 
210 	rcu_read_lock();
211 
212 	table = fib_get_table(net, tb_id);
213 	if (table) {
214 		ret = RTN_UNICAST;
215 		if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
216 			if (!dev || dev == res.fi->fib_dev)
217 				ret = res.type;
218 		}
219 	}
220 
221 	rcu_read_unlock();
222 	return ret;
223 }
224 
225 unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id)
226 {
227 	return __inet_dev_addr_type(net, NULL, addr, tb_id);
228 }
229 EXPORT_SYMBOL(inet_addr_type_table);
230 
231 unsigned int inet_addr_type(struct net *net, __be32 addr)
232 {
233 	return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL);
234 }
235 EXPORT_SYMBOL(inet_addr_type);
236 
237 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
238 				__be32 addr)
239 {
240 	u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
241 
242 	return __inet_dev_addr_type(net, dev, addr, rt_table);
243 }
244 EXPORT_SYMBOL(inet_dev_addr_type);
245 
246 /* inet_addr_type with dev == NULL but using the table from a dev
247  * if one is associated
248  */
249 unsigned int inet_addr_type_dev_table(struct net *net,
250 				      const struct net_device *dev,
251 				      __be32 addr)
252 {
253 	u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
254 
255 	return __inet_dev_addr_type(net, NULL, addr, rt_table);
256 }
257 EXPORT_SYMBOL(inet_addr_type_dev_table);
258 
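/* Compute the "specific destination" address for a received packet: the local
 * address the packet was really directed to, used among other things as the
 * source of replies. Falls back to inet_select_addr() when no route matches.
 */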
259 __be32 fib_compute_spec_dst(struct sk_buff *skb)
260 {
261 	struct net_device *dev = skb->dev;
262 	struct in_device *in_dev;
263 	struct fib_result res;
264 	struct rtable *rt;
265 	struct net *net;
266 	int scope;
267 
268 	rt = skb_rtable(skb);
269 	if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
270 	    RTCF_LOCAL)
271 		return ip_hdr(skb)->daddr;
272 
273 	in_dev = __in_dev_get_rcu(dev);
274 	BUG_ON(!in_dev);
275 
276 	net = dev_net(dev);
277 
278 	scope = RT_SCOPE_UNIVERSE;
279 	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
280 		struct flowi4 fl4 = {
281 			.flowi4_iif = LOOPBACK_IFINDEX,
282 			.daddr = ip_hdr(skb)->saddr,
283 			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
284 			.flowi4_scope = scope,
285 			.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
286 		};
287 		if (!fib_lookup(net, &fl4, &res, 0))
288 			return FIB_RES_PREFSRC(net, res);
289 	} else {
290 		scope = RT_SCOPE_LINK;
291 	}
292 
293 	return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
294 }
295 
296 /* Given (packet source, input interface) and optional (dst, oif, tos):
297  * - (main) check that the source is valid, i.e. not a broadcast or one of
298  *   our local addresses.
299  * - figure out what "logical" interface this packet arrived on
300  *   and calculate the "specific destination" address.
301  * - check that the packet arrived from the expected physical interface.
302  * Called with rcu_read_lock() held.
303  */
304 static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
305 				 u8 tos, int oif, struct net_device *dev,
306 				 int rpf, struct in_device *idev, u32 *itag)
307 {
308 	int ret, no_addr;
309 	struct fib_result res;
310 	struct flowi4 fl4;
311 	struct net *net;
312 	bool dev_match;
313 
314 	fl4.flowi4_oif = 0;
315 	fl4.flowi4_iif = l3mdev_master_ifindex_rcu(dev);
316 	if (!fl4.flowi4_iif)
317 		fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
318 	fl4.daddr = src;
319 	fl4.saddr = dst;
320 	fl4.flowi4_tos = tos;
321 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
322 	fl4.flowi4_tun_key.tun_id = 0;
323 	fl4.flowi4_flags = 0;
324 
325 	no_addr = idev->ifa_list == NULL;
326 
327 	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
328 
329 	trace_fib_validate_source(dev, &fl4);
330 
331 	net = dev_net(dev);
332 	if (fib_lookup(net, &fl4, &res, 0))
333 		goto last_resort;
334 	if (res.type != RTN_UNICAST &&
335 	    (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
336 		goto e_inval;
337 	if (!rpf && !fib_num_tclassid_users(dev_net(dev)) &&
338 	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev)))
339 		goto last_resort;
340 	fib_combine_itag(itag, &res);
341 	dev_match = false;
342 
343 #ifdef CONFIG_IP_ROUTE_MULTIPATH
344 	for (ret = 0; ret < res.fi->fib_nhs; ret++) {
345 		struct fib_nh *nh = &res.fi->fib_nh[ret];
346 
347 		if (nh->nh_dev == dev) {
348 			dev_match = true;
349 			break;
350 		} else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
351 			dev_match = true;
352 			break;
353 		}
354 	}
355 #else
356 	if (FIB_RES_DEV(res) == dev)
357 		dev_match = true;
358 #endif
359 	if (dev_match) {
360 		ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
361 		return ret;
362 	}
363 	if (no_addr)
364 		goto last_resort;
365 	if (rpf == 1)
366 		goto e_rpf;
367 	fl4.flowi4_oif = dev->ifindex;
368 
369 	ret = 0;
370 	if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
371 		if (res.type == RTN_UNICAST)
372 			ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
373 	}
374 	return ret;
375 
376 last_resort:
377 	if (rpf)
378 		goto e_rpf;
379 	*itag = 0;
380 	return 0;
381 
382 e_inval:
383 	return -EINVAL;
384 e_rpf:
385 	return -EXDEV;
386 }
387 
388 /* Ignore rp_filter for packets protected by IPsec. */
389 int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
390 			u8 tos, int oif, struct net_device *dev,
391 			struct in_device *idev, u32 *itag)
392 {
393 	int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
394 
395 	if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
396 	    IN_DEV_ACCEPT_LOCAL(idev) &&
397 	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
398 		*itag = 0;
399 		return 0;
400 	}
401 	return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
402 }
403 
404 static inline __be32 sk_extract_addr(struct sockaddr *addr)
405 {
406 	return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
407 }
408 
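/* Append one u32 RTAX_* metric attribute at offset @len in the buffer @mx and
 * return the new length.
 */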
409 static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
410 {
411 	struct nlattr *nla;
412 
413 	nla = (struct nlattr *) ((char *) mx + len);
414 	nla->nla_type = type;
415 	nla->nla_len = nla_attr_size(4);
416 	*(u32 *) nla_data(nla) = value;
417 
418 	return len + nla_total_size(4);
419 }
420 
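/* Convert a legacy SIOCADDRT/SIOCDELRT struct rtentry into the struct
 * fib_config that the FIB code operates on.
 */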
421 static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
422 				 struct fib_config *cfg)
423 {
424 	__be32 addr;
425 	int plen;
426 
427 	memset(cfg, 0, sizeof(*cfg));
428 	cfg->fc_nlinfo.nl_net = net;
429 
430 	if (rt->rt_dst.sa_family != AF_INET)
431 		return -EAFNOSUPPORT;
432 
433 	/*
434 	 * Check mask for validity:
435 	 * a) it must be contiguous.
436 	 * b) destination must have all host bits clear.
437 	 * c) if the application forgot to set the correct family (AF_INET),
438 	 *    reject the request unless it is absolutely unambiguous, i.e.
439 	 *    both the family and the mask are zero.
440 	 */
441 	plen = 32;
442 	addr = sk_extract_addr(&rt->rt_dst);
443 	if (!(rt->rt_flags & RTF_HOST)) {
444 		__be32 mask = sk_extract_addr(&rt->rt_genmask);
445 
446 		if (rt->rt_genmask.sa_family != AF_INET) {
447 			if (mask || rt->rt_genmask.sa_family)
448 				return -EAFNOSUPPORT;
449 		}
450 
451 		if (bad_mask(mask, addr))
452 			return -EINVAL;
453 
454 		plen = inet_mask_len(mask);
455 	}
456 
457 	cfg->fc_dst_len = plen;
458 	cfg->fc_dst = addr;
459 
460 	if (cmd != SIOCDELRT) {
461 		cfg->fc_nlflags = NLM_F_CREATE;
462 		cfg->fc_protocol = RTPROT_BOOT;
463 	}
464 
465 	if (rt->rt_metric)
466 		cfg->fc_priority = rt->rt_metric - 1;
467 
468 	if (rt->rt_flags & RTF_REJECT) {
469 		cfg->fc_scope = RT_SCOPE_HOST;
470 		cfg->fc_type = RTN_UNREACHABLE;
471 		return 0;
472 	}
473 
474 	cfg->fc_scope = RT_SCOPE_NOWHERE;
475 	cfg->fc_type = RTN_UNICAST;
476 
477 	if (rt->rt_dev) {
478 		char *colon;
479 		struct net_device *dev;
480 		char devname[IFNAMSIZ];
481 
482 		if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
483 			return -EFAULT;
484 
485 		devname[IFNAMSIZ-1] = 0;
486 		colon = strchr(devname, ':');
487 		if (colon)
488 			*colon = 0;
489 		dev = __dev_get_by_name(net, devname);
490 		if (!dev)
491 			return -ENODEV;
492 		cfg->fc_oif = dev->ifindex;
493 		cfg->fc_table = l3mdev_fib_table(dev);
494 		if (colon) {
495 			struct in_ifaddr *ifa;
496 			struct in_device *in_dev = __in_dev_get_rtnl(dev);
497 			if (!in_dev)
498 				return -ENODEV;
499 			*colon = ':';
500 			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
501 				if (strcmp(ifa->ifa_label, devname) == 0)
502 					break;
503 			if (!ifa)
504 				return -ENODEV;
505 			cfg->fc_prefsrc = ifa->ifa_local;
506 		}
507 	}
508 
509 	addr = sk_extract_addr(&rt->rt_gateway);
510 	if (rt->rt_gateway.sa_family == AF_INET && addr) {
511 		unsigned int addr_type;
512 
513 		cfg->fc_gw = addr;
514 		addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
515 		if (rt->rt_flags & RTF_GATEWAY &&
516 		    addr_type == RTN_UNICAST)
517 			cfg->fc_scope = RT_SCOPE_UNIVERSE;
518 	}
519 
520 	if (cmd == SIOCDELRT)
521 		return 0;
522 
523 	if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
524 		return -EINVAL;
525 
526 	if (cfg->fc_scope == RT_SCOPE_NOWHERE)
527 		cfg->fc_scope = RT_SCOPE_LINK;
528 
529 	if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
530 		struct nlattr *mx;
531 		int len = 0;
532 
533 		mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
534 		if (!mx)
535 			return -ENOMEM;
536 
537 		if (rt->rt_flags & RTF_MTU)
538 			len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);
539 
540 		if (rt->rt_flags & RTF_WINDOW)
541 			len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);
542 
543 		if (rt->rt_flags & RTF_IRTT)
544 			len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);
545 
546 		cfg->fc_mx = mx;
547 		cfg->fc_mx_len = len;
548 	}
549 
550 	return 0;
551 }
552 
553 /*
554  * Handle IP routing ioctl calls.
555  * These are used to manipulate the routing tables.
556  */
557 int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
558 {
559 	struct fib_config cfg;
560 	struct rtentry rt;
561 	int err;
562 
563 	switch (cmd) {
564 	case SIOCADDRT:		/* Add a route */
565 	case SIOCDELRT:		/* Delete a route */
566 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
567 			return -EPERM;
568 
569 		if (copy_from_user(&rt, arg, sizeof(rt)))
570 			return -EFAULT;
571 
572 		rtnl_lock();
573 		err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
574 		if (err == 0) {
575 			struct fib_table *tb;
576 
577 			if (cmd == SIOCDELRT) {
578 				tb = fib_get_table(net, cfg.fc_table);
579 				if (tb)
580 					err = fib_table_delete(net, tb, &cfg);
581 				else
582 					err = -ESRCH;
583 			} else {
584 				tb = fib_new_table(net, cfg.fc_table);
585 				if (tb)
586 					err = fib_table_insert(net, tb, &cfg);
587 				else
588 					err = -ENOBUFS;
589 			}
590 
591 			/* allocated by rtentry_to_fib_config() */
592 			kfree(cfg.fc_mx);
593 		}
594 		rtnl_unlock();
595 		return err;
596 	}
597 	return -EINVAL;
598 }
599 
600 const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
601 	[RTA_DST]		= { .type = NLA_U32 },
602 	[RTA_SRC]		= { .type = NLA_U32 },
603 	[RTA_IIF]		= { .type = NLA_U32 },
604 	[RTA_OIF]		= { .type = NLA_U32 },
605 	[RTA_GATEWAY]		= { .type = NLA_U32 },
606 	[RTA_PRIORITY]		= { .type = NLA_U32 },
607 	[RTA_PREFSRC]		= { .type = NLA_U32 },
608 	[RTA_METRICS]		= { .type = NLA_NESTED },
609 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
610 	[RTA_FLOW]		= { .type = NLA_U32 },
611 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
612 	[RTA_ENCAP]		= { .type = NLA_NESTED },
613 };
614 
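/* Parse an RTM_NEWROUTE/RTM_DELROUTE netlink message into a struct
 * fib_config.
 */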
615 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
616 			     struct nlmsghdr *nlh, struct fib_config *cfg)
617 {
618 	struct nlattr *attr;
619 	int err, remaining;
620 	struct rtmsg *rtm;
621 
622 	err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
623 	if (err < 0)
624 		goto errout;
625 
626 	memset(cfg, 0, sizeof(*cfg));
627 
628 	rtm = nlmsg_data(nlh);
629 	cfg->fc_dst_len = rtm->rtm_dst_len;
630 	cfg->fc_tos = rtm->rtm_tos;
631 	cfg->fc_table = rtm->rtm_table;
632 	cfg->fc_protocol = rtm->rtm_protocol;
633 	cfg->fc_scope = rtm->rtm_scope;
634 	cfg->fc_type = rtm->rtm_type;
635 	cfg->fc_flags = rtm->rtm_flags;
636 	cfg->fc_nlflags = nlh->nlmsg_flags;
637 
638 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
639 	cfg->fc_nlinfo.nlh = nlh;
640 	cfg->fc_nlinfo.nl_net = net;
641 
642 	if (cfg->fc_type > RTN_MAX) {
643 		err = -EINVAL;
644 		goto errout;
645 	}
646 
647 	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
648 		switch (nla_type(attr)) {
649 		case RTA_DST:
650 			cfg->fc_dst = nla_get_be32(attr);
651 			break;
652 		case RTA_OIF:
653 			cfg->fc_oif = nla_get_u32(attr);
654 			break;
655 		case RTA_GATEWAY:
656 			cfg->fc_gw = nla_get_be32(attr);
657 			break;
658 		case RTA_PRIORITY:
659 			cfg->fc_priority = nla_get_u32(attr);
660 			break;
661 		case RTA_PREFSRC:
662 			cfg->fc_prefsrc = nla_get_be32(attr);
663 			break;
664 		case RTA_METRICS:
665 			cfg->fc_mx = nla_data(attr);
666 			cfg->fc_mx_len = nla_len(attr);
667 			break;
668 		case RTA_MULTIPATH:
669 			cfg->fc_mp = nla_data(attr);
670 			cfg->fc_mp_len = nla_len(attr);
671 			break;
672 		case RTA_FLOW:
673 			cfg->fc_flow = nla_get_u32(attr);
674 			break;
675 		case RTA_TABLE:
676 			cfg->fc_table = nla_get_u32(attr);
677 			break;
678 		case RTA_ENCAP:
679 			cfg->fc_encap = attr;
680 			break;
681 		case RTA_ENCAP_TYPE:
682 			cfg->fc_encap_type = nla_get_u16(attr);
683 			break;
684 		}
685 	}
686 
687 	return 0;
688 errout:
689 	return err;
690 }
691 
692 static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
693 {
694 	struct net *net = sock_net(skb->sk);
695 	struct fib_config cfg;
696 	struct fib_table *tb;
697 	int err;
698 
699 	err = rtm_to_fib_config(net, skb, nlh, &cfg);
700 	if (err < 0)
701 		goto errout;
702 
703 	tb = fib_get_table(net, cfg.fc_table);
704 	if (!tb) {
705 		err = -ESRCH;
706 		goto errout;
707 	}
708 
709 	err = fib_table_delete(net, tb, &cfg);
710 errout:
711 	return err;
712 }
713 
714 static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
715 {
716 	struct net *net = sock_net(skb->sk);
717 	struct fib_config cfg;
718 	struct fib_table *tb;
719 	int err;
720 
721 	err = rtm_to_fib_config(net, skb, nlh, &cfg);
722 	if (err < 0)
723 		goto errout;
724 
725 	tb = fib_new_table(net, cfg.fc_table);
726 	if (!tb) {
727 		err = -ENOBUFS;
728 		goto errout;
729 	}
730 
731 	err = fib_table_insert(net, tb, &cfg);
732 errout:
733 	return err;
734 }
735 
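/* RTM_GETROUTE dump callback: walk every table hash chain and dump each
 * table, using cb->args[] to resume across multiple skbs.
 */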
736 static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
737 {
738 	struct net *net = sock_net(skb->sk);
739 	unsigned int h, s_h;
740 	unsigned int e = 0, s_e;
741 	struct fib_table *tb;
742 	struct hlist_head *head;
743 	int dumped = 0;
744 
745 	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
746 	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
747 		return skb->len;
748 
749 	s_h = cb->args[0];
750 	s_e = cb->args[1];
751 
752 	rcu_read_lock();
753 
754 	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
755 		e = 0;
756 		head = &net->ipv4.fib_table_hash[h];
757 		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
758 			if (e < s_e)
759 				goto next;
760 			if (dumped)
761 				memset(&cb->args[2], 0, sizeof(cb->args) -
762 						 2 * sizeof(cb->args[0]));
763 			if (fib_table_dump(tb, skb, cb) < 0)
764 				goto out;
765 			dumped = 1;
766 next:
767 			e++;
768 		}
769 	}
770 out:
771 	rcu_read_unlock();
772 
773 	cb->args[1] = e;
774 	cb->args[0] = h;
775 
776 	return skb->len;
777 }
778 
779 /* Prepare and feed an intra-kernel routing request.
780  * Really, it should be a netlink message, but netlink
781  * may not be configured, so we feed the request directly
782  * to the FIB engine. This is legal because all such events
783  * occur only while rtnl_lock is already held.
784  */
785 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
786 {
787 	struct net *net = dev_net(ifa->ifa_dev->dev);
788 	u32 tb_id = l3mdev_fib_table(ifa->ifa_dev->dev);
789 	struct fib_table *tb;
790 	struct fib_config cfg = {
791 		.fc_protocol = RTPROT_KERNEL,
792 		.fc_type = type,
793 		.fc_dst = dst,
794 		.fc_dst_len = dst_len,
795 		.fc_prefsrc = ifa->ifa_local,
796 		.fc_oif = ifa->ifa_dev->dev->ifindex,
797 		.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
798 		.fc_nlinfo = {
799 			.nl_net = net,
800 		},
801 	};
802 
803 	if (!tb_id)
804 		tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL;
805 
806 	tb = fib_new_table(net, tb_id);
807 	if (!tb)
808 		return;
809 
810 	cfg.fc_table = tb->tb_id;
811 
812 	if (type != RTN_LOCAL)
813 		cfg.fc_scope = RT_SCOPE_LINK;
814 	else
815 		cfg.fc_scope = RT_SCOPE_HOST;
816 
817 	if (cmd == RTM_NEWROUTE)
818 		fib_table_insert(net, tb, &cfg);
819 	else
820 		fib_table_delete(net, tb, &cfg);
821 }
822 
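/* Install the routes implied by a newly added interface address: the /32
 * local route, broadcast routes and, for a primary address, the prefix route.
 */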
823 void fib_add_ifaddr(struct in_ifaddr *ifa)
824 {
825 	struct in_device *in_dev = ifa->ifa_dev;
826 	struct net_device *dev = in_dev->dev;
827 	struct in_ifaddr *prim = ifa;
828 	__be32 mask = ifa->ifa_mask;
829 	__be32 addr = ifa->ifa_local;
830 	__be32 prefix = ifa->ifa_address & mask;
831 
832 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
833 		prim = inet_ifa_byprefix(in_dev, prefix, mask);
834 		if (!prim) {
835 			pr_warn("%s: bug: prim == NULL\n", __func__);
836 			return;
837 		}
838 	}
839 
840 	fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);
841 
842 	if (!(dev->flags & IFF_UP))
843 		return;
844 
845 	/* Add broadcast address, if it is explicitly assigned. */
846 	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
847 		fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
848 
849 	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
850 	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
851 		if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
852 			fib_magic(RTM_NEWROUTE,
853 				  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
854 				  prefix, ifa->ifa_prefixlen, prim);
855 
856 		/* Add network-specific broadcasts, when it makes sense */
857 		if (ifa->ifa_prefixlen < 31) {
858 			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
859 			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
860 				  32, prim);
861 		}
862 	}
863 }
864 
865 /* Delete a primary or secondary address.
866  * Optionally, on secondary address promotion, consider the addresses from
867  * the iprim subnet as deleted, even if they are still in the device list.
868  * In this case the secondary ifa can remain in the device list.
869  */
870 void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
871 {
872 	struct in_device *in_dev = ifa->ifa_dev;
873 	struct net_device *dev = in_dev->dev;
874 	struct in_ifaddr *ifa1;
875 	struct in_ifaddr *prim = ifa, *prim1 = NULL;
876 	__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
877 	__be32 any = ifa->ifa_address & ifa->ifa_mask;
878 #define LOCAL_OK	1
879 #define BRD_OK		2
880 #define BRD0_OK		4
881 #define BRD1_OK		8
882 	unsigned int ok = 0;
883 	int subnet = 0;		/* Primary network */
884 	int gone = 1;		/* Address is missing */
885 	int same_prefsrc = 0;	/* Another primary with same IP */
886 
887 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
888 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
889 		if (!prim) {
890 			/* if the device has been deleted, we don't perform
891 			 * address promotion
892 			 */
893 			if (!in_dev->dead)
894 				pr_warn("%s: bug: prim == NULL\n", __func__);
895 			return;
896 		}
897 		if (iprim && iprim != prim) {
898 			pr_warn("%s: bug: iprim != prim\n", __func__);
899 			return;
900 		}
901 	} else if (!ipv4_is_zeronet(any) &&
902 		   (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
903 		if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
904 			fib_magic(RTM_DELROUTE,
905 				  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
906 				  any, ifa->ifa_prefixlen, prim);
907 		subnet = 1;
908 	}
909 
910 	if (in_dev->dead)
911 		goto no_promotions;
912 
913 	/* Deletion is more complicated than addition.
914 	 * We must take care not to delete too much :-)
915 	 *
916 	 * Scan the address list to be sure that the addresses are really gone.
917 	 */
918 
919 	for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
920 		if (ifa1 == ifa) {
921 			/* promotion, keep the IP */
922 			gone = 0;
923 			continue;
924 		}
925 		/* Ignore IFAs from our subnet */
926 		if (iprim && ifa1->ifa_mask == iprim->ifa_mask &&
927 		    inet_ifa_match(ifa1->ifa_address, iprim))
928 			continue;
929 
930 		/* Ignore ifa1 if it uses a different primary IP (prefsrc) */
931 		if (ifa1->ifa_flags & IFA_F_SECONDARY) {
932 			/* Another address from our subnet? */
933 			if (ifa1->ifa_mask == prim->ifa_mask &&
934 			    inet_ifa_match(ifa1->ifa_address, prim))
935 				prim1 = prim;
936 			else {
937 				/* We reached the secondaries, so
938 				 * same_prefsrc should be determined.
939 				 */
940 				if (!same_prefsrc)
941 					continue;
942 				/* Search new prim1 if ifa1 is not
943 				 * using the current prim1
944 				 */
945 				if (!prim1 ||
946 				    ifa1->ifa_mask != prim1->ifa_mask ||
947 				    !inet_ifa_match(ifa1->ifa_address, prim1))
948 					prim1 = inet_ifa_byprefix(in_dev,
949 							ifa1->ifa_address,
950 							ifa1->ifa_mask);
951 				if (!prim1)
952 					continue;
953 				if (prim1->ifa_local != prim->ifa_local)
954 					continue;
955 			}
956 		} else {
957 			if (prim->ifa_local != ifa1->ifa_local)
958 				continue;
959 			prim1 = ifa1;
960 			if (prim != prim1)
961 				same_prefsrc = 1;
962 		}
963 		if (ifa->ifa_local == ifa1->ifa_local)
964 			ok |= LOCAL_OK;
965 		if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
966 			ok |= BRD_OK;
967 		if (brd == ifa1->ifa_broadcast)
968 			ok |= BRD1_OK;
969 		if (any == ifa1->ifa_broadcast)
970 			ok |= BRD0_OK;
971 		/* the primary has network-specific broadcasts */
972 		if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) {
973 			__be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask;
974 			__be32 any1 = ifa1->ifa_address & ifa1->ifa_mask;
975 
976 			if (!ipv4_is_zeronet(any1)) {
977 				if (ifa->ifa_broadcast == brd1 ||
978 				    ifa->ifa_broadcast == any1)
979 					ok |= BRD_OK;
980 				if (brd == brd1 || brd == any1)
981 					ok |= BRD1_OK;
982 				if (any == brd1 || any == any1)
983 					ok |= BRD0_OK;
984 			}
985 		}
986 	}
987 
988 no_promotions:
989 	if (!(ok & BRD_OK))
990 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
991 	if (subnet && ifa->ifa_prefixlen < 31) {
992 		if (!(ok & BRD1_OK))
993 			fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
994 		if (!(ok & BRD0_OK))
995 			fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
996 	}
997 	if (!(ok & LOCAL_OK)) {
998 		unsigned int addr_type;
999 
1000 		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
1001 
1002 		/* Check that this local address has finally disappeared. */
1003 		addr_type = inet_addr_type_dev_table(dev_net(dev), dev,
1004 						     ifa->ifa_local);
1005 		if (gone && addr_type != RTN_LOCAL) {
1006 			/* And last, but not least, we must
1007 			 * flush any stray FIB entries.
1008 			 *
1009 			 * First we scan the fib_info list searching
1010 			 * for stray nexthop entries, then trigger fib_flush.
1011 			 */
1012 			if (fib_sync_down_addr(dev, ifa->ifa_local))
1013 				fib_flush(dev_net(dev));
1014 		}
1015 	}
1016 #undef LOCAL_OK
1017 #undef BRD_OK
1018 #undef BRD0_OK
1019 #undef BRD1_OK
1020 }
1021 
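/* Service a NETLINK_FIB_LOOKUP request: look up frn->fl_addr in the requested
 * table and fill in the result fields of @frn.
 */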
1022 static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
1023 {
1024 
1025 	struct fib_result       res;
1026 	struct flowi4           fl4 = {
1027 		.flowi4_mark = frn->fl_mark,
1028 		.daddr = frn->fl_addr,
1029 		.flowi4_tos = frn->fl_tos,
1030 		.flowi4_scope = frn->fl_scope,
1031 	};
1032 	struct fib_table *tb;
1033 
1034 	rcu_read_lock();
1035 
1036 	tb = fib_get_table(net, frn->tb_id_in);
1037 
1038 	frn->err = -ENOENT;
1039 	if (tb) {
1040 		local_bh_disable();
1041 
1042 		frn->tb_id = tb->tb_id;
1043 		frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
1044 
1045 		if (!frn->err) {
1046 			frn->prefixlen = res.prefixlen;
1047 			frn->nh_sel = res.nh_sel;
1048 			frn->type = res.type;
1049 			frn->scope = res.scope;
1050 		}
1051 		local_bh_enable();
1052 	}
1053 
1054 	rcu_read_unlock();
1055 }
1056 
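/* Input handler for the NETLINK_FIB_LOOKUP socket: validate the message,
 * perform the lookup and unicast the result back to the requesting portid.
 */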
1057 static void nl_fib_input(struct sk_buff *skb)
1058 {
1059 	struct net *net;
1060 	struct fib_result_nl *frn;
1061 	struct nlmsghdr *nlh;
1062 	u32 portid;
1063 
1064 	net = sock_net(skb->sk);
1065 	nlh = nlmsg_hdr(skb);
1066 	if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
1067 	    nlmsg_len(nlh) < sizeof(*frn))
1068 		return;
1069 
1070 	skb = netlink_skb_clone(skb, GFP_KERNEL);
1071 	if (!skb)
1072 		return;
1073 	nlh = nlmsg_hdr(skb);
1074 
1075 	frn = (struct fib_result_nl *) nlmsg_data(nlh);
1076 	nl_fib_lookup(net, frn);
1077 
1078 	portid = NETLINK_CB(skb).portid;      /* netlink portid */
1079 	NETLINK_CB(skb).portid = 0;        /* from kernel */
1080 	NETLINK_CB(skb).dst_group = 0;  /* unicast */
1081 	netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
1082 }
1083 
1084 static int __net_init nl_fib_lookup_init(struct net *net)
1085 {
1086 	struct sock *sk;
1087 	struct netlink_kernel_cfg cfg = {
1088 		.input	= nl_fib_input,
1089 	};
1090 
1091 	sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
1092 	if (!sk)
1093 		return -EAFNOSUPPORT;
1094 	net->ipv4.fibnl = sk;
1095 	return 0;
1096 }
1097 
1098 static void nl_fib_lookup_exit(struct net *net)
1099 {
1100 	netlink_kernel_release(net->ipv4.fibnl);
1101 	net->ipv4.fibnl = NULL;
1102 }
1103 
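/* Take IPv4 routing state for @dev down: sync the FIB (flushing if needed),
 * drop the route cache and tear down the device's ARP entries.
 */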
1104 static void fib_disable_ip(struct net_device *dev, unsigned long event,
1105 			   bool force)
1106 {
1107 	if (fib_sync_down_dev(dev, event, force))
1108 		fib_flush(dev_net(dev));
1109 	rt_cache_flush(dev_net(dev));
1110 	arp_ifdown(dev);
1111 }
1112 
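/* inetaddr notifier: keep the FIB in sync as addresses are added to or
 * removed from an interface.
 */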
1113 static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
1114 {
1115 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1116 	struct net_device *dev = ifa->ifa_dev->dev;
1117 	struct net *net = dev_net(dev);
1118 
1119 	switch (event) {
1120 	case NETDEV_UP:
1121 		fib_add_ifaddr(ifa);
1122 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1123 		fib_sync_up(dev, RTNH_F_DEAD);
1124 #endif
1125 		atomic_inc(&net->ipv4.dev_addr_genid);
1126 		rt_cache_flush(dev_net(dev));
1127 		break;
1128 	case NETDEV_DOWN:
1129 		fib_del_ifaddr(ifa, NULL);
1130 		atomic_inc(&net->ipv4.dev_addr_genid);
1131 		if (!ifa->ifa_dev->ifa_list) {
1132 			/* Last address was deleted from this interface.
1133 			 * Disable IP.
1134 			 */
1135 			fib_disable_ip(dev, event, true);
1136 		} else {
1137 			rt_cache_flush(dev_net(dev));
1138 		}
1139 		break;
1140 	}
1141 	return NOTIFY_DONE;
1142 }
1143 
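/* netdevice notifier: react to interface up/down/unregister, link state, MTU
 * and L3 master (e.g. VRF) changes by updating or flushing FIB state.
 */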
1144 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1145 {
1146 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1147 	struct netdev_notifier_changeupper_info *info;
1148 	struct in_device *in_dev;
1149 	struct net *net = dev_net(dev);
1150 	unsigned int flags;
1151 
1152 	if (event == NETDEV_UNREGISTER) {
1153 		fib_disable_ip(dev, event, true);
1154 		rt_flush_dev(dev);
1155 		return NOTIFY_DONE;
1156 	}
1157 
1158 	in_dev = __in_dev_get_rtnl(dev);
1159 	if (!in_dev)
1160 		return NOTIFY_DONE;
1161 
1162 	switch (event) {
1163 	case NETDEV_UP:
1164 		for_ifa(in_dev) {
1165 			fib_add_ifaddr(ifa);
1166 		} endfor_ifa(in_dev);
1167 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1168 		fib_sync_up(dev, RTNH_F_DEAD);
1169 #endif
1170 		atomic_inc(&net->ipv4.dev_addr_genid);
1171 		rt_cache_flush(net);
1172 		break;
1173 	case NETDEV_DOWN:
1174 		fib_disable_ip(dev, event, false);
1175 		break;
1176 	case NETDEV_CHANGE:
1177 		flags = dev_get_flags(dev);
1178 		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
1179 			fib_sync_up(dev, RTNH_F_LINKDOWN);
1180 		else
1181 			fib_sync_down_dev(dev, event, false);
1182 		/* fall through */
1183 	case NETDEV_CHANGEMTU:
1184 		rt_cache_flush(net);
1185 		break;
1186 	case NETDEV_CHANGEUPPER:
1187 		info = ptr;
1188 		/* flush all routes if dev is linked to or unlinked from
1189 		 * an L3 master device (e.g., VRF)
1190 		 */
1191 		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
1192 			fib_disable_ip(dev, NETDEV_DOWN, true);
1193 		break;
1194 	}
1195 	return NOTIFY_DONE;
1196 }
1197 
1198 static struct notifier_block fib_inetaddr_notifier = {
1199 	.notifier_call = fib_inetaddr_event,
1200 };
1201 
1202 static struct notifier_block fib_netdev_notifier = {
1203 	.notifier_call = fib_netdev_event,
1204 };
1205 
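/* Per-netns initialization: allocate the table hash and set up the initial
 * tables (or FIB rules when multiple tables are enabled).
 */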
1206 static int __net_init ip_fib_net_init(struct net *net)
1207 {
1208 	int err;
1209 	size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;
1210 
1211 	/* Avoid false sharing: use at least a full cache line */
1212 	size = max_t(size_t, size, L1_CACHE_BYTES);
1213 
1214 	net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
1215 	if (!net->ipv4.fib_table_hash)
1216 		return -ENOMEM;
1217 
1218 	err = fib4_rules_init(net);
1219 	if (err < 0)
1220 		goto fail;
1221 	return 0;
1222 
1223 fail:
1224 	kfree(net->ipv4.fib_table_hash);
1225 	return err;
1226 }
1227 
1228 static void ip_fib_net_exit(struct net *net)
1229 {
1230 	unsigned int i;
1231 
1232 	rtnl_lock();
1233 #ifdef CONFIG_IP_MULTIPLE_TABLES
1234 	RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
1235 	RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
1236 #endif
1237 	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
1238 		struct hlist_head *head = &net->ipv4.fib_table_hash[i];
1239 		struct hlist_node *tmp;
1240 		struct fib_table *tb;
1241 
1242 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
1243 			hlist_del(&tb->tb_hlist);
1244 			fib_table_flush(net, tb);
1245 			fib_free_table(tb);
1246 		}
1247 	}
1248 
1249 #ifdef CONFIG_IP_MULTIPLE_TABLES
1250 	fib4_rules_exit(net);
1251 #endif
1252 	rtnl_unlock();
1253 	kfree(net->ipv4.fib_table_hash);
1254 }
1255 
1256 static int __net_init fib_net_init(struct net *net)
1257 {
1258 	int error;
1259 
1260 #ifdef CONFIG_IP_ROUTE_CLASSID
1261 	net->ipv4.fib_num_tclassid_users = 0;
1262 #endif
1263 	error = ip_fib_net_init(net);
1264 	if (error < 0)
1265 		goto out;
1266 	error = nl_fib_lookup_init(net);
1267 	if (error < 0)
1268 		goto out_nlfl;
1269 	error = fib_proc_init(net);
1270 	if (error < 0)
1271 		goto out_proc;
1272 out:
1273 	return error;
1274 
1275 out_proc:
1276 	nl_fib_lookup_exit(net);
1277 out_nlfl:
1278 	ip_fib_net_exit(net);
1279 	goto out;
1280 }
1281 
1282 static void __net_exit fib_net_exit(struct net *net)
1283 {
1284 	fib_proc_exit(net);
1285 	nl_fib_lookup_exit(net);
1286 	ip_fib_net_exit(net);
1287 }
1288 
1289 static struct pernet_operations fib_net_ops = {
1290 	.init = fib_net_init,
1291 	.exit = fib_net_exit,
1292 };
1293 
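/* Boot-time initialization: register the rtnetlink route handlers, the pernet
 * operations and the netdev/inetaddr notifiers, then initialise the trie code.
 */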
1294 void __init ip_fib_init(void)
1295 {
1296 	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
1297 	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
1298 	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
1299 
1300 	register_pernet_subsys(&fib_net_ops);
1301 	register_netdevice_notifier(&fib_netdev_notifier);
1302 	register_inetaddr_notifier(&fib_inetaddr_notifier);
1303 
1304 	fib_trie_init();
1305 }
1306