xref: /openbmc/linux/net/ipv4/fib_frontend.c (revision 4800cd83)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		IPv4 Forwarding Information Base: FIB frontend.
7  *
8  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9  *
10  *		This program is free software; you can redistribute it and/or
11  *		modify it under the terms of the GNU General Public License
12  *		as published by the Free Software Foundation; either version
13  *		2 of the License, or (at your option) any later version.
14  */
15 
16 #include <linux/module.h>
17 #include <asm/uaccess.h>
18 #include <asm/system.h>
19 #include <linux/bitops.h>
20 #include <linux/capability.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/string.h>
25 #include <linux/socket.h>
26 #include <linux/sockios.h>
27 #include <linux/errno.h>
28 #include <linux/in.h>
29 #include <linux/inet.h>
30 #include <linux/inetdevice.h>
31 #include <linux/netdevice.h>
32 #include <linux/if_addr.h>
33 #include <linux/if_arp.h>
34 #include <linux/skbuff.h>
35 #include <linux/init.h>
36 #include <linux/list.h>
37 #include <linux/slab.h>
38 
39 #include <net/ip.h>
40 #include <net/protocol.h>
41 #include <net/route.h>
42 #include <net/tcp.h>
43 #include <net/sock.h>
44 #include <net/arp.h>
45 #include <net/ip_fib.h>
46 #include <net/rtnetlink.h>
47 
48 #ifndef CONFIG_IP_MULTIPLE_TABLES
49 
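/* Without CONFIG_IP_MULTIPLE_TABLES there is no policy routing: only the
 * LOCAL and MAIN tables exist, so this stub simply allocates both and
 * hooks them into the per-namespace table hash.
 */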
50 static int __net_init fib4_rules_init(struct net *net)
51 {
52 	struct fib_table *local_table, *main_table;
53 
54 	local_table = fib_hash_table(RT_TABLE_LOCAL);
55 	if (local_table == NULL)
56 		return -ENOMEM;
57 
58 	main_table  = fib_hash_table(RT_TABLE_MAIN);
59 	if (main_table == NULL)
60 		goto fail;
61 
62 	hlist_add_head_rcu(&local_table->tb_hlist,
63 				&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
64 	hlist_add_head_rcu(&main_table->tb_hlist,
65 				&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
66 	return 0;
67 
68 fail:
69 	kfree(local_table);
70 	return -ENOMEM;
71 }
72 #else
73 
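/* With CONFIG_IP_MULTIPLE_TABLES any table id is allowed: return the table
 * if it already exists, otherwise allocate it and insert it into the
 * per-namespace hash (id 0 is treated as an alias for RT_TABLE_MAIN).
 */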
74 struct fib_table *fib_new_table(struct net *net, u32 id)
75 {
76 	struct fib_table *tb;
77 	unsigned int h;
78 
79 	if (id == 0)
80 		id = RT_TABLE_MAIN;
81 	tb = fib_get_table(net, id);
82 	if (tb)
83 		return tb;
84 
85 	tb = fib_hash_table(id);
86 	if (!tb)
87 		return NULL;
88 	h = id & (FIB_TABLE_HASHSZ - 1);
89 	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
90 	return tb;
91 }
92 
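/* Look up a table by id; id 0 is an alias for RT_TABLE_MAIN.  Returns NULL
 * if the table does not exist.  The hash walk itself is RCU protected.
 */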
93 struct fib_table *fib_get_table(struct net *net, u32 id)
94 {
95 	struct fib_table *tb;
96 	struct hlist_node *node;
97 	struct hlist_head *head;
98 	unsigned int h;
99 
100 	if (id == 0)
101 		id = RT_TABLE_MAIN;
102 	h = id & (FIB_TABLE_HASHSZ - 1);
103 
104 	rcu_read_lock();
105 	head = &net->ipv4.fib_table_hash[h];
106 	hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
107 		if (tb->tb_id == id) {
108 			rcu_read_unlock();
109 			return tb;
110 		}
111 	}
112 	rcu_read_unlock();
113 	return NULL;
114 }
115 #endif /* CONFIG_IP_MULTIPLE_TABLES */
116 
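/* Pick a default route from the relevant table, but only when the current
 * result points at a gateway nexthop with link scope.
 */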
117 void fib_select_default(struct net *net,
118 			const struct flowi *flp, struct fib_result *res)
119 {
120 	struct fib_table *tb;
121 	int table = RT_TABLE_MAIN;
122 #ifdef CONFIG_IP_MULTIPLE_TABLES
123 	if (res->r == NULL || res->r->action != FR_ACT_TO_TBL)
124 		return;
125 	table = res->r->table;
126 #endif
127 	tb = fib_get_table(net, table);
128 	if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
129 		fib_table_select_default(tb, flp, res);
130 }
131 
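/* Flush the entries of every table in this namespace and, if anything was
 * actually removed, flush the routing cache as well.
 */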
132 static void fib_flush(struct net *net)
133 {
134 	int flushed = 0;
135 	struct fib_table *tb;
136 	struct hlist_node *node;
137 	struct hlist_head *head;
138 	unsigned int h;
139 
140 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
141 		head = &net->ipv4.fib_table_hash[h];
142 		hlist_for_each_entry(tb, node, head, tb_hlist)
143 			flushed += fib_table_flush(tb);
144 	}
145 
146 	if (flushed)
147 		rt_cache_flush(net, -1);
148 }
149 
150 /**
151  * __ip_dev_find - find the first device with a given source address.
152  * @net: the net namespace
153  * @addr: the source address
154  * @devref: if true, take a reference on the found device
155  *
156  * If a caller uses devref=false, it should be protected by RCU or the RTNL lock.
157  */
158 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
159 {
160 	struct flowi fl = {
161 		.fl4_dst = addr,
162 	};
163 	struct fib_result res = { 0 };
164 	struct net_device *dev = NULL;
165 	struct fib_table *local_table;
166 
167 #ifdef CONFIG_IP_MULTIPLE_TABLES
168 	res.r = NULL;
169 #endif
170 
171 	rcu_read_lock();
172 	local_table = fib_get_table(net, RT_TABLE_LOCAL);
173 	if (!local_table ||
174 	    fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
175 		rcu_read_unlock();
176 		return NULL;
177 	}
178 	if (res.type != RTN_LOCAL)
179 		goto out;
180 	dev = FIB_RES_DEV(res);
181 
182 	if (dev && devref)
183 		dev_hold(dev);
184 out:
185 	rcu_read_unlock();
186 	return dev;
187 }
188 EXPORT_SYMBOL(__ip_dev_find);
189 
190 /*
191  * Find the address type as if only "dev" were present in the system. If
192  * dev is NULL then all interfaces are taken into consideration.
193  */
194 static inline unsigned __inet_dev_addr_type(struct net *net,
195 					    const struct net_device *dev,
196 					    __be32 addr)
197 {
198 	struct flowi		fl = { .fl4_dst = addr };
199 	struct fib_result	res;
200 	unsigned ret = RTN_BROADCAST;
201 	struct fib_table *local_table;
202 
203 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
204 		return RTN_BROADCAST;
205 	if (ipv4_is_multicast(addr))
206 		return RTN_MULTICAST;
207 
208 #ifdef CONFIG_IP_MULTIPLE_TABLES
209 	res.r = NULL;
210 #endif
211 
212 	local_table = fib_get_table(net, RT_TABLE_LOCAL);
213 	if (local_table) {
214 		ret = RTN_UNICAST;
215 		rcu_read_lock();
216 		if (!fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
217 			if (!dev || dev == res.fi->fib_dev)
218 				ret = res.type;
219 		}
220 		rcu_read_unlock();
221 	}
222 	return ret;
223 }
224 
225 unsigned int inet_addr_type(struct net *net, __be32 addr)
226 {
227 	return __inet_dev_addr_type(net, NULL, addr);
228 }
229 EXPORT_SYMBOL(inet_addr_type);
230 
231 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
232 				__be32 addr)
233 {
234 	return __inet_dev_addr_type(net, dev, addr);
235 }
236 EXPORT_SYMBOL(inet_dev_addr_type);
237 
238 /* Given (packet source, input interface) and optional (dst, oif, tos):
239  * - (main) check that the source is valid, i.e. not broadcast or one of
240  *   our local addresses.
241  * - figure out what "logical" interface this packet arrived on
242  *   and calculate the "specific destination" address.
243  * - check that the packet arrived from the expected physical interface.
244  * Called with rcu_read_lock() held.
245  */
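/* Return value: -EINVAL for an invalid source, -EXDEV when reverse-path
 * filtering rejects the packet, otherwise 0 or 1 (1 when the nexthop back
 * to the source has host scope or narrower).
 */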
246 int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
247 			struct net_device *dev, __be32 *spec_dst,
248 			u32 *itag, u32 mark)
249 {
250 	struct in_device *in_dev;
251 	struct flowi fl = {
252 		.fl4_dst = src,
253 		.fl4_src = dst,
254 		.fl4_tos = tos,
255 		.mark = mark,
256 		.iif = oif
257 	};
258 	struct fib_result res;
259 	int no_addr, rpf, accept_local;
260 	bool dev_match;
261 	int ret;
262 	struct net *net;
263 
264 	no_addr = rpf = accept_local = 0;
265 	in_dev = __in_dev_get_rcu(dev);
266 	if (in_dev) {
267 		no_addr = in_dev->ifa_list == NULL;
268 		rpf = IN_DEV_RPFILTER(in_dev);
269 		accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
270 		if (mark && !IN_DEV_SRC_VMARK(in_dev))
271 			fl.mark = 0;
272 	}
273 
274 	if (in_dev == NULL)
275 		goto e_inval;
276 
277 	net = dev_net(dev);
278 	if (fib_lookup(net, &fl, &res))
279 		goto last_resort;
280 	if (res.type != RTN_UNICAST) {
281 		if (res.type != RTN_LOCAL || !accept_local)
282 			goto e_inval;
283 	}
284 	*spec_dst = FIB_RES_PREFSRC(res);
285 	fib_combine_itag(itag, &res);
286 	dev_match = false;
287 
288 #ifdef CONFIG_IP_ROUTE_MULTIPATH
289 	for (ret = 0; ret < res.fi->fib_nhs; ret++) {
290 		struct fib_nh *nh = &res.fi->fib_nh[ret];
291 
292 		if (nh->nh_dev == dev) {
293 			dev_match = true;
294 			break;
295 		}
296 	}
297 #else
298 	if (FIB_RES_DEV(res) == dev)
299 		dev_match = true;
300 #endif
301 	if (dev_match) {
302 		ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
303 		return ret;
304 	}
305 	if (no_addr)
306 		goto last_resort;
307 	if (rpf == 1)
308 		goto e_rpf;
309 	fl.oif = dev->ifindex;
310 
311 	ret = 0;
312 	if (fib_lookup(net, &fl, &res) == 0) {
313 		if (res.type == RTN_UNICAST) {
314 			*spec_dst = FIB_RES_PREFSRC(res);
315 			ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
316 		}
317 	}
318 	return ret;
319 
320 last_resort:
321 	if (rpf)
322 		goto e_rpf;
323 	*spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
324 	*itag = 0;
325 	return 0;
326 
327 e_inval:
328 	return -EINVAL;
329 e_rpf:
330 	return -EXDEV;
331 }
332 
333 static inline __be32 sk_extract_addr(struct sockaddr *addr)
334 {
335 	return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
336 }
337 
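/* Append one RTAX_* metric as a raw u32 netlink attribute at offset "len"
 * inside the buffer "mx"; returns the new total length.
 */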
338 static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
339 {
340 	struct nlattr *nla;
341 
342 	nla = (struct nlattr *) ((char *) mx + len);
343 	nla->nla_type = type;
344 	nla->nla_len = nla_attr_size(4);
345 	*(u32 *) nla_data(nla) = value;
346 
347 	return len + nla_total_size(4);
348 }
349 
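/* Convert a legacy struct rtentry, as passed by the SIOCADDRT/SIOCDELRT
 * ioctls of the old "route" tool, into a struct fib_config understood by
 * the fib tables.
 */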
350 static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
351 				 struct fib_config *cfg)
352 {
353 	__be32 addr;
354 	int plen;
355 
356 	memset(cfg, 0, sizeof(*cfg));
357 	cfg->fc_nlinfo.nl_net = net;
358 
359 	if (rt->rt_dst.sa_family != AF_INET)
360 		return -EAFNOSUPPORT;
361 
362 	/*
363 	 * Check mask for validity:
364 	 * a) it must be contiguous.
365 	 * b) the destination must have all host bits clear.
366 	 * c) if the application forgot to set the correct family (AF_INET),
367 	 *    reject the request unless it is absolutely clear, i.e.
368 	 *    both family and mask are zero.
369 	 */
370 	plen = 32;
371 	addr = sk_extract_addr(&rt->rt_dst);
372 	if (!(rt->rt_flags & RTF_HOST)) {
373 		__be32 mask = sk_extract_addr(&rt->rt_genmask);
374 
375 		if (rt->rt_genmask.sa_family != AF_INET) {
376 			if (mask || rt->rt_genmask.sa_family)
377 				return -EAFNOSUPPORT;
378 		}
379 
380 		if (bad_mask(mask, addr))
381 			return -EINVAL;
382 
383 		plen = inet_mask_len(mask);
384 	}
385 
386 	cfg->fc_dst_len = plen;
387 	cfg->fc_dst = addr;
388 
389 	if (cmd != SIOCDELRT) {
390 		cfg->fc_nlflags = NLM_F_CREATE;
391 		cfg->fc_protocol = RTPROT_BOOT;
392 	}
393 
394 	if (rt->rt_metric)
395 		cfg->fc_priority = rt->rt_metric - 1;
396 
397 	if (rt->rt_flags & RTF_REJECT) {
398 		cfg->fc_scope = RT_SCOPE_HOST;
399 		cfg->fc_type = RTN_UNREACHABLE;
400 		return 0;
401 	}
402 
403 	cfg->fc_scope = RT_SCOPE_NOWHERE;
404 	cfg->fc_type = RTN_UNICAST;
405 
406 	if (rt->rt_dev) {
407 		char *colon;
408 		struct net_device *dev;
409 		char devname[IFNAMSIZ];
410 
411 		if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
412 			return -EFAULT;
413 
414 		devname[IFNAMSIZ-1] = 0;
415 		colon = strchr(devname, ':');
416 		if (colon)
417 			*colon = 0;
418 		dev = __dev_get_by_name(net, devname);
419 		if (!dev)
420 			return -ENODEV;
421 		cfg->fc_oif = dev->ifindex;
422 		if (colon) {
423 			struct in_ifaddr *ifa;
424 			struct in_device *in_dev = __in_dev_get_rtnl(dev);
425 			if (!in_dev)
426 				return -ENODEV;
427 			*colon = ':';
428 			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
429 				if (strcmp(ifa->ifa_label, devname) == 0)
430 					break;
431 			if (ifa == NULL)
432 				return -ENODEV;
433 			cfg->fc_prefsrc = ifa->ifa_local;
434 		}
435 	}
436 
437 	addr = sk_extract_addr(&rt->rt_gateway);
438 	if (rt->rt_gateway.sa_family == AF_INET && addr) {
439 		cfg->fc_gw = addr;
440 		if (rt->rt_flags & RTF_GATEWAY &&
441 		    inet_addr_type(net, addr) == RTN_UNICAST)
442 			cfg->fc_scope = RT_SCOPE_UNIVERSE;
443 	}
444 
445 	if (cmd == SIOCDELRT)
446 		return 0;
447 
448 	if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
449 		return -EINVAL;
450 
451 	if (cfg->fc_scope == RT_SCOPE_NOWHERE)
452 		cfg->fc_scope = RT_SCOPE_LINK;
453 
454 	if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
455 		struct nlattr *mx;
456 		int len = 0;
457 
458 		mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
459 		if (mx == NULL)
460 			return -ENOMEM;
461 
462 		if (rt->rt_flags & RTF_MTU)
463 			len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);
464 
465 		if (rt->rt_flags & RTF_WINDOW)
466 			len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);
467 
468 		if (rt->rt_flags & RTF_IRTT)
469 			len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);
470 
471 		cfg->fc_mx = mx;
472 		cfg->fc_mx_len = len;
473 	}
474 
475 	return 0;
476 }
477 
478 /*
479  * Handle IP routing ioctl calls (SIOCADDRT/SIOCDELRT, as issued by the
480  * legacy "route" command). These are used to manipulate the routing tables.
481  */
482 int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
483 {
484 	struct fib_config cfg;
485 	struct rtentry rt;
486 	int err;
487 
488 	switch (cmd) {
489 	case SIOCADDRT:		/* Add a route */
490 	case SIOCDELRT:		/* Delete a route */
491 		if (!capable(CAP_NET_ADMIN))
492 			return -EPERM;
493 
494 		if (copy_from_user(&rt, arg, sizeof(rt)))
495 			return -EFAULT;
496 
497 		rtnl_lock();
498 		err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
499 		if (err == 0) {
500 			struct fib_table *tb;
501 
502 			if (cmd == SIOCDELRT) {
503 				tb = fib_get_table(net, cfg.fc_table);
504 				if (tb)
505 					err = fib_table_delete(tb, &cfg);
506 				else
507 					err = -ESRCH;
508 			} else {
509 				tb = fib_new_table(net, cfg.fc_table);
510 				if (tb)
511 					err = fib_table_insert(tb, &cfg);
512 				else
513 					err = -ENOBUFS;
514 			}
515 
516 			/* allocated by rtentry_to_fib_config() */
517 			kfree(cfg.fc_mx);
518 		}
519 		rtnl_unlock();
520 		return err;
521 	}
522 	return -EINVAL;
523 }
524 
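/* Netlink attribute policy for IPv4 RTM_{NEW,DEL,GET}ROUTE messages. */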
525 const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
526 	[RTA_DST]		= { .type = NLA_U32 },
527 	[RTA_SRC]		= { .type = NLA_U32 },
528 	[RTA_IIF]		= { .type = NLA_U32 },
529 	[RTA_OIF]		= { .type = NLA_U32 },
530 	[RTA_GATEWAY]		= { .type = NLA_U32 },
531 	[RTA_PRIORITY]		= { .type = NLA_U32 },
532 	[RTA_PREFSRC]		= { .type = NLA_U32 },
533 	[RTA_METRICS]		= { .type = NLA_NESTED },
534 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
535 	[RTA_FLOW]		= { .type = NLA_U32 },
536 };
537 
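/* Translate an rtnetlink route message into a struct fib_config. */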
538 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
539 			     struct nlmsghdr *nlh, struct fib_config *cfg)
540 {
541 	struct nlattr *attr;
542 	int err, remaining;
543 	struct rtmsg *rtm;
544 
545 	err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
546 	if (err < 0)
547 		goto errout;
548 
549 	memset(cfg, 0, sizeof(*cfg));
550 
551 	rtm = nlmsg_data(nlh);
552 	cfg->fc_dst_len = rtm->rtm_dst_len;
553 	cfg->fc_tos = rtm->rtm_tos;
554 	cfg->fc_table = rtm->rtm_table;
555 	cfg->fc_protocol = rtm->rtm_protocol;
556 	cfg->fc_scope = rtm->rtm_scope;
557 	cfg->fc_type = rtm->rtm_type;
558 	cfg->fc_flags = rtm->rtm_flags;
559 	cfg->fc_nlflags = nlh->nlmsg_flags;
560 
561 	cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
562 	cfg->fc_nlinfo.nlh = nlh;
563 	cfg->fc_nlinfo.nl_net = net;
564 
565 	if (cfg->fc_type > RTN_MAX) {
566 		err = -EINVAL;
567 		goto errout;
568 	}
569 
570 	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
571 		switch (nla_type(attr)) {
572 		case RTA_DST:
573 			cfg->fc_dst = nla_get_be32(attr);
574 			break;
575 		case RTA_OIF:
576 			cfg->fc_oif = nla_get_u32(attr);
577 			break;
578 		case RTA_GATEWAY:
579 			cfg->fc_gw = nla_get_be32(attr);
580 			break;
581 		case RTA_PRIORITY:
582 			cfg->fc_priority = nla_get_u32(attr);
583 			break;
584 		case RTA_PREFSRC:
585 			cfg->fc_prefsrc = nla_get_be32(attr);
586 			break;
587 		case RTA_METRICS:
588 			cfg->fc_mx = nla_data(attr);
589 			cfg->fc_mx_len = nla_len(attr);
590 			break;
591 		case RTA_MULTIPATH:
592 			cfg->fc_mp = nla_data(attr);
593 			cfg->fc_mp_len = nla_len(attr);
594 			break;
595 		case RTA_FLOW:
596 			cfg->fc_flow = nla_get_u32(attr);
597 			break;
598 		case RTA_TABLE:
599 			cfg->fc_table = nla_get_u32(attr);
600 			break;
601 		}
602 	}
603 
604 	return 0;
605 errout:
606 	return err;
607 }
608 
609 static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
610 {
611 	struct net *net = sock_net(skb->sk);
612 	struct fib_config cfg;
613 	struct fib_table *tb;
614 	int err;
615 
616 	err = rtm_to_fib_config(net, skb, nlh, &cfg);
617 	if (err < 0)
618 		goto errout;
619 
620 	tb = fib_get_table(net, cfg.fc_table);
621 	if (tb == NULL) {
622 		err = -ESRCH;
623 		goto errout;
624 	}
625 
626 	err = fib_table_delete(tb, &cfg);
627 errout:
628 	return err;
629 }
630 
631 static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
632 {
633 	struct net *net = sock_net(skb->sk);
634 	struct fib_config cfg;
635 	struct fib_table *tb;
636 	int err;
637 
638 	err = rtm_to_fib_config(net, skb, nlh, &cfg);
639 	if (err < 0)
640 		goto errout;
641 
642 	tb = fib_new_table(net, cfg.fc_table);
643 	if (tb == NULL) {
644 		err = -ENOBUFS;
645 		goto errout;
646 	}
647 
648 	err = fib_table_insert(tb, &cfg);
649 errout:
650 	return err;
651 }
652 
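/* Dump all FIB tables to userspace.  cb->args[0] and cb->args[1] record
 * the hash bucket and the entry index so an interrupted dump can resume
 * where it left off.
 */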
653 static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
654 {
655 	struct net *net = sock_net(skb->sk);
656 	unsigned int h, s_h;
657 	unsigned int e = 0, s_e;
658 	struct fib_table *tb;
659 	struct hlist_node *node;
660 	struct hlist_head *head;
661 	int dumped = 0;
662 
663 	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
664 	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
665 		return ip_rt_dump(skb, cb);
666 
667 	s_h = cb->args[0];
668 	s_e = cb->args[1];
669 
670 	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
671 		e = 0;
672 		head = &net->ipv4.fib_table_hash[h];
673 		hlist_for_each_entry(tb, node, head, tb_hlist) {
674 			if (e < s_e)
675 				goto next;
676 			if (dumped)
677 				memset(&cb->args[2], 0, sizeof(cb->args) -
678 						 2 * sizeof(cb->args[0]));
679 			if (fib_table_dump(tb, skb, cb) < 0)
680 				goto out;
681 			dumped = 1;
682 next:
683 			e++;
684 		}
685 	}
686 out:
687 	cb->args[1] = e;
688 	cb->args[0] = h;
689 
690 	return skb->len;
691 }
692 
693 /* Prepare and feed an intra-kernel routing request.
694  * Really, it should be a netlink message, but :-( netlink
695  * can be left unconfigured, so we feed the request directly
696  * to the fib engine. This is legal because all events occur
697  * only when netlink is already locked.
698  */
699 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
700 {
701 	struct net *net = dev_net(ifa->ifa_dev->dev);
702 	struct fib_table *tb;
703 	struct fib_config cfg = {
704 		.fc_protocol = RTPROT_KERNEL,
705 		.fc_type = type,
706 		.fc_dst = dst,
707 		.fc_dst_len = dst_len,
708 		.fc_prefsrc = ifa->ifa_local,
709 		.fc_oif = ifa->ifa_dev->dev->ifindex,
710 		.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
711 		.fc_nlinfo = {
712 			.nl_net = net,
713 		},
714 	};
715 
716 	if (type == RTN_UNICAST)
717 		tb = fib_new_table(net, RT_TABLE_MAIN);
718 	else
719 		tb = fib_new_table(net, RT_TABLE_LOCAL);
720 
721 	if (tb == NULL)
722 		return;
723 
724 	cfg.fc_table = tb->tb_id;
725 
726 	if (type != RTN_LOCAL)
727 		cfg.fc_scope = RT_SCOPE_LINK;
728 	else
729 		cfg.fc_scope = RT_SCOPE_HOST;
730 
731 	if (cmd == RTM_NEWROUTE)
732 		fib_table_insert(tb, &cfg);
733 	else
734 		fib_table_delete(tb, &cfg);
735 }
736 
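/* Install the routes implied by a newly configured interface address: a
 * local /32 for the address itself, broadcast routes where appropriate
 * and, for a primary address, the network route covering its prefix.
 */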
737 void fib_add_ifaddr(struct in_ifaddr *ifa)
738 {
739 	struct in_device *in_dev = ifa->ifa_dev;
740 	struct net_device *dev = in_dev->dev;
741 	struct in_ifaddr *prim = ifa;
742 	__be32 mask = ifa->ifa_mask;
743 	__be32 addr = ifa->ifa_local;
744 	__be32 prefix = ifa->ifa_address & mask;
745 
746 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
747 		prim = inet_ifa_byprefix(in_dev, prefix, mask);
748 		if (prim == NULL) {
749 			printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n");
750 			return;
751 		}
752 	}
753 
754 	fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);
755 
756 	if (!(dev->flags & IFF_UP))
757 		return;
758 
759 	/* Add broadcast address, if it is explicitly assigned. */
760 	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
761 		fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
762 
763 	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
764 	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
765 		fib_magic(RTM_NEWROUTE,
766 			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
767 			  prefix, ifa->ifa_prefixlen, prim);
768 
769 		/* Add network-specific broadcasts, when it makes sense */
770 		if (ifa->ifa_prefixlen < 31) {
771 			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
772 			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
773 				  32, prim);
774 		}
775 	}
776 }
777 
778 static void fib_del_ifaddr(struct in_ifaddr *ifa)
779 {
780 	struct in_device *in_dev = ifa->ifa_dev;
781 	struct net_device *dev = in_dev->dev;
782 	struct in_ifaddr *ifa1;
783 	struct in_ifaddr *prim = ifa;
784 	__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
785 	__be32 any = ifa->ifa_address & ifa->ifa_mask;
786 #define LOCAL_OK	1
787 #define BRD_OK		2
788 #define BRD0_OK		4
789 #define BRD1_OK		8
790 	unsigned ok = 0;
791 
792 	if (!(ifa->ifa_flags & IFA_F_SECONDARY))
793 		fib_magic(RTM_DELROUTE,
794 			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
795 			  any, ifa->ifa_prefixlen, prim);
796 	else {
797 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
798 		if (prim == NULL) {
799 			printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n");
800 			return;
801 		}
802 	}
803 
804 	/* Deletion is more complicated than addition.
805 	 * We should take care not to delete too much :-)
806 	 *
807 	 * Scan the address list to be sure that the addresses are really gone.
808 	 */
809 
810 	for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
811 		if (ifa->ifa_local == ifa1->ifa_local)
812 			ok |= LOCAL_OK;
813 		if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
814 			ok |= BRD_OK;
815 		if (brd == ifa1->ifa_broadcast)
816 			ok |= BRD1_OK;
817 		if (any == ifa1->ifa_broadcast)
818 			ok |= BRD0_OK;
819 	}
820 
821 	if (!(ok & BRD_OK))
822 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
823 	if (!(ok & BRD1_OK))
824 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
825 	if (!(ok & BRD0_OK))
826 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
827 	if (!(ok & LOCAL_OK)) {
828 		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
829 
830 		/* Check that this local address finally disappeared. */
831 		if (inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
832 			/* And last, but not least:
833 			 * we must flush stray FIB entries.
834 			 *
835 			 * First we scan the fib_info list searching
836 			 * for stray nexthop entries, then trigger fib_flush.
837 			 */
838 			if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
839 				fib_flush(dev_net(dev));
840 		}
841 	}
842 #undef LOCAL_OK
843 #undef BRD_OK
844 #undef BRD0_OK
845 #undef BRD1_OK
846 }
847 
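/* Perform a lookup in the given table on behalf of a NETLINK_FIB_LOOKUP
 * request and copy the outcome back into the fib_result_nl.
 */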
848 static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
849 {
850 
851 	struct fib_result       res;
852 	struct flowi            fl = {
853 		.mark = frn->fl_mark,
854 		.fl4_dst = frn->fl_addr,
855 		.fl4_tos = frn->fl_tos,
856 		.fl4_scope = frn->fl_scope,
857 	};
858 
859 #ifdef CONFIG_IP_MULTIPLE_TABLES
860 	res.r = NULL;
861 #endif
862 
863 	frn->err = -ENOENT;
864 	if (tb) {
865 		local_bh_disable();
866 
867 		frn->tb_id = tb->tb_id;
868 		rcu_read_lock();
869 		frn->err = fib_table_lookup(tb, &fl, &res, FIB_LOOKUP_NOREF);
870 
871 		if (!frn->err) {
872 			frn->prefixlen = res.prefixlen;
873 			frn->nh_sel = res.nh_sel;
874 			frn->type = res.type;
875 			frn->scope = res.scope;
876 		}
877 		rcu_read_unlock();
878 		local_bh_enable();
879 	}
880 }
881 
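/* NETLINK_FIB_LOOKUP receive path: userspace sends a fib_result_nl, the
 * kernel fills in the lookup result and unicasts the skb back to the
 * sending socket.
 */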
882 static void nl_fib_input(struct sk_buff *skb)
883 {
884 	struct net *net;
885 	struct fib_result_nl *frn;
886 	struct nlmsghdr *nlh;
887 	struct fib_table *tb;
888 	u32 pid;
889 
890 	net = sock_net(skb->sk);
891 	nlh = nlmsg_hdr(skb);
892 	if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
893 	    nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn)))
894 		return;
895 
896 	skb = skb_clone(skb, GFP_KERNEL);
897 	if (skb == NULL)
898 		return;
899 	nlh = nlmsg_hdr(skb);
900 
901 	frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
902 	tb = fib_get_table(net, frn->tb_id_in);
903 
904 	nl_fib_lookup(frn, tb);
905 
906 	pid = NETLINK_CB(skb).pid;      /* pid of sending process */
907 	NETLINK_CB(skb).pid = 0;        /* from kernel */
908 	NETLINK_CB(skb).dst_group = 0;  /* unicast */
909 	netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
910 }
911 
912 static int __net_init nl_fib_lookup_init(struct net *net)
913 {
914 	struct sock *sk;
915 	sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
916 				   nl_fib_input, NULL, THIS_MODULE);
917 	if (sk == NULL)
918 		return -EAFNOSUPPORT;
919 	net->ipv4.fibnl = sk;
920 	return 0;
921 }
922 
923 static void nl_fib_lookup_exit(struct net *net)
924 {
925 	netlink_kernel_release(net->ipv4.fibnl);
926 	net->ipv4.fibnl = NULL;
927 }
928 
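/* Remove the routes that go through "dev", flush the routing cache and
 * tear down the device's ARP entries.
 */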
929 static void fib_disable_ip(struct net_device *dev, int force, int delay)
930 {
931 	if (fib_sync_down_dev(dev, force))
932 		fib_flush(dev_net(dev));
933 	rt_cache_flush(dev_net(dev), delay);
934 	arp_ifdown(dev);
935 }
936 
937 static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
938 {
939 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
940 	struct net_device *dev = ifa->ifa_dev->dev;
941 
942 	switch (event) {
943 	case NETDEV_UP:
944 		fib_add_ifaddr(ifa);
945 #ifdef CONFIG_IP_ROUTE_MULTIPATH
946 		fib_sync_up(dev);
947 #endif
948 		rt_cache_flush(dev_net(dev), -1);
949 		break;
950 	case NETDEV_DOWN:
951 		fib_del_ifaddr(ifa);
952 		if (ifa->ifa_dev->ifa_list == NULL) {
953 			/* Last address was deleted from this interface.
954 			 * Disable IP.
955 			 */
956 			fib_disable_ip(dev, 1, 0);
957 		} else {
958 			rt_cache_flush(dev_net(dev), -1);
959 		}
960 		break;
961 	}
962 	return NOTIFY_DONE;
963 }
964 
965 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
966 {
967 	struct net_device *dev = ptr;
968 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
969 
970 	if (event == NETDEV_UNREGISTER) {
971 		fib_disable_ip(dev, 2, -1);
972 		return NOTIFY_DONE;
973 	}
974 
975 	if (!in_dev)
976 		return NOTIFY_DONE;
977 
978 	switch (event) {
979 	case NETDEV_UP:
980 		for_ifa(in_dev) {
981 			fib_add_ifaddr(ifa);
982 		} endfor_ifa(in_dev);
983 #ifdef CONFIG_IP_ROUTE_MULTIPATH
984 		fib_sync_up(dev);
985 #endif
986 		rt_cache_flush(dev_net(dev), -1);
987 		break;
988 	case NETDEV_DOWN:
989 		fib_disable_ip(dev, 0, 0);
990 		break;
991 	case NETDEV_CHANGEMTU:
992 	case NETDEV_CHANGE:
993 		rt_cache_flush(dev_net(dev), 0);
994 		break;
995 	case NETDEV_UNREGISTER_BATCH:
996 		/* The batch unregister is only called on the first
997 		 * device in the list of devices being unregistered.
998 		 * Therefore we should not pass dev_net(dev) in here.
999 		 */
1000 		rt_cache_flush_batch(NULL);
1001 		break;
1002 	}
1003 	return NOTIFY_DONE;
1004 }
1005 
1006 static struct notifier_block fib_inetaddr_notifier = {
1007 	.notifier_call = fib_inetaddr_event,
1008 };
1009 
1010 static struct notifier_block fib_netdev_notifier = {
1011 	.notifier_call = fib_netdev_event,
1012 };
1013 
1014 static int __net_init ip_fib_net_init(struct net *net)
1015 {
1016 	int err;
1017 	size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;
1018 
1019 	/* Avoid false sharing: use at least a full cache line */
1020 	size = max_t(size_t, size, L1_CACHE_BYTES);
1021 
1022 	net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
1023 	if (net->ipv4.fib_table_hash == NULL)
1024 		return -ENOMEM;
1025 
1026 	err = fib4_rules_init(net);
1027 	if (err < 0)
1028 		goto fail;
1029 	return 0;
1030 
1031 fail:
1032 	kfree(net->ipv4.fib_table_hash);
1033 	return err;
1034 }
1035 
1036 static void ip_fib_net_exit(struct net *net)
1037 {
1038 	unsigned int i;
1039 
1040 #ifdef CONFIG_IP_MULTIPLE_TABLES
1041 	fib4_rules_exit(net);
1042 #endif
1043 
1044 	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
1045 		struct fib_table *tb;
1046 		struct hlist_head *head;
1047 		struct hlist_node *node, *tmp;
1048 
1049 		head = &net->ipv4.fib_table_hash[i];
1050 		hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
1051 			hlist_del(node);
1052 			fib_table_flush(tb);
1053 			fib_free_table(tb);
1054 		}
1055 	}
1056 	kfree(net->ipv4.fib_table_hash);
1057 }
1058 
1059 static int __net_init fib_net_init(struct net *net)
1060 {
1061 	int error;
1062 
1063 	error = ip_fib_net_init(net);
1064 	if (error < 0)
1065 		goto out;
1066 	error = nl_fib_lookup_init(net);
1067 	if (error < 0)
1068 		goto out_nlfl;
1069 	error = fib_proc_init(net);
1070 	if (error < 0)
1071 		goto out_proc;
1072 out:
1073 	return error;
1074 
1075 out_proc:
1076 	nl_fib_lookup_exit(net);
1077 out_nlfl:
1078 	ip_fib_net_exit(net);
1079 	goto out;
1080 }
1081 
1082 static void __net_exit fib_net_exit(struct net *net)
1083 {
1084 	fib_proc_exit(net);
1085 	nl_fib_lookup_exit(net);
1086 	ip_fib_net_exit(net);
1087 }
1088 
1089 static struct pernet_operations fib_net_ops = {
1090 	.init = fib_net_init,
1091 	.exit = fib_net_exit,
1092 };
1093 
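/* Called once at boot: register the rtnetlink route handlers, the
 * per-namespace init/exit operations and the device/address notifiers.
 */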
1094 void __init ip_fib_init(void)
1095 {
1096 	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL);
1097 	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL);
1098 	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib);
1099 
1100 	register_pernet_subsys(&fib_net_ops);
1101 	register_netdevice_notifier(&fib_netdev_notifier);
1102 	register_inetaddr_notifier(&fib_inetaddr_notifier);
1103 
1104 	fib_hash_init();
1105 }
1106