/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *      Carlos Picoto           :       PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is entirely free of exclusive locks.
 */
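
/* Locking sketch (as implemented below, not a formal guarantee):
 * configuration changes run under RTNL; writers additionally take
 * mrt_lock (bh-safe) when touching the vif table, while the packet
 * path only ever takes read_lock(&mrt_lock) or relies on RCU for
 * resolved cache lookups.
 */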

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif

static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mfc_cache, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint = 3,
	.locks_mul = 1,
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};
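
/* Note: following rhashtable convention, ipmr_hash_cmp() returns 0 on a
 * match. The cache is keyed on the (origin, group) pair; an rhltable is
 * used (rather than a plain rhashtable) so that several entries with the
 * same key but different parent vifs can share one chain.
 */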

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}
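
/* Both tunnel helpers (ipmr_del_tunnel above and ipmr_new_tunnel below)
 * drive the ipip driver through its ioctl interface with an in-kernel
 * ip_tunnel_parm: the temporary set_fs(KERNEL_DS) window makes the
 * driver's copy_from_user() accept a kernel pointer. This was the
 * conventional (if ugly) idiom before dedicated kernel-side tunnel APIs
 * existed.
 */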

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device  *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

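/* The pimreg device never transmits on the wire; anything routed to it is
 * bounced to the PIM daemon via reg_vif_xmit() above. The reduced MTU set
 * below is meant to leave headroom for the register encapsulation - an
 * outer IP header plus (assuming the standard PIM register framing) an
 * 8-byte PIM header.
 */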
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

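/* __pim_rcv() below is the receive-side counterpart: after the caller has
 * validated the PIM register header, it strips the outer headers, makes
 * the inner multicast packet look as if it arrived on the pimreg device
 * (skb_tunnel_rx()) and feeds it back into the stack with netif_rx().
 */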
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1 if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

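/* Cache entries are freed through call_rcu() because the forwarding path
 * looks them up under rcu_read_lock() only (see ipmr_cache_find() and
 * ip_mr_input()); the grace period guarantees no reader still holds a
 * pointer when the memory is returned to the slab.
 */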
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting an error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

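/* The expire timer fires in softirq context, so it must not sleep on
 * mfc_unres_lock; if the trylock fails it simply re-arms itself a tenth
 * of a second later and tries again.
 */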
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill the oifs list. Called under write-locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}

static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is the vif busy? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special-purpose VIF in PIM:
		 * all packets will be sent to the daemon.
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		return c;

	return NULL;
}
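
/* Note that the plain (S,G) lookup above returns the first entry on the
 * rhlist chain regardless of its parent vif; when the parent matters,
 * ipmr_cache_find_parent() below walks the same chain and qualifies the
 * match on mfc_parent.
 */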

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = htonl(INADDR_ANY),
			.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin,
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->mfc_un.res.minvif = MAXVIFS;
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
		}
	}
}

/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
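/* The upcall is delivered on the daemon's raw IGMP socket as a fake IGMP
 * packet: struct igmpmsg overlays the IP header, the IGMP "type" field
 * carries the IGMPMSG_* code, and (for non-WHOLEPKT reports) the IP
 * protocol field is zeroed so the daemon can tell kernel upcalls apart
 * from real IGMP traffic.
 */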
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* A zeroed protocol field flags this packet as an upcall */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/* Queue a packet for resolution; takes mfc_unres_lock itself. */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Bounce the first packet up to mrouted as a NOCACHE report */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed, throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
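
/* The hard-coded limits above (at most 10 pending unresolved entries, at
 * most 4 skbs queued per entry) bound the memory an unresponsive or slow
 * daemon can pin in the kernel while routes are being resolved.
 */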

/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	struct mfc_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
	list_del_rcu(&c->list);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
	ipmr_cache_free(c);

	return 0;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	struct mfc_cache *uc, *c;
	bool found;
	int ret;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	struct mfc_cache *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
		list_del_rcu(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_cache_free(c);
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* Called from ip_ra_control(), before an RCU grace period;
 * we don't need to call synchronize_rcu() here.
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	ASSERT_RTNL();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

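/* Typical userspace sequence (a sketch, not taken from any particular
 * daemon): a routing daemon opens a raw IGMP socket and drives this API
 * roughly as
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));  // struct vifctl vc
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));  // struct mfcctl mc
 *
 * and tears everything down again with MRT_DONE (or by closing the
 * socket, which reaches mrtsock_destruct() above).
 */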
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			ret = ip_ra_control(sk, 0, NULL);
			goto out_unlock;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
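		/* fall through - the proxy variants below share this
		 * handling; for them, parent is taken from mfcc_parent
		 */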
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
	return ret;
}

/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	/* Reject negative lengths before the unsigned min_t() comparison
	 * would silently clamp them to sizeof(int).
	 */
	if (olr < 0)
		return -EINVAL;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed
 * that is so important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}

/* Processing handlers for ipmr_forward */
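/* Per-vif transmit path: register vifs hand the whole packet to the
 * daemon as an IGMPMSG_WHOLEPKT upcall, tunnel vifs get an IPIP header
 * via ip_encap(), and ordinary vifs are plainly forwarded; every real
 * transmission decrements the TTL and passes through the
 * NF_INET_FORWARD netfilter hook.
 */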

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow us to send ICMP here, so oversized packets
		 * simply disappear into a black hole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on.
	 * If we do not arrange this, the program will have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple copies of each packet.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local)
{
	int true_vifi = ipmr_find_vif(mrt, dev);
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) entries whose default multicast route points
			 * to the wrong oif. In any case, it is not a good
			 * idea to run multicast applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from the RPT to
		     * the SPT, so we cannot check that the packet arrived
		     * on an oif. It is bad, but otherwise we would need
		     * to move a pretty large chunk of pimd into the
		     * kernel. Ough... --ANK
		     */
1884 		    (mrt->mroute_do_pim ||
1885 		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
1886 		    time_after(jiffies,
1887 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1888 			cache->mfc_un.res.last_assert = jiffies;
1889 			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1890 		}
1891 		goto dont_forward;
1892 	}
1893 
1894 forward:
1895 	mrt->vif_table[vif].pkt_in++;
1896 	mrt->vif_table[vif].bytes_in += skb->len;
1897 
1898 	/* Forward the frame */
1899 	if (cache->mfc_origin == htonl(INADDR_ANY) &&
1900 	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
1901 		if (true_vifi >= 0 &&
1902 		    true_vifi != cache->mfc_parent &&
1903 		    ip_hdr(skb)->ttl >
1904 				cache->mfc_un.res.ttls[cache->mfc_parent]) {
1905 			/* It's an (*,*) entry and the packet is not coming from
1906 			 * the upstream: forward the packet to the upstream
1907 			 * only.
1908 			 */
1909 			psend = cache->mfc_parent;
1910 			goto last_forward;
1911 		}
1912 		goto dont_forward;
1913 	}
1914 	for (ct = cache->mfc_un.res.maxvif - 1;
1915 	     ct >= cache->mfc_un.res.minvif; ct--) {
1916 		/* For (*,G) entry, don't forward to the incoming interface */
1917 		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
1918 		     ct != true_vifi) &&
1919 		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1920 			if (psend != -1) {
1921 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1922 
1923 				if (skb2)
1924 					ipmr_queue_xmit(net, mrt, skb2, cache,
1925 							psend);
1926 			}
1927 			psend = ct;
1928 		}
1929 	}
1930 last_forward:
1931 	if (psend != -1) {
1932 		if (local) {
1933 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1934 
1935 			if (skb2)
1936 				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1937 		} else {
1938 			ipmr_queue_xmit(net, mrt, skb, cache, psend);
1939 			return;
1940 		}
1941 	}
1942 
1943 dont_forward:
1944 	if (!local)
1945 		kfree_skb(skb);
1946 }
1947 
1948 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
1949 {
1950 	struct rtable *rt = skb_rtable(skb);
1951 	struct iphdr *iph = ip_hdr(skb);
1952 	struct flowi4 fl4 = {
1953 		.daddr = iph->daddr,
1954 		.saddr = iph->saddr,
1955 		.flowi4_tos = RT_TOS(iph->tos),
1956 		.flowi4_oif = (rt_is_output_route(rt) ?
1957 			       skb->dev->ifindex : 0),
1958 		.flowi4_iif = (rt_is_output_route(rt) ?
1959 			       LOOPBACK_IFINDEX :
1960 			       skb->dev->ifindex),
1961 		.flowi4_mark = skb->mark,
1962 	};
1963 	struct mr_table *mrt;
1964 	int err;
1965 
1966 	err = ipmr_fib_lookup(net, &fl4, &mrt);
1967 	if (err)
1968 		return ERR_PTR(err);
1969 	return mrt;
1970 }
1971 
1972 /* Multicast packets for forwarding arrive here.
1973  * Called with rcu_read_lock() held.
1974  */
1975 int ip_mr_input(struct sk_buff *skb)
1976 {
1977 	struct mfc_cache *cache;
1978 	struct net *net = dev_net(skb->dev);
1979 	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1980 	struct mr_table *mrt;
1981 	struct net_device *dev;
1982 
1983 	/* skb->dev passed in is the loX master dev for vrfs.
1984 	 * As there are no vifs associated with loopback devices,
1985 	 * get the proper interface that does have a vif associated with it.
1986 	 */
1987 	dev = skb->dev;
1988 	if (netif_is_l3_master(skb->dev)) {
1989 		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
1990 		if (!dev) {
1991 			kfree_skb(skb);
1992 			return -ENODEV;
1993 		}
1994 	}
1995 
1996 	/* A packet looped back after forwarding should not be
1997 	 * forwarded a second time, but it can still be delivered locally.
1998 	 */
1999 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
2000 		goto dont_forward;
2001 
2002 	mrt = ipmr_rt_fib_lookup(net, skb);
2003 	if (IS_ERR(mrt)) {
2004 		kfree_skb(skb);
2005 		return PTR_ERR(mrt);
2006 	}
2007 	if (!local) {
2008 		if (IPCB(skb)->opt.router_alert) {
2009 			if (ip_call_ra_chain(skb))
2010 				return 0;
2011 		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
2012 			/* IGMPv1 (and broken IGMPv2 implementations such as
2013 			 * Cisco IOS <= 11.2(8)) do not put the router alert
2014 			 * option in IGMP packets destined to routable
2015 			 * groups. This is very bad, because it means
2016 			 * that we can forward NO IGMP messages.
2017 			 */
2018 			struct sock *mroute_sk;
2019 
2020 			mroute_sk = rcu_dereference(mrt->mroute_sk);
2021 			if (mroute_sk) {
2022 				nf_reset(skb);
2023 				raw_rcv(mroute_sk, skb);
2024 				return 0;
2025 			}
2026 		}
2027 	}
2028 
2029 	/* already under rcu_read_lock() */
2030 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2031 	if (!cache) {
2032 		int vif = ipmr_find_vif(mrt, dev);
2033 
2034 		if (vif >= 0)
2035 			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2036 						    vif);
2037 	}
2038 
2039 	/* No usable cache entry */
2040 	if (!cache) {
2041 		int vif;
2042 
2043 		if (local) {
2044 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2045 			ip_local_deliver(skb);
2046 			if (!skb2)
2047 				return -ENOBUFS;
2048 			skb = skb2;
2049 		}
2050 
2051 		read_lock(&mrt_lock);
2052 		vif = ipmr_find_vif(mrt, dev);
2053 		if (vif >= 0) {
2054 			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
2055 			read_unlock(&mrt_lock);
2056 
2057 			return err2;
2058 		}
2059 		read_unlock(&mrt_lock);
2060 		kfree_skb(skb);
2061 		return -ENODEV;
2062 	}
2063 
2064 	read_lock(&mrt_lock);
2065 	ip_mr_forward(net, mrt, dev, skb, cache, local);
2066 	read_unlock(&mrt_lock);
2067 
2068 	if (local)
2069 		return ip_local_deliver(skb);
2070 
2071 	return 0;
2072 
2073 dont_forward:
2074 	if (local)
2075 		return ip_local_deliver(skb);
2076 	kfree_skb(skb);
2077 	return 0;
2078 }
2079 
2080 #ifdef CONFIG_IP_PIMSM_V1
2081 /* Handle the IGMP-encapsulated register messages used by PIMv1 */
2082 int pim_rcv_v1(struct sk_buff *skb)
2083 {
2084 	struct igmphdr *pim;
2085 	struct net *net = dev_net(skb->dev);
2086 	struct mr_table *mrt;
2087 
2088 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2089 		goto drop;
2090 
2091 	pim = igmp_hdr(skb);
2092 
2093 	mrt = ipmr_rt_fib_lookup(net, skb);
2094 	if (IS_ERR(mrt))
2095 		goto drop;
2096 	if (!mrt->mroute_do_pim ||
2097 	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2098 		goto drop;
2099 
2100 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2101 drop:
2102 		kfree_skb(skb);
2103 	}
2104 	return 0;
2105 }
2106 #endif
2107 
2108 #ifdef CONFIG_IP_PIMSM_V2
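/* Handle native PIMv2 register messages. __pim_rcv() consumes the skb on
 * success and returns nonzero when the caller must free it.
 */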
2109 static int pim_rcv(struct sk_buff *skb)
2110 {
2111 	struct pimreghdr *pim;
2112 	struct net *net = dev_net(skb->dev);
2113 	struct mr_table *mrt;
2114 
2115 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2116 		goto drop;
2117 
2118 	pim = (struct pimreghdr *)skb_transport_header(skb);
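	/* Accept only register messages; the checksum may cover either the
	 * PIM header alone or the whole packet, and null-registers are
	 * dropped.
	 */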
2119 	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
2120 	    (pim->flags & PIM_NULL_REGISTER) ||
2121 	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
2122 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
2123 		goto drop;
2124 
2125 	mrt = ipmr_rt_fib_lookup(net, skb);
2126 	if (IS_ERR(mrt))
2127 		goto drop;
2128 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2129 drop:
2130 		kfree_skb(skb);
2131 	}
2132 	return 0;
2133 }
2134 #endif
2135 
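/* Fill the routing attributes for cache entry @c into @skb: RTA_IIF for
 * the parent vif, an RTA_MULTIPATH nest with one rtnexthop per output
 * vif (rtnh_hops carries the TTL threshold), plus RTA_MFC_STATS and
 * RTA_EXPIRES. Returns 1 on success, -ENOENT for unresolved entries and
 * -EMSGSIZE when the skb is full.
 */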
2136 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2137 			      struct mfc_cache *c, struct rtmsg *rtm)
2138 {
2139 	struct rta_mfc_stats mfcs;
2140 	struct nlattr *mp_attr;
2141 	struct rtnexthop *nhp;
2142 	unsigned long lastuse;
2143 	int ct;
2144 
2145 	/* If the cache entry is unresolved, don't try to fill in IIF and OIF */
2146 	if (c->mfc_parent >= MAXVIFS) {
2147 		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
2148 		return -ENOENT;
2149 	}
2150 
2151 	if (VIF_EXISTS(mrt, c->mfc_parent) &&
2152 	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
2153 		return -EMSGSIZE;
2154 
2155 	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
2156 		return -EMSGSIZE;
2157 
2158 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2159 		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2160 			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
2161 				nla_nest_cancel(skb, mp_attr);
2162 				return -EMSGSIZE;
2163 			}
2164 
2165 			nhp->rtnh_flags = 0;
2166 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2167 			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
2168 			nhp->rtnh_len = sizeof(*nhp);
2169 		}
2170 	}
2171 
2172 	nla_nest_end(skb, mp_attr);
2173 
2174 	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2175 	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2176 
2177 	mfcs.mfcs_packets = c->mfc_un.res.pkt;
2178 	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2179 	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2180 	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
2181 	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
2182 			      RTA_PAD))
2183 		return -EMSGSIZE;
2184 
2185 	rtm->rtm_type = RTN_MULTICAST;
2186 	return 1;
2187 }
2188 
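/* Resolve an RTM_GETROUTE request for (saddr, daddr). If no cache entry
 * exists, a dummy IP header is pushed onto a clone of @skb and the clone
 * is queued as an unresolved entry so the routing daemon can resolve it.
 */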
2189 int ipmr_get_route(struct net *net, struct sk_buff *skb,
2190 		   __be32 saddr, __be32 daddr,
2191 		   struct rtmsg *rtm, u32 portid)
2192 {
2193 	struct mfc_cache *cache;
2194 	struct mr_table *mrt;
2195 	int err;
2196 
2197 	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2198 	if (!mrt)
2199 		return -ENOENT;
2200 
2201 	rcu_read_lock();
2202 	cache = ipmr_cache_find(mrt, saddr, daddr);
2203 	if (!cache && skb->dev) {
2204 		int vif = ipmr_find_vif(mrt, skb->dev);
2205 
2206 		if (vif >= 0)
2207 			cache = ipmr_cache_find_any(mrt, daddr, vif);
2208 	}
2209 	if (!cache) {
2210 		struct sk_buff *skb2;
2211 		struct iphdr *iph;
2212 		struct net_device *dev;
2213 		int vif = -1;
2214 
2215 		dev = skb->dev;
2216 		read_lock(&mrt_lock);
2217 		if (dev)
2218 			vif = ipmr_find_vif(mrt, dev);
2219 		if (vif < 0) {
2220 			read_unlock(&mrt_lock);
2221 			rcu_read_unlock();
2222 			return -ENODEV;
2223 		}
2224 		skb2 = skb_clone(skb, GFP_ATOMIC);
2225 		if (!skb2) {
2226 			read_unlock(&mrt_lock);
2227 			rcu_read_unlock();
2228 			return -ENOMEM;
2229 		}
2230 
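		/* Fabricate a minimal IP header so the unresolved-queue
		 * machinery has something to report; version 0 lets the
		 * cache code recognise the dummy header later.
		 */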
2231 		NETLINK_CB(skb2).portid = portid;
2232 		skb_push(skb2, sizeof(struct iphdr));
2233 		skb_reset_network_header(skb2);
2234 		iph = ip_hdr(skb2);
2235 		iph->ihl = sizeof(struct iphdr) >> 2;
2236 		iph->saddr = saddr;
2237 		iph->daddr = daddr;
2238 		iph->version = 0;
2239 		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
2240 		read_unlock(&mrt_lock);
2241 		rcu_read_unlock();
2242 		return err;
2243 	}
2244 
2245 	read_lock(&mrt_lock);
2246 	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
2247 	read_unlock(&mrt_lock);
2248 	rcu_read_unlock();
2249 	return err;
2250 }
2251 
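/* Build a complete rtnetlink route message (netlink header plus rtmsg
 * and attributes) for cache entry @c; the per-vif details are delegated
 * to __ipmr_fill_mroute().
 */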
2252 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2253 			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2254 			    int flags)
2255 {
2256 	struct nlmsghdr *nlh;
2257 	struct rtmsg *rtm;
2258 	int err;
2259 
2260 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2261 	if (!nlh)
2262 		return -EMSGSIZE;
2263 
2264 	rtm = nlmsg_data(nlh);
2265 	rtm->rtm_family   = RTNL_FAMILY_IPMR;
2266 	rtm->rtm_dst_len  = 32;
2267 	rtm->rtm_src_len  = 32;
2268 	rtm->rtm_tos      = 0;
2269 	rtm->rtm_table    = mrt->id;
2270 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2271 		goto nla_put_failure;
2272 	rtm->rtm_type     = RTN_MULTICAST;
2273 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2274 	if (c->mfc_flags & MFC_STATIC)
2275 		rtm->rtm_protocol = RTPROT_STATIC;
2276 	else
2277 		rtm->rtm_protocol = RTPROT_MROUTED;
2278 	rtm->rtm_flags    = 0;
2279 
2280 	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2281 	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
2282 		goto nla_put_failure;
2283 	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
2284 	/* do not break the dump if cache is unresolved */
2285 	if (err < 0 && err != -ENOENT)
2286 		goto nla_put_failure;
2287 
2288 	nlmsg_end(skb, nlh);
2289 	return 0;
2290 
2291 nla_put_failure:
2292 	nlmsg_cancel(skb, nlh);
2293 	return -EMSGSIZE;
2294 }
2295 
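/* Worst-case netlink message size for one cache entry; unresolved
 * entries carry only the table, source and destination attributes.
 */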
2296 static size_t mroute_msgsize(bool unresolved, int maxvif)
2297 {
2298 	size_t len =
2299 		NLMSG_ALIGN(sizeof(struct rtmsg))
2300 		+ nla_total_size(4)	/* RTA_TABLE */
2301 		+ nla_total_size(4)	/* RTA_SRC */
2302 		+ nla_total_size(4)	/* RTA_DST */
2303 		;
2304 
2305 	if (!unresolved)
2306 		len = len
2307 		      + nla_total_size(4)	/* RTA_IIF */
2308 		      + nla_total_size(0)	/* RTA_MULTIPATH */
2309 		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2310 						/* RTA_MFC_STATS */
2311 		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2312 		;
2313 
2314 	return len;
2315 }
2316 
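/* Notify RTNLGRP_IPV4_MROUTE listeners that a cache entry was added,
 * deleted or changed (@cmd is the RTM_* message type).
 */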
2317 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2318 				 int cmd)
2319 {
2320 	struct net *net = read_pnet(&mrt->net);
2321 	struct sk_buff *skb;
2322 	int err = -ENOBUFS;
2323 
2324 	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2325 			GFP_ATOMIC);
2326 	if (!skb)
2327 		goto errout;
2328 
2329 	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2330 	if (err < 0)
2331 		goto errout;
2332 
2333 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2334 	return;
2335 
2336 errout:
2337 	kfree_skb(skb);
2338 	if (err < 0)
2339 		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2340 }
2341 
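/* Dump all multicast routes to netlink. cb->args[0] and cb->args[1]
 * record the table and entry indices reached so far, so an interrupted
 * dump resumes where it left off; resolved entries are walked under RCU
 * and unresolved ones under mfc_unres_lock.
 */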
2342 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2343 {
2344 	struct net *net = sock_net(skb->sk);
2345 	struct mr_table *mrt;
2346 	struct mfc_cache *mfc;
2347 	unsigned int t = 0, s_t;
2348 	unsigned int e = 0, s_e;
2349 
2350 	s_t = cb->args[0];
2351 	s_e = cb->args[1];
2352 
2353 	rcu_read_lock();
2354 	ipmr_for_each_table(mrt, net) {
2355 		if (t < s_t)
2356 			goto next_table;
2357 		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
2358 			if (e < s_e)
2359 				goto next_entry;
2360 			if (ipmr_fill_mroute(mrt, skb,
2361 					     NETLINK_CB(cb->skb).portid,
2362 					     cb->nlh->nlmsg_seq,
2363 					     mfc, RTM_NEWROUTE,
2364 					     NLM_F_MULTI) < 0)
2365 				goto done;
2366 next_entry:
2367 			e++;
2368 		}
2369 		e = 0;
2370 		s_e = 0;
2371 
2372 		spin_lock_bh(&mfc_unres_lock);
2373 		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2374 			if (e < s_e)
2375 				goto next_entry2;
2376 			if (ipmr_fill_mroute(mrt, skb,
2377 					     NETLINK_CB(cb->skb).portid,
2378 					     cb->nlh->nlmsg_seq,
2379 					     mfc, RTM_NEWROUTE,
2380 					     NLM_F_MULTI) < 0) {
2381 				spin_unlock_bh(&mfc_unres_lock);
2382 				goto done;
2383 			}
2384 next_entry2:
2385 			e++;
2386 		}
2387 		spin_unlock_bh(&mfc_unres_lock);
2388 		e = 0;
2389 		s_e = 0;
2390 next_table:
2391 		t++;
2392 	}
2393 done:
2394 	rcu_read_unlock();
2395 
2396 	cb->args[1] = e;
2397 	cb->args[0] = t;
2398 
2399 	return skb->len;
2400 }
2401 
2402 static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
2403 	[RTA_SRC]	= { .type = NLA_U32 },
2404 	[RTA_DST]	= { .type = NLA_U32 },
2405 	[RTA_IIF]	= { .type = NLA_U32 },
2406 	[RTA_TABLE]	= { .type = NLA_U32 },
2407 	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
2408 };
2409 
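/* Only routes installed by a daemon (RTPROT_MROUTED) or configured
 * statically (RTPROT_STATIC) may be manipulated via rtnetlink.
 */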
2410 static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
2411 {
2412 	switch (rtm_protocol) {
2413 	case RTPROT_STATIC:
2414 	case RTPROT_MROUTED:
2415 		return true;
2416 	}
2417 	return false;
2418 }
2419 
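/* Copy the TTL thresholds out of an RTA_MULTIPATH attribute, one
 * rtnexthop per vif (rtnh_hops holds the threshold). Returns the number
 * of vifs parsed, or -EINVAL if unparsed data remains, e.g. when more
 * than MAXVIFS hops were supplied.
 */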
2420 static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
2421 {
2422 	struct rtnexthop *rtnh = nla_data(nla);
2423 	int remaining = nla_len(nla), vifi = 0;
2424 
2425 	while (rtnh_ok(rtnh, remaining)) {
2426 		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
2427 		if (++vifi == MAXVIFS)
2428 			break;
2429 		rtnh = rtnh_next(rtnh, &remaining);
2430 	}
2431 
2432 	return remaining > 0 ? -EINVAL : vifi;
2433 }
2434 
2435 /* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
2436 static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
2437 			    struct mfcctl *mfcc, int *mrtsock,
2438 			    struct mr_table **mrtret,
2439 			    struct netlink_ext_ack *extack)
2440 {
2441 	struct net_device *dev = NULL;
2442 	u32 tblid = RT_TABLE_DEFAULT;
2443 	struct mr_table *mrt;
2444 	struct nlattr *attr;
2445 	struct rtmsg *rtm;
2446 	int ret, rem;
2447 
2448 	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,
2449 			     extack);
2450 	if (ret < 0)
2451 		goto out;
2452 	rtm = nlmsg_data(nlh);
2453 
2454 	ret = -EINVAL;
2455 	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
2456 	    rtm->rtm_type != RTN_MULTICAST ||
2457 	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
2458 	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
2459 		goto out;
2460 
2461 	memset(mfcc, 0, sizeof(*mfcc));
2462 	mfcc->mfcc_parent = -1;
2463 	ret = 0;
2464 	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
2465 		switch (nla_type(attr)) {
2466 		case RTA_SRC:
2467 			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
2468 			break;
2469 		case RTA_DST:
2470 			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
2471 			break;
2472 		case RTA_IIF:
2473 			dev = __dev_get_by_index(net, nla_get_u32(attr));
2474 			if (!dev) {
2475 				ret = -ENODEV;
2476 				goto out;
2477 			}
2478 			break;
2479 		case RTA_MULTIPATH:
2480 			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
2481 				ret = -EINVAL;
2482 				goto out;
2483 			}
2484 			break;
2485 		case RTA_PREFSRC:
2486 			ret = 1;
2487 			break;
2488 		case RTA_TABLE:
2489 			tblid = nla_get_u32(attr);
2490 			break;
2491 		}
2492 	}
2493 	mrt = ipmr_get_table(net, tblid);
2494 	if (!mrt) {
2495 		ret = -ENOENT;
2496 		goto out;
2497 	}
2498 	*mrtret = mrt;
2499 	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
2500 	if (dev)
2501 		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
2502 
2503 out:
2504 	return ret;
2505 }
2506 
2507 /* Handles both RTM_NEWROUTE and RTM_DELROUTE requests */
2508 static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
2509 			  struct netlink_ext_ack *extack)
2510 {
2511 	struct net *net = sock_net(skb->sk);
2512 	int ret, mrtsock, parent;
2513 	struct mr_table *tbl;
2514 	struct mfcctl mfcc;
2515 
2516 	mrtsock = 0;
2517 	tbl = NULL;
2518 	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
2519 	if (ret < 0)
2520 		return ret;
2521 
2522 	parent = ret ? mfcc.mfcc_parent : -1;
2523 	if (nlh->nlmsg_type == RTM_NEWROUTE)
2524 		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
2525 	else
2526 		return ipmr_mfc_delete(tbl, &mfcc, parent);
2527 }
2528 
2529 #ifdef CONFIG_PROC_FS
2530 /* The /proc interfaces to multicast routing:
2531  * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
2532  */
2533 struct ipmr_vif_iter {
2534 	struct seq_net_private p;
2535 	struct mr_table *mrt;
2536 	int ct;
2537 };
2538 
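/* Advance to the @pos'th existing vif, skipping holes in the table */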
2539 static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2540 					   struct ipmr_vif_iter *iter,
2541 					   loff_t pos)
2542 {
2543 	struct mr_table *mrt = iter->mrt;
2544 
2545 	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2546 		if (!VIF_EXISTS(mrt, iter->ct))
2547 			continue;
2548 		if (pos-- == 0)
2549 			return &mrt->vif_table[iter->ct];
2550 	}
2551 	return NULL;
2552 }
2553 
2554 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2555 	__acquires(mrt_lock)
2556 {
2557 	struct ipmr_vif_iter *iter = seq->private;
2558 	struct net *net = seq_file_net(seq);
2559 	struct mr_table *mrt;
2560 
2561 	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2562 	if (!mrt)
2563 		return ERR_PTR(-ENOENT);
2564 
2565 	iter->mrt = mrt;
2566 
2567 	read_lock(&mrt_lock);
2568 	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
2569 		: SEQ_START_TOKEN;
2570 }
2571 
2572 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2573 {
2574 	struct ipmr_vif_iter *iter = seq->private;
2575 	struct net *net = seq_file_net(seq);
2576 	struct mr_table *mrt = iter->mrt;
2577 
2578 	++*pos;
2579 	if (v == SEQ_START_TOKEN)
2580 		return ipmr_vif_seq_idx(net, iter, 0);
2581 
2582 	while (++iter->ct < mrt->maxvif) {
2583 		if (!VIF_EXISTS(mrt, iter->ct))
2584 			continue;
2585 		return &mrt->vif_table[iter->ct];
2586 	}
2587 	return NULL;
2588 }
2589 
2590 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2591 	__releases(mrt_lock)
2592 {
2593 	read_unlock(&mrt_lock);
2594 }
2595 
2596 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2597 {
2598 	struct ipmr_vif_iter *iter = seq->private;
2599 	struct mr_table *mrt = iter->mrt;
2600 
2601 	if (v == SEQ_START_TOKEN) {
2602 		seq_puts(seq,
2603 			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
2604 	} else {
2605 		const struct vif_device *vif = v;
2606 		const char *name =  vif->dev ? vif->dev->name : "none";
2607 
2608 		seq_printf(seq,
2609 			   "%2zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
2610 			   vif - mrt->vif_table,
2611 			   name, vif->bytes_in, vif->pkt_in,
2612 			   vif->bytes_out, vif->pkt_out,
2613 			   vif->flags, vif->local, vif->remote);
2614 	}
2615 	return 0;
2616 }
2617 
2618 static const struct seq_operations ipmr_vif_seq_ops = {
2619 	.start = ipmr_vif_seq_start,
2620 	.next  = ipmr_vif_seq_next,
2621 	.stop  = ipmr_vif_seq_stop,
2622 	.show  = ipmr_vif_seq_show,
2623 };
2624 
2625 static int ipmr_vif_open(struct inode *inode, struct file *file)
2626 {
2627 	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2628 			    sizeof(struct ipmr_vif_iter));
2629 }
2630 
2631 static const struct file_operations ipmr_vif_fops = {
2632 	.owner	 = THIS_MODULE,
2633 	.open    = ipmr_vif_open,
2634 	.read    = seq_read,
2635 	.llseek  = seq_lseek,
2636 	.release = seq_release_net,
2637 };
2638 
2639 struct ipmr_mfc_iter {
2640 	struct seq_net_private p;
2641 	struct mr_table *mrt;
2642 	struct list_head *cache;
2643 };
2644 
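/* Find the @pos'th cache entry: resolved entries are walked first under
 * RCU, then unresolved ones under mfc_unres_lock. Whichever lock is held
 * on return is released by ipmr_mfc_seq_stop().
 */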
2645 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2646 					  struct ipmr_mfc_iter *it, loff_t pos)
2647 {
2648 	struct mr_table *mrt = it->mrt;
2649 	struct mfc_cache *mfc;
2650 
2651 	rcu_read_lock();
2652 	it->cache = &mrt->mfc_cache_list;
2653 	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
2654 		if (pos-- == 0)
2655 			return mfc;
2656 	rcu_read_unlock();
2657 
2658 	spin_lock_bh(&mfc_unres_lock);
2659 	it->cache = &mrt->mfc_unres_queue;
2660 	list_for_each_entry(mfc, it->cache, list)
2661 		if (pos-- == 0)
2662 			return mfc;
2663 	spin_unlock_bh(&mfc_unres_lock);
2664 
2665 	it->cache = NULL;
2666 	return NULL;
2667 }
2668 
2669 
2670 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2671 {
2672 	struct ipmr_mfc_iter *it = seq->private;
2673 	struct net *net = seq_file_net(seq);
2674 	struct mr_table *mrt;
2675 
2676 	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2677 	if (!mrt)
2678 		return ERR_PTR(-ENOENT);
2679 
2680 	it->mrt = mrt;
2681 	it->cache = NULL;
2682 	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
2683 		: SEQ_START_TOKEN;
2684 }
2685 
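/* Step to the next entry, switching from the resolved list to the
 * unresolved queue (and from RCU to mfc_unres_lock) at the boundary.
 */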
2686 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2687 {
2688 	struct ipmr_mfc_iter *it = seq->private;
2689 	struct net *net = seq_file_net(seq);
2690 	struct mr_table *mrt = it->mrt;
2691 	struct mfc_cache *mfc = v;
2692 
2693 	++*pos;
2694 
2695 	if (v == SEQ_START_TOKEN)
2696 		return ipmr_mfc_seq_idx(net, seq->private, 0);
2697 
2698 	if (mfc->list.next != it->cache)
2699 		return list_entry(mfc->list.next, struct mfc_cache, list);
2700 
2701 	if (it->cache == &mrt->mfc_unres_queue)
2702 		goto end_of_list;
2703 
2704 	/* exhausted the resolved cache list, show the unresolved queue */
2705 	rcu_read_unlock();
2706 	it->cache = &mrt->mfc_unres_queue;
2707 
2708 	spin_lock_bh(&mfc_unres_lock);
2709 	if (!list_empty(it->cache))
2710 		return list_first_entry(it->cache, struct mfc_cache, list);
2711 
2712 end_of_list:
2713 	spin_unlock_bh(&mfc_unres_lock);
2714 	it->cache = NULL;
2715 
2716 	return NULL;
2717 }
2718 
2719 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2720 {
2721 	struct ipmr_mfc_iter *it = seq->private;
2722 	struct mr_table *mrt = it->mrt;
2723 
2724 	if (it->cache == &mrt->mfc_unres_queue)
2725 		spin_unlock_bh(&mfc_unres_lock);
2726 	else if (it->cache == &mrt->mfc_cache_list)
2727 		rcu_read_unlock();
2728 }
2729 
2730 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2731 {
2732 	int n;
2733 
2734 	if (v == SEQ_START_TOKEN) {
2735 		seq_puts(seq,
2736 		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
2737 	} else {
2738 		const struct mfc_cache *mfc = v;
2739 		const struct ipmr_mfc_iter *it = seq->private;
2740 		const struct mr_table *mrt = it->mrt;
2741 
2742 		seq_printf(seq, "%08X %08X %-3hd",
2743 			   (__force u32) mfc->mfc_mcastgrp,
2744 			   (__force u32) mfc->mfc_origin,
2745 			   mfc->mfc_parent);
2746 
2747 		if (it->cache != &mrt->mfc_unres_queue) {
2748 			seq_printf(seq, " %8lu %8lu %8lu",
2749 				   mfc->mfc_un.res.pkt,
2750 				   mfc->mfc_un.res.bytes,
2751 				   mfc->mfc_un.res.wrong_if);
2752 			for (n = mfc->mfc_un.res.minvif;
2753 			     n < mfc->mfc_un.res.maxvif; n++) {
2754 				if (VIF_EXISTS(mrt, n) &&
2755 				    mfc->mfc_un.res.ttls[n] < 255)
2756 					seq_printf(seq,
2757 					   " %2d:%-3d",
2758 					   n, mfc->mfc_un.res.ttls[n]);
2759 			}
2760 		} else {
2761 			/* unresolved mfc_caches don't contain
2762 			 * pkt, bytes or wrong_if values
2763 			 */
2764 			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2765 		}
2766 		seq_putc(seq, '\n');
2767 	}
2768 	return 0;
2769 }
2770 
2771 static const struct seq_operations ipmr_mfc_seq_ops = {
2772 	.start = ipmr_mfc_seq_start,
2773 	.next  = ipmr_mfc_seq_next,
2774 	.stop  = ipmr_mfc_seq_stop,
2775 	.show  = ipmr_mfc_seq_show,
2776 };
2777 
2778 static int ipmr_mfc_open(struct inode *inode, struct file *file)
2779 {
2780 	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2781 			    sizeof(struct ipmr_mfc_iter));
2782 }
2783 
2784 static const struct file_operations ipmr_mfc_fops = {
2785 	.owner	 = THIS_MODULE,
2786 	.open    = ipmr_mfc_open,
2787 	.read    = seq_read,
2788 	.llseek  = seq_lseek,
2789 	.release = seq_release_net,
2790 };
2791 #endif
2792 
2793 #ifdef CONFIG_IP_PIMSM_V2
2794 static const struct net_protocol pim_protocol = {
2795 	.handler	=	pim_rcv,
2796 	.netns_ok	=	1,
2797 };
2798 #endif
2799 
2800 /* Setup for IP multicast routing */
2801 static int __net_init ipmr_net_init(struct net *net)
2802 {
2803 	int err;
2804 
2805 	err = ipmr_rules_init(net);
2806 	if (err < 0)
2807 		goto fail;
2808 
2809 #ifdef CONFIG_PROC_FS
2810 	err = -ENOMEM;
2811 	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
2812 		goto proc_vif_fail;
2813 	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
2814 		goto proc_cache_fail;
2815 #endif
2816 	return 0;
2817 
2818 #ifdef CONFIG_PROC_FS
2819 proc_cache_fail:
2820 	remove_proc_entry("ip_mr_vif", net->proc_net);
2821 proc_vif_fail:
2822 	ipmr_rules_exit(net);
2823 #endif
2824 fail:
2825 	return err;
2826 }
2827 
2828 static void __net_exit ipmr_net_exit(struct net *net)
2829 {
2830 #ifdef CONFIG_PROC_FS
2831 	remove_proc_entry("ip_mr_cache", net->proc_net);
2832 	remove_proc_entry("ip_mr_vif", net->proc_net);
2833 #endif
2834 	ipmr_rules_exit(net);
2835 }
2836 
2837 static struct pernet_operations ipmr_net_ops = {
2838 	.init = ipmr_net_init,
2839 	.exit = ipmr_net_exit,
2840 };
2841 
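/* Module initialisation: create the cache slab, then register the pernet
 * subsystem, netdevice notifier, PIM protocol handler and rtnetlink
 * doit/dumpit handlers, unwinding in reverse order on failure.
 */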
2842 int __init ip_mr_init(void)
2843 {
2844 	int err;
2845 
2846 	mrt_cachep = kmem_cache_create("ip_mrt_cache",
2847 				       sizeof(struct mfc_cache),
2848 				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
2849 				       NULL);
2850 
2851 	err = register_pernet_subsys(&ipmr_net_ops);
2852 	if (err)
2853 		goto reg_pernet_fail;
2854 
2855 	err = register_netdevice_notifier(&ip_mr_notifier);
2856 	if (err)
2857 		goto reg_notif_fail;
2858 #ifdef CONFIG_IP_PIMSM_V2
2859 	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2860 		pr_err("%s: can't add PIM protocol\n", __func__);
2861 		err = -EAGAIN;
2862 		goto add_proto_fail;
2863 	}
2864 #endif
2865 	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
2866 		      NULL, ipmr_rtm_dumproute, NULL);
2867 	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
2868 		      ipmr_rtm_route, NULL, NULL);
2869 	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
2870 		      ipmr_rtm_route, NULL, NULL);
2871 	return 0;
2872 
2873 #ifdef CONFIG_IP_PIMSM_V2
2874 add_proto_fail:
2875 	unregister_netdevice_notifier(&ip_mr_notifier);
2876 #endif
2877 reg_notif_fail:
2878 	unregister_pernet_subsys(&ipmr_net_ops);
2879 reg_pernet_fail:
2880 	kmem_cache_destroy(mrt_cachep);
2881 	return err;
2882 }
2883