// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
#include <net/ip_tunnels.h>

#include <linux/nospec.h>

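/* fib_rules glue: an IP6MR policy rule carries no state beyond the common
 * part, and a successful rule lookup hands the matched table back through
 * struct ip6mr_result.
 */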
struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   This way the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);

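/* With CONFIG_IPV6_MROUTE_MULTIPLE_TABLES the per-netns tables live on a
 * list and are selected through fib rules; without it each netns has just
 * the default table (RT6_TABLE_DFLT).
 */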
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

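/* Return the table following @mrt in the per-netns list, or the first
 * table when @mrt is NULL; NULL once the list is exhausted.
 */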
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

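/* Resolve the mr_table for a flow by running the IP6MR fib rules. */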
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi6_to_flowi(flp6));

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

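/* fib_rules action callback: FR_ACT_TO_TBL resolves to a table, the
 * unreachable/prohibit/blackhole actions map to errors, and a missing
 * table returns -EAGAIN so the lookup moves on to the next rule.
 */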
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ip6mr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}

bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
}
EXPORT_SYMBOL(ip6mr_rule_default);
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv6.mrt6;
	return NULL;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv6.mrt6 = mrt;
	return 0;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return 0;
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return 0;
}
#endif

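/* rhashtable compare callback: a cache entry matches when both multicast
 * group and origin are equal; 0 means "match", as rhashtable expects.
 */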
static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}

static const struct rhashtable_params ip6mr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc6_cache, cmparg),
	.key_len = sizeof(struct mfc6_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};

static void ip6mr_new_table_set(struct mr_table *mrt,
				struct net *net)
{
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
}

static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
	.mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};

static struct mr_table_ops ip6mr_mr_table_ops = {
	.rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};

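/* Look up a table by id, allocating and registering it on first use. */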
static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
			      ipmr_expire_process, ip6mr_new_table_set);
}

static void ip6mr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
				 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
 * /proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
 */

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

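/* Receive handler for PIM Register packets: validate the PIM header and
 * checksum, check that the inner packet really is a multicast IPv6 packet,
 * then decapsulate it and re-inject it via the pim6reg device.
 */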
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr   *encap;
	struct net_device  *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to a multicast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

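/* The PIMREG device never transmits on the wire: every packet handed to it
 * is bounced up to the daemon as an MRT6MSG_WHOLEPKT report and freed.
 */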
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto tx_err;

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev, NULL))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif

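/* Feed VIF and MFC changes into the FIB notifier chain, bumping the
 * per-netns ipmr_seq counter so dumpers can detect concurrent changes.
 */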
static int call_ip6mr_vif_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct vif_device *vif,
					  mifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv6.ipmr_seq);
}

static int call_ip6mr_mfc_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct mfc6_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}

/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
					       FIB_EVENT_VIF_DEL, v, vifi,
					       mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting an error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Timer handler for the queue of unresolved entries. */

static void ipmr_do_expire_process(struct mr_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mr_mfc *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill the oifs list. Called under write-locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr_table *mrt,
				    struct mr_mfc *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}

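/* Add a virtual interface (MRT6_ADD_MIF). Called under RTNL. */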
static int mif6_add(struct net *net, struct mr_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
			MIFF_REGISTER);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
				       v, vifi, mrt->id);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = in6addr_any,
		.mf6c_mcastgrp = *mcastgrp,
	};

	if (ipv6_addr_any(mcastgrp))
		return mr_mfc_find_any_parent(mrt, mifi);
	return mr_mfc_find_any(mrt, mifi, &arg);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc6_cache *
ip6mr_cache_find_parent(struct mr_table *mrt,
			const struct in6_addr *origin,
			const struct in6_addr *mcastgrp,
			int parent)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}

/* Allocate a multicast cache entry */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->_c.mfc_un.res.minvif = MAXMIFS;
	c->_c.free = ip6mr_cache_free_rcu;
	refcount_set(&c->_c.mfc_un.res.refcount, 1);
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone from the unresolved queue into a resolved state
 */

static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb->dev, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sock *mroute6_sk;
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rcu_read_lock();
	mroute6_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute6_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/* Deliver to user space multicast routing algorithms */
	ret = sock_queue_rcv_skb(mroute6_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/* Queue a packet for resolution, creating an unresolved cache entry
 * if needed. Takes mfc_unres_lock.
 */
static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
				  struct sk_buff *skb, struct net_device *dev)
{
	struct mfc6_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		c = ip6mr_cache_alloc_unres();
		if (!c) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	struct mfc6_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
	list_del_rcu(&c->_c.list);

	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);
	return 0;
}

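/* netdevice notifier: when a device unregisters, delete every VIF that
 * references it, in all tables of the netns.
 */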
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

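/* Combined change sequence for FIB notifier dumps: table changes plus
 * fib rule changes.
 */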
static unsigned int ip6mr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
}

static int ip6mr_dump(struct net *net, struct notifier_block *nb,
		      struct netlink_ext_ack *extack)
{
	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
		       ip6mr_mr_table_iter, &mrt_lock, extack);
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.fib_seq_read	= ip6mr_seq_read,
	.fib_dump	= ip6mr_dump,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv6.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	net->ipv6.ip6mr_notifier_ops = ops;

	return 0;
}

static void __net_exit ip6mr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
	net->ipv6.ip6mr_notifier_ops = NULL;
}

/* Setup for IP multicast routing */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_notifier_init(net);
	if (err)
		return err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto ip6mr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
			sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
ip6mr_rules_fail:
	ip6mr_notifier_exit(net);
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
	ip6mr_notifier_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

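/* Add or replace an MFC entry (MRT6_ADD_MFC / MRT6_ADD_MFC_PROXY).
 * Called under RTNL.
 */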
static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	unsigned char ttls[MAXMIFS];
	struct mfc6_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int i, err;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, &c->_c, ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
					       c, mrt->id);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->_c.mfc_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, &c->_c, ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ip6mr_rht_params);
	if (err) {
		pr_err("ip6mr: rhtable insert error %d\n", err);
		ip6mr_cache_free(c);
		return err;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);

	/* Check to see if we resolved a queued entry. If so we
	 * need to send the queued frames on and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc6_cache *)_uc;
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
				       c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/*
 *	Clear the VIF tables and MFC caches as selected by @flags; used e.g.
 *	when the multicast socket is closed.
 */

static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
	struct mr_mfc *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
		for (i = 0; i < mrt->maxvif; i++) {
			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
			     !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
				continue;
			mif6_delete(mrt, i, 0, &list);
		}
		unregister_netdevice_many(&list);
	}

	/* Wipe the cache */
	if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
				continue;
			rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
			list_del_rcu(&c->list);
			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
						       FIB_EVENT_ENTRY_DEL,
						       (struct mfc6_cache *)c, mrt->id);
			mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
			mr_cache_put(c);
		}
	}

	if (flags & MRT6_FLUSH_MFC) {
		if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
			spin_lock_bh(&mfc_unres_lock);
			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
				list_del(&c->list);
				mr6_netlink_event(mrt, (struct mfc6_cache *)c,
						  RTM_DELROUTE);
				ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
			}
			spin_unlock_bh(&mfc_unres_lock);
		}
	}
}

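/* Register @sk as the mroute control socket for @mrt (MRT6_INIT) and
 * enable mc_forwarding for the netns.
 */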
static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (rtnl_dereference(mrt->mroute_sk)) {
		err = -EADDRINUSE;
	} else {
		rcu_assign_pointer(mrt->mroute_sk, sk);
		sock_set_flag(sk, SOCK_RCU_FREE);
		net->ipv6.devconf_all->mc_forwarding++;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			write_lock_bh(&mrt_lock);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			/* Note that mroute_sk had SOCK_RCU_FREE set,
			 * so the RCU grace period before sk freeing
			 * is guaranteed by sk_destruct()
			 */
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return false;

	return rcu_access_pointer(mrt->mroute_sk);
}
EXPORT_SYMBOL(mroute6_is_socket);

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif,
			       sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk ==
					    rtnl_dereference(mrt->mroute_sk),
					    parent);
		rtnl_unlock();
		return ret;

	case MRT6_FLUSH:
	{
		int flags;

		if (optlen != sizeof(flags))
			return -EINVAL;
		if (get_user(flags, (int __user *)optval))
			return -EFAULT;
		rtnl_lock();
		mroute_clean_tables(mrt, flags);
		rtnl_unlock();
		return 0;
	}

	/*
	 *	Control PIM assert (enabling PIM also enables asserts)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == rcu_access_pointer(mrt->mroute_sk))
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		mrt = ip6mr_new_table(net, v);
		if (IS_ERR(mrt))
			ret = PTR_ERR(mrt);
		else
			raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsockopt support for the multicast routing system.
 */
1801 
1802 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1803 			  int __user *optlen)
1804 {
1805 	int olr;
1806 	int val;
1807 	struct net *net = sock_net(sk);
1808 	struct mr_table *mrt;
1809 
1810 	if (sk->sk_type != SOCK_RAW ||
1811 	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1812 		return -EOPNOTSUPP;
1813 
1814 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1815 	if (!mrt)
1816 		return -ENOENT;
1817 
1818 	switch (optname) {
1819 	case MRT6_VERSION:
1820 		val = 0x0305;
1821 		break;
1822 #ifdef CONFIG_IPV6_PIMSM_V2
1823 	case MRT6_PIM:
1824 		val = mrt->mroute_do_pim;
1825 		break;
1826 #endif
1827 	case MRT6_ASSERT:
1828 		val = mrt->mroute_do_assert;
1829 		break;
1830 	default:
1831 		return -ENOPROTOOPT;
1832 	}
1833 
1834 	if (get_user(olr, optlen))
1835 		return -EFAULT;
1836 
1837 	olr = min_t(int, olr, sizeof(int));
1838 	if (olr < 0)
1839 		return -EINVAL;
1840 
1841 	if (put_user(olr, optlen))
1842 		return -EFAULT;
1843 	if (copy_to_user(optval, &val, olr))
1844 		return -EFAULT;
1845 	return 0;
1846 }
1847 
1848 /*
1849  *	The IP multicast ioctl support routines.
1850  */
1851 
1852 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1853 {
1854 	struct sioc_sg_req6 sr;
1855 	struct sioc_mif_req6 vr;
1856 	struct vif_device *vif;
1857 	struct mfc6_cache *c;
1858 	struct net *net = sock_net(sk);
1859 	struct mr_table *mrt;
1860 
1861 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1862 	if (!mrt)
1863 		return -ENOENT;
1864 
1865 	switch (cmd) {
1866 	case SIOCGETMIFCNT_IN6:
1867 		if (copy_from_user(&vr, arg, sizeof(vr)))
1868 			return -EFAULT;
1869 		if (vr.mifi >= mrt->maxvif)
1870 			return -EINVAL;
1871 		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1872 		read_lock(&mrt_lock);
1873 		vif = &mrt->vif_table[vr.mifi];
1874 		if (VIF_EXISTS(mrt, vr.mifi)) {
1875 			vr.icount = vif->pkt_in;
1876 			vr.ocount = vif->pkt_out;
1877 			vr.ibytes = vif->bytes_in;
1878 			vr.obytes = vif->bytes_out;
1879 			read_unlock(&mrt_lock);
1880 
1881 			if (copy_to_user(arg, &vr, sizeof(vr)))
1882 				return -EFAULT;
1883 			return 0;
1884 		}
1885 		read_unlock(&mrt_lock);
1886 		return -EADDRNOTAVAIL;
1887 	case SIOCGETSGCNT_IN6:
1888 		if (copy_from_user(&sr, arg, sizeof(sr)))
1889 			return -EFAULT;
1890 
1891 		rcu_read_lock();
1892 		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1893 		if (c) {
1894 			sr.pktcnt = c->_c.mfc_un.res.pkt;
1895 			sr.bytecnt = c->_c.mfc_un.res.bytes;
1896 			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1897 			rcu_read_unlock();
1898 
1899 			if (copy_to_user(arg, &sr, sizeof(sr)))
1900 				return -EFAULT;
1901 			return 0;
1902 		}
1903 		rcu_read_unlock();
1904 		return -EADDRNOTAVAIL;
1905 	default:
1906 		return -ENOIOCTLCMD;
1907 	}
1908 }
1909 
1910 #ifdef CONFIG_COMPAT
1911 struct compat_sioc_sg_req6 {
1912 	struct sockaddr_in6 src;
1913 	struct sockaddr_in6 grp;
1914 	compat_ulong_t pktcnt;
1915 	compat_ulong_t bytecnt;
1916 	compat_ulong_t wrong_if;
1917 };
1918 
1919 struct compat_sioc_mif_req6 {
1920 	mifi_t	mifi;
1921 	compat_ulong_t icount;
1922 	compat_ulong_t ocount;
1923 	compat_ulong_t ibytes;
1924 	compat_ulong_t obytes;
1925 };
1926 
1927 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1928 {
1929 	struct compat_sioc_sg_req6 sr;
1930 	struct compat_sioc_mif_req6 vr;
1931 	struct vif_device *vif;
1932 	struct mfc6_cache *c;
1933 	struct net *net = sock_net(sk);
1934 	struct mr_table *mrt;
1935 
1936 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1937 	if (!mrt)
1938 		return -ENOENT;
1939 
1940 	switch (cmd) {
1941 	case SIOCGETMIFCNT_IN6:
1942 		if (copy_from_user(&vr, arg, sizeof(vr)))
1943 			return -EFAULT;
1944 		if (vr.mifi >= mrt->maxvif)
1945 			return -EINVAL;
1946 		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1947 		read_lock(&mrt_lock);
1948 		vif = &mrt->vif_table[vr.mifi];
1949 		if (VIF_EXISTS(mrt, vr.mifi)) {
1950 			vr.icount = vif->pkt_in;
1951 			vr.ocount = vif->pkt_out;
1952 			vr.ibytes = vif->bytes_in;
1953 			vr.obytes = vif->bytes_out;
1954 			read_unlock(&mrt_lock);
1955 
1956 			if (copy_to_user(arg, &vr, sizeof(vr)))
1957 				return -EFAULT;
1958 			return 0;
1959 		}
1960 		read_unlock(&mrt_lock);
1961 		return -EADDRNOTAVAIL;
1962 	case SIOCGETSGCNT_IN6:
1963 		if (copy_from_user(&sr, arg, sizeof(sr)))
1964 			return -EFAULT;
1965 
1966 		rcu_read_lock();
1967 		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1968 		if (c) {
1969 			sr.pktcnt = c->_c.mfc_un.res.pkt;
1970 			sr.bytecnt = c->_c.mfc_un.res.bytes;
1971 			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1972 			rcu_read_unlock();
1973 
1974 			if (copy_to_user(arg, &sr, sizeof(sr)))
1975 				return -EFAULT;
1976 			return 0;
1977 		}
1978 		rcu_read_unlock();
1979 		return -EADDRNOTAVAIL;
1980 	default:
1981 		return -ENOIOCTLCMD;
1982 	}
1983 }
1984 #endif
1985 
1986 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
1987 {
1988 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1989 		      IPSTATS_MIB_OUTFORWDATAGRAMS);
1990 	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
1991 		      IPSTATS_MIB_OUTOCTETS, skb->len);
1992 	return dst_output(net, sk, skb);
1993 }
1994 
1995 /*
1996  *	Processing handlers for ip6mr_forward
1997  */
1998 
1999 static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
2000 			  struct sk_buff *skb, int vifi)
2001 {
2002 	struct ipv6hdr *ipv6h;
2003 	struct vif_device *vif = &mrt->vif_table[vifi];
2004 	struct net_device *dev;
2005 	struct dst_entry *dst;
2006 	struct flowi6 fl6;
2007 
2008 	if (!vif->dev)
2009 		goto out_free;
2010 
2011 #ifdef CONFIG_IPV6_PIMSM_V2
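	/* A register vif never transmits on the wire: packets queued to it
	 * are counted and reported to the PIM daemon as MRT6MSG_WHOLEPKT so
	 * that it can encapsulate them in PIM Register messages towards the
	 * rendezvous point.
	 */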
2012 	if (vif->flags & MIFF_REGISTER) {
2013 		vif->pkt_out++;
2014 		vif->bytes_out += skb->len;
2015 		vif->dev->stats.tx_bytes += skb->len;
2016 		vif->dev->stats.tx_packets++;
2017 		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2018 		goto out_free;
2019 	}
2020 #endif
2021 
2022 	ipv6h = ipv6_hdr(skb);
2023 
2024 	fl6 = (struct flowi6) {
2025 		.flowi6_oif = vif->link,
2026 		.daddr = ipv6h->daddr,
2027 	};
2028 
2029 	dst = ip6_route_output(net, NULL, &fl6);
2030 	if (dst->error) {
2031 		dst_release(dst);
2032 		goto out_free;
2033 	}
2034 
2035 	skb_dst_drop(skb);
2036 	skb_dst_set(skb, dst);
2037 
2038 	/*
2039 	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
2040 	 * locally not only before forwarding, but also after forwarding on
2041 	 * all output interfaces. Clearly, if the mrouter runs a multicast
2042 	 * application, that application should receive packets regardless
2043 	 * of which interface it joined on.
2044 	 * Without this, the application would have to join on every
2045 	 * interface. On the other hand, a multihomed host (or a router
2046 	 * that is not an mrouter) must not join on more than one
2047 	 * interface, since that would result in duplicate packets.
2048 	 */
2049 	dev = vif->dev;
2050 	skb->dev = dev;
2051 	vif->pkt_out++;
2052 	vif->bytes_out += skb->len;
2053 
2054 	/* We are about to modify the IPv6 header; make sure the skb is
2055 	 * writable and has enough headroom. XXX: extension headers? */
2056 	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2057 		goto out_free;
2058 
2059 	ipv6h = ipv6_hdr(skb);
2060 	ipv6h->hop_limit--;
2061 
2062 	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2063 
2064 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2065 		       net, NULL, skb, skb->dev, dev,
2066 		       ip6mr_forward2_finish);
2067 
2068 out_free:
2069 	kfree_skb(skb);
2070 	return 0;
2071 }
2072 
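/* Return the vif index that @dev is registered under in @mrt, or -1 if
 * the device has no vif in this table.
 */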
2073 static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
2074 {
2075 	int ct;
2076 
2077 	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2078 		if (mrt->vif_table[ct].dev == dev)
2079 			break;
2080 	}
2081 	return ct;
2082 }
2083 
2084 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
2085 			   struct net_device *dev, struct sk_buff *skb,
2086 			   struct mfc6_cache *c)
2087 {
2088 	int psend = -1;
2089 	int vif, ct;
2090 	int true_vifi = ip6mr_find_vif(mrt, dev);
2091 
2092 	vif = c->_c.mfc_parent;
2093 	c->_c.mfc_un.res.pkt++;
2094 	c->_c.mfc_un.res.bytes += skb->len;
2095 	c->_c.mfc_un.res.lastuse = jiffies;
2096 
2097 	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
2098 		struct mfc6_cache *cache_proxy;
2099 
2100 		/* For an (*,G) entry, we only check that the incoming
2101 		 * interface is part of the static tree.
2102 		 */
2103 		rcu_read_lock();
2104 		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
2105 		if (cache_proxy &&
2106 		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
2107 			rcu_read_unlock();
2108 			goto forward;
2109 		}
2110 		rcu_read_unlock();
2111 	}
2112 
2113 	/*
2114 	 * Wrong interface: drop packet and (maybe) send PIM assert.
2115 	 */
2116 	if (mrt->vif_table[vif].dev != dev) {
2117 		c->_c.mfc_un.res.wrong_if++;
2118 
2119 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
2120 		    /* PIM-SM uses asserts when switching from the RPT to the
2121 		       SPT, so we cannot verify that the packet arrived on an
2122 		       oif. That is unfortunate, but the alternative would be
2123 		       moving a pretty large chunk of pimd into the kernel. --ANK
2124 		     */
2125 		    (mrt->mroute_do_pim ||
2126 		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
2127 		    time_after(jiffies,
2128 			       c->_c.mfc_un.res.last_assert +
2129 			       MFC_ASSERT_THRESH)) {
2130 			c->_c.mfc_un.res.last_assert = jiffies;
2131 			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2132 		}
2133 		goto dont_forward;
2134 	}
2135 
2136 forward:
2137 	mrt->vif_table[vif].pkt_in++;
2138 	mrt->vif_table[vif].bytes_in += skb->len;
2139 
2140 	/*
2141 	 *	Forward the frame
2142 	 */
2143 	if (ipv6_addr_any(&c->mf6c_origin) &&
2144 	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
2145 		if (true_vifi >= 0 &&
2146 		    true_vifi != c->_c.mfc_parent &&
2147 		    ipv6_hdr(skb)->hop_limit >
2148 				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2149 			/* It's an (*,*) entry and the packet is not coming from
2150 			 * the upstream: forward the packet to the upstream
2151 			 * only.
2152 			 */
2153 			psend = c->_c.mfc_parent;
2154 			goto last_forward;
2155 		}
2156 		goto dont_forward;
2157 	}
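	/* ttls[ct] is the per-vif TTL threshold: 255 means "never forward
	 * on this vif", otherwise forward only when the packet's hop limit
	 * exceeds the threshold. psend trails the loop by one entry so
	 * every matching vif but the last receives a clone, and the
	 * original skb is consumed by the final ip6mr_forward2() call.
	 */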
2158 	for (ct = c->_c.mfc_un.res.maxvif - 1;
2159 	     ct >= c->_c.mfc_un.res.minvif; ct--) {
2160 		/* For a (*,G) entry, don't forward to the incoming interface */
2161 		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
2162 		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
2163 			if (psend != -1) {
2164 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2165 				if (skb2)
2166 					ip6mr_forward2(net, mrt, skb2, psend);
2167 			}
2168 			psend = ct;
2169 		}
2170 	}
2171 last_forward:
2172 	if (psend != -1) {
2173 		ip6mr_forward2(net, mrt, skb, psend);
2174 		return;
2175 	}
2176 
2177 dont_forward:
2178 	kfree_skb(skb);
2179 }
2180 
2181 
2182 /*
2183  *	Multicast packets for forwarding arrive here
2184  */
2185 
2186 int ip6_mr_input(struct sk_buff *skb)
2187 {
2188 	struct mfc6_cache *cache;
2189 	struct net *net = dev_net(skb->dev);
2190 	struct mr_table *mrt;
2191 	struct flowi6 fl6 = {
2192 		.flowi6_iif	= skb->dev->ifindex,
2193 		.flowi6_mark	= skb->mark,
2194 	};
2195 	int err;
2196 	struct net_device *dev;
2197 
2198 	/* The skb->dev we were passed is the master device for VRFs.
2199 	 * Get the underlying interface that actually has a vif associated with it.
2200 	 */
2201 	dev = skb->dev;
2202 	if (netif_is_l3_master(skb->dev)) {
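		/* Note: IPCB() appears to work here only because
		 * inet_skb_parm and inet6_skb_parm both start with
		 * "int iif", so this reads the iif stored by the IPv6
		 * input path via IP6CB().
		 */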
2203 		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2204 		if (!dev) {
2205 			kfree_skb(skb);
2206 			return -ENODEV;
2207 		}
2208 	}
2209 
2210 	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2211 	if (err < 0) {
2212 		kfree_skb(skb);
2213 		return err;
2214 	}
2215 
2216 	read_lock(&mrt_lock);
2217 	cache = ip6mr_cache_find(mrt,
2218 				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2219 	if (!cache) {
2220 		int vif = ip6mr_find_vif(mrt, dev);
2221 
2222 		if (vif >= 0)
2223 			cache = ip6mr_cache_find_any(mrt,
2224 						     &ipv6_hdr(skb)->daddr,
2225 						     vif);
2226 	}
2227 
2228 	/*
2229 	 *	No usable cache entry
2230 	 */
2231 	if (!cache) {
2232 		int vif;
2233 
2234 		vif = ip6mr_find_vif(mrt, dev);
2235 		if (vif >= 0) {
2236 			int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
2237 			read_unlock(&mrt_lock);
2238 
2239 			return err;
2240 		}
2241 		read_unlock(&mrt_lock);
2242 		kfree_skb(skb);
2243 		return -ENODEV;
2244 	}
2245 
2246 	ip6_mr_forward(net, mrt, dev, skb, cache);
2247 
2248 	read_unlock(&mrt_lock);
2249 
2250 	return 0;
2251 }
2252 
2253 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2254 		    u32 portid)
2255 {
2256 	int err;
2257 	struct mr_table *mrt;
2258 	struct mfc6_cache *cache;
2259 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2260 
2261 	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2262 	if (!mrt)
2263 		return -ENOENT;
2264 
2265 	read_lock(&mrt_lock);
2266 	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2267 	if (!cache && skb->dev) {
2268 		int vif = ip6mr_find_vif(mrt, skb->dev);
2269 
2270 		if (vif >= 0)
2271 			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2272 						     vif);
2273 	}
2274 
2275 	if (!cache) {
2276 		struct sk_buff *skb2;
2277 		struct ipv6hdr *iph;
2278 		struct net_device *dev;
2279 		int vif;
2280 
2281 		dev = skb->dev;
2282 		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2283 			read_unlock(&mrt_lock);
2284 			return -ENODEV;
2285 		}
2286 
2287 		/* really correct? */
2288 		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2289 		if (!skb2) {
2290 			read_unlock(&mrt_lock);
2291 			return -ENOMEM;
2292 		}
2293 
2294 		NETLINK_CB(skb2).portid = portid;
2295 		skb_reset_transport_header(skb2);
2296 
2297 		skb_put(skb2, sizeof(struct ipv6hdr));
2298 		skb_reset_network_header(skb2);
2299 
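		/* Build a minimal dummy IPv6 header carrying only the
		 * source and group addresses, enough for the packet to be
		 * queued as an unresolved cache entry and reported to the
		 * daemon.
		 */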
2300 		iph = ipv6_hdr(skb2);
2301 		iph->version = 0;
2302 		iph->priority = 0;
2303 		iph->flow_lbl[0] = 0;
2304 		iph->flow_lbl[1] = 0;
2305 		iph->flow_lbl[2] = 0;
2306 		iph->payload_len = 0;
2307 		iph->nexthdr = IPPROTO_NONE;
2308 		iph->hop_limit = 0;
2309 		iph->saddr = rt->rt6i_src.addr;
2310 		iph->daddr = rt->rt6i_dst.addr;
2311 
2312 		err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
2313 		read_unlock(&mrt_lock);
2314 
2315 		return err;
2316 	}
2317 
2318 	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2319 	read_unlock(&mrt_lock);
2320 	return err;
2321 }
2322 
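/* Fill one RTM_NEWROUTE/RTM_DELROUTE message describing @c; used both
 * for netlink notifications and for route dumps.
 */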
2323 static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2324 			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2325 			     int flags)
2326 {
2327 	struct nlmsghdr *nlh;
2328 	struct rtmsg *rtm;
2329 	int err;
2330 
2331 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2332 	if (!nlh)
2333 		return -EMSGSIZE;
2334 
2335 	rtm = nlmsg_data(nlh);
2336 	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2337 	rtm->rtm_dst_len  = 128;
2338 	rtm->rtm_src_len  = 128;
2339 	rtm->rtm_tos      = 0;
2340 	rtm->rtm_table    = mrt->id;
2341 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2342 		goto nla_put_failure;
2343 	rtm->rtm_type = RTN_MULTICAST;
2344 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2345 	if (c->_c.mfc_flags & MFC_STATIC)
2346 		rtm->rtm_protocol = RTPROT_STATIC;
2347 	else
2348 		rtm->rtm_protocol = RTPROT_MROUTED;
2349 	rtm->rtm_flags    = 0;
2350 
2351 	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2352 	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2353 		goto nla_put_failure;
2354 	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2355 	/* do not break the dump if cache is unresolved */
2356 	if (err < 0 && err != -ENOENT)
2357 		goto nla_put_failure;
2358 
2359 	nlmsg_end(skb, nlh);
2360 	return 0;
2361 
2362 nla_put_failure:
2363 	nlmsg_cancel(skb, nlh);
2364 	return -EMSGSIZE;
2365 }
2366 
2367 static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2368 			      u32 portid, u32 seq, struct mr_mfc *c,
2369 			      int cmd, int flags)
2370 {
2371 	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
2372 				 cmd, flags);
2373 }
2374 
2375 static int mr6_msgsize(bool unresolved, int maxvif)
2376 {
2377 	size_t len =
2378 		NLMSG_ALIGN(sizeof(struct rtmsg))
2379 		+ nla_total_size(4)	/* RTA_TABLE */
2380 		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
2381 		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
2382 		;
2383 
2384 	if (!unresolved)
2385 		len = len
2386 		      + nla_total_size(4)	/* RTA_IIF */
2387 		      + nla_total_size(0)	/* RTA_MULTIPATH */
2388 		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2389 						/* RTA_MFC_STATS */
2390 		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2391 		;
2392 
2393 	return len;
2394 }
2395 
2396 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
2397 			      int cmd)
2398 {
2399 	struct net *net = read_pnet(&mrt->net);
2400 	struct sk_buff *skb;
2401 	int err = -ENOBUFS;
2402 
2403 	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
2404 			GFP_ATOMIC);
2405 	if (!skb)
2406 		goto errout;
2407 
2408 	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2409 	if (err < 0)
2410 		goto errout;
2411 
2412 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2413 	return;
2414 
2415 errout:
2416 	kfree_skb(skb);
2417 	if (err < 0)
2418 		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2419 }
2420 
2421 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2422 {
2423 	size_t len =
2424 		NLMSG_ALIGN(sizeof(struct rtgenmsg))
2425 		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
2426 		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
2427 					/* IP6MRA_CREPORT_SRC_ADDR */
2428 		+ nla_total_size(sizeof(struct in6_addr))
2429 					/* IP6MRA_CREPORT_DST_ADDR */
2430 		+ nla_total_size(sizeof(struct in6_addr))
2431 					/* IP6MRA_CREPORT_PKT */
2432 		+ nla_total_size(payloadlen)
2433 		;
2434 
2435 	return len;
2436 }
2437 
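/* Mirror a kernel-to-daemon mrt6msg (normally delivered on the mroute
 * socket) as an RTM_NEWCACHEREPORT notification to listeners on the
 * RTNLGRP_IPV6_MROUTE_R netlink group.
 */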
2438 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
2439 {
2440 	struct net *net = read_pnet(&mrt->net);
2441 	struct nlmsghdr *nlh;
2442 	struct rtgenmsg *rtgenm;
2443 	struct mrt6msg *msg;
2444 	struct sk_buff *skb;
2445 	struct nlattr *nla;
2446 	int payloadlen;
2447 
2448 	payloadlen = pkt->len - sizeof(struct mrt6msg);
2449 	msg = (struct mrt6msg *)skb_transport_header(pkt);
2450 
2451 	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2452 	if (!skb)
2453 		goto errout;
2454 
2455 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2456 			sizeof(struct rtgenmsg), 0);
2457 	if (!nlh)
2458 		goto errout;
2459 	rtgenm = nlmsg_data(nlh);
2460 	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2461 	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2462 	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2463 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2464 			     &msg->im6_src) ||
2465 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2466 			     &msg->im6_dst))
2467 		goto nla_put_failure;
2468 
2469 	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2470 	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2471 				  nla_data(nla), payloadlen))
2472 		goto nla_put_failure;
2473 
2474 	nlmsg_end(skb, nlh);
2475 
2476 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2477 	return;
2478 
2479 nla_put_failure:
2480 	nlmsg_cancel(skb, nlh);
2481 errout:
2482 	kfree_skb(skb);
2483 	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2484 }
2485 
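/* Dump handler for RTM_GETROUTE with rtm_family == RTNL_FAMILY_IP6MR.
 * Illustrative sketch (not part of this file): userspace requests the
 * dump with an NLM_F_DUMP RTM_GETROUTE message whose payload is
 *
 *	struct rtmsg rtm = { .rtm_family = RTNL_FAMILY_IP6MR };
 *
 * and, under strict checking, may append an RTA_TABLE attribute to
 * restrict the dump to a single mroute table.
 */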
2486 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2487 {
2488 	const struct nlmsghdr *nlh = cb->nlh;
2489 	struct fib_dump_filter filter = {};
2490 	int err;
2491 
2492 	if (cb->strict_check) {
2493 		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
2494 					    &filter, cb);
2495 		if (err < 0)
2496 			return err;
2497 	}
2498 
2499 	if (filter.table_id) {
2500 		struct mr_table *mrt;
2501 
2502 		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
2503 		if (!mrt) {
2504 			if (filter.dump_all_families)
2505 				return skb->len;
2506 
2507 			NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
2508 			return -ENOENT;
2509 		}
2510 		err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
2511 				    &mfc_unres_lock, &filter);
2512 		return skb->len ? : err;
2513 	}
2514 
2515 	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
2516 				_ip6mr_fill_mroute, &mfc_unres_lock, &filter);
2517 }
2518