/* xref: /openbmc/linux/net/ipv6/ip6mr.c (revision 1b39eacd) */
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>

struct mr6_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and is protected
   by the weak lock mrt_lock. The queue of unresolved entries is
   protected by the strong spinlock mfc_unres_lock.

   In this case the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
#endif

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}
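
/* Example (illustrative sketch, not from the original sources):
 * additional tables are created on demand when a routing daemon sets
 * MRT6_TABLE before MRT6_INIT on its mroute socket; the handler in
 * ip6_mroute_setsockopt() below ends up here:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	u_int32_t table = 42;			// becomes mrt->id
 *	setsockopt(s, IPPROTO_IPV6, MRT6_TABLE, &table, sizeof(table));
 */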

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

/*
 *	The /proc interfaces to the multicast routing tables:
 *	/proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
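
/* Example (illustrative, values invented): the seq_file handlers above
 * render one line per active mif, e.g. in "cat /proc/net/ip6_mr_vif":
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0           1824      12      3648      24 00000
 *	 1 pim6reg           0       0         0       0 00001
 */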

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
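
/* Example (illustrative, addresses and counters invented): a resolved
 * entry in /proc/net/ip6_mr_cache as printed by ipmr_mfc_seq_show();
 * %pI6 emits the full, uncompressed address form:
 *
 *	Group                            Origin                           Iif      Pkts  Bytes     Wrong  Oifs
 *	ff3e:0000:0000:0000:0000:0000:0000:0001 2001:0db8:0000:0000:0000:0000:0000:0001 1         42     4368        0  2:1
 */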
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr   *encap;
	struct net_device  *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to a multicast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
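
/* Illustrative layout (not from the original sources) of the PIMv2
 * Register message that pim6_rcv() above accepts:
 *
 *	+----------------------------+
 *	| outer IPv6 header          |  unicast, next header IPPROTO_PIM
 *	+----------------------------+
 *	| struct pimreghdr           |  type Register, NULL bit clear,
 *	+----------------------------+  checksum over the 8-byte header
 *	| inner IPv6 header (encap)  |  (or the whole packet as fallback)
 *	| original payload ...       |  daddr must be multicast
 *	+----------------------------+
 *
 * The decapsulated packet is re-injected on the register vif via
 * skb_tunnel_rx() and netif_rx().
 */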

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Timer handler for the queue of unresolved entries. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(struct timer_list *t)
{
	struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill the oif list. Called with mrt_lock held for writing. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
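
/* Example (illustrative, not from the original sources): ttls[] holds a
 * per-mif TTL threshold, where 255 means "never forward on this mif".
 * E.g. ttls = {1, 255, 1} yields minvif = 0, maxvif = 3, and packets go
 * out on mifs 0 and 2 only, provided their hop limit exceeds the
 * threshold (checked in ip6_mr_forward()).
 */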

static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special-purpose VIF in PIM:
		 * all packets are sent to the daemon.
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has moved from the unresolved queue to resolved state
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (!mrt->mroute6_sk) {
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
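
/* Example (hedged sketch, not from the original sources): the daemon
 * reads these reports from its mroute socket and dispatches on the
 * struct mrt6msg it carries (im6_mbz == 0 marks a kernel upcall):
 *
 *	switch (msg->im6_msgtype) {
 *	case MRT6MSG_NOCACHE:	// no route yet: resolve (im6_src, im6_dst)
 *	case MRT6MSG_WRONGMIF:	// wrong incoming mif: consider a PIM assert
 *	case MRT6MSG_WHOLEPKT:	// from the register vif: encapsulate to RP
 *	}
 */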

/*
 *	Queue a packet for resolution, creating the unresolved cache
 *	entry under mfc_unres_lock if one does not exist yet.
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IPv6 multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
		      ip6mr_rtm_dumproute, 0);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued entry. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
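
/* Example (hedged sketch, not from the original sources): userspace
 * installs an (S,G) entry with MRT6_ADD_MFC; mifs present in
 * mf6cc_ifset get TTL threshold 1, all others 255 (see the ttls[]
 * setup above):
 *
 *	struct mf6cctl mc = {0};
 *	inet_pton(AF_INET6, "2001:db8::1", &mc.mf6cc_origin.sin6_addr);
 *	inet_pton(AF_INET6, "ff3e::1", &mc.mf6cc_mcastgrp.sin6_addr);
 *	mc.mf6cc_parent = upstream_mif;		// expected incoming mif
 *	IF_SET(downstream_mif, &mc.mf6cc_ifset);
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mc, sizeof(mc));
 */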

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	} else {
		err = -EADDRINUSE;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (enabling PIM also enables asserts)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
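
/* Example (hedged sketch, not from the original sources): the usual
 * pim6sd-style bring-up against the option handlers above, error
 * handling omitted:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl m = { .mif6c_mifi = 0,
 *			     .mif6c_pifi = if_nametoindex("eth0") };
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &m, sizeof(m));
 *	// ... add MFC entries, read upcalls ...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */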

/*
 *	getsockopt() support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
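
/* Example (hedged sketch, not from the original sources): querying the
 * API version returns 0x0305 per the switch above:
 *
 *	int ver;
 *	socklen_t len = sizeof(ver);
 *	getsockopt(s, IPPROTO_IPV6, MRT6_VERSION, &ver, &len);
 *	// ver == 0x0305
 */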

/*
 *	The IPv6 multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
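
/* Example (hedged sketch, not from the original sources): polling the
 * per-flow counters kept in the MFC cache via SIOCGETSGCNT_IN6 on the
 * mroute socket:
 *
 *	struct sioc_sg_req6 sg = {0};
 *	inet_pton(AF_INET6, "2001:db8::1", &sg.src.sin6_addr);
 *	inet_pton(AF_INET6, "ff3e::1", &sg.grp.sin6_addr);
 *	ioctl(s, SIOCGETSGCNT_IN6, &sg);
 *	// sg.pktcnt, sg.bytecnt, sg.wrong_if now hold the counters
 */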

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicast
	 * program, that program should receive packets regardless of the
	 * interface it joined on; otherwise it would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join on more than one interface, since that
	 * would result in receiving multiple copies of each packet.
	 */
2064 	dev = vif->dev;
2065 	skb->dev = dev;
2066 	vif->pkt_out++;
2067 	vif->bytes_out += skb->len;
2068 
2069 	/* We are about to write to the header: unshare the skb first */
2070 	/* XXX: what about extension headers? */
2071 	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2072 		goto out_free;
2073 
2074 	ipv6h = ipv6_hdr(skb);
2075 	ipv6h->hop_limit--;
2076 
2077 	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2078 
2079 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2080 		       net, NULL, skb, skb->dev, dev,
2081 		       ip6mr_forward2_finish);
2082 
2083 out_free:
2084 	kfree_skb(skb);
2085 	return 0;
2086 }
2087 
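/* Map a net_device back to its MIF index; -1 if @dev is not a MIF */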
2088 static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2089 {
2090 	int ct;
2091 
2092 	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2093 		if (mrt->vif6_table[ct].dev == dev)
2094 			break;
2095 	}
2096 	return ct;
2097 }
2098 
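/*
 * Decide where one packet goes: account it against the cache entry,
 * police the incoming interface (raising an MRT6MSG_WRONGMIF assert if
 * configured), then clone a copy to every eligible output MIF,
 * consuming the original skb on the last transmit or on drop.
 */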
2099 static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2100 			   struct sk_buff *skb, struct mfc6_cache *cache)
2101 {
2102 	int psend = -1;
2103 	int vif, ct;
2104 	int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2105 
2106 	vif = cache->mf6c_parent;
2107 	cache->mfc_un.res.pkt++;
2108 	cache->mfc_un.res.bytes += skb->len;
2109 	cache->mfc_un.res.lastuse = jiffies;
2110 
2111 	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2112 		struct mfc6_cache *cache_proxy;
2113 
2114 		/* For an (*,G) entry, we only check that the incoming
2115 		 * interface is part of the static tree.
2116 		 */
2117 		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2118 		if (cache_proxy &&
2119 		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2120 			goto forward;
2121 	}
2122 
2123 	/*
2124 	 * Wrong interface: drop packet and (maybe) send PIM assert.
2125 	 */
2126 	if (mrt->vif6_table[vif].dev != skb->dev) {
2127 		cache->mfc_un.res.wrong_if++;
2128 
2129 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
2130 		    /* PIM-SM uses asserts when switching from the RPT to the
2131 		       SPT, so we cannot insist that the packet arrived on an
2132 		       oif.  That is bad, but otherwise we would need to move
2133 		       a pretty large chunk of pimd into the kernel. Ough... --ANK
2134 		     */
2135 		    (mrt->mroute_do_pim ||
2136 		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
2137 		    time_after(jiffies,
2138 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2139 			cache->mfc_un.res.last_assert = jiffies;
2140 			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2141 		}
2142 		goto dont_forward;
2143 	}
2144 
2145 forward:
2146 	mrt->vif6_table[vif].pkt_in++;
2147 	mrt->vif6_table[vif].bytes_in += skb->len;
2148 
2149 	/*
2150 	 *	Forward the frame
2151 	 */
2152 	if (ipv6_addr_any(&cache->mf6c_origin) &&
2153 	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2154 		if (true_vifi >= 0 &&
2155 		    true_vifi != cache->mf6c_parent &&
2156 		    ipv6_hdr(skb)->hop_limit >
2157 				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2158 			/* It's an (*,*) entry and the packet is not coming from
2159 			 * the upstream: forward the packet to the upstream
2160 			 * only.
2161 			 */
2162 			psend = cache->mf6c_parent;
2163 			goto last_forward;
2164 		}
2165 		goto dont_forward;
2166 	}
2167 	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2168 		/* For a (*,G) entry, don't forward back to the incoming interface */
2169 		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2170 		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2171 			if (psend != -1) {
2172 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2173 				if (skb2)
2174 					ip6mr_forward2(net, mrt, skb2, cache, psend);
2175 			}
2176 			psend = ct;
2177 		}
2178 	}
2179 last_forward:
2180 	if (psend != -1) {
2181 		ip6mr_forward2(net, mrt, skb, cache, psend);
2182 		return;
2183 	}
2184 
2185 dont_forward:
2186 	kfree_skb(skb);
2187 }
2188 
2189 
2190 /*
2191  *	Multicast packets for forwarding arrive here
2192  */
2193 
2194 int ip6_mr_input(struct sk_buff *skb)
2195 {
2196 	struct mfc6_cache *cache;
2197 	struct net *net = dev_net(skb->dev);
2198 	struct mr6_table *mrt;
2199 	struct flowi6 fl6 = {
2200 		.flowi6_iif	= skb->dev->ifindex,
2201 		.flowi6_mark	= skb->mark,
2202 	};
2203 	int err;
2204 
2205 	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2206 	if (err < 0) {
2207 		kfree_skb(skb);
2208 		return err;
2209 	}
2210 
2211 	read_lock(&mrt_lock);
2212 	cache = ip6mr_cache_find(mrt,
2213 				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2214 	if (!cache) {
2215 		int vif = ip6mr_find_vif(mrt, skb->dev);
2216 
2217 		if (vif >= 0)
2218 			cache = ip6mr_cache_find_any(mrt,
2219 						     &ipv6_hdr(skb)->daddr,
2220 						     vif);
2221 	}
2222 
2223 	/*
2224 	 *	No usable cache entry
2225 	 */
2226 	if (!cache) {
2227 		int vif;
2228 
2229 		vif = ip6mr_find_vif(mrt, skb->dev);
2230 		if (vif >= 0) {
2231 			int err = ip6mr_cache_unresolved(mrt, vif, skb);
2232 			read_unlock(&mrt_lock);
2233 
2234 			return err;
2235 		}
2236 		read_unlock(&mrt_lock);
2237 		kfree_skb(skb);
2238 		return -ENODEV;
2239 	}
2240 
2241 	ip6_mr_forward(net, mrt, skb, cache);
2242 
2243 	read_unlock(&mrt_lock);
2244 
2245 	return 0;
2246 }
2247 
2248 
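/*
 * Fill the route-specific attributes (RTA_IIF, the RTA_MULTIPATH
 * nexthops, RTA_MFC_STATS and RTA_EXPIRES) for one cache entry.
 * Unresolved entries are flagged RTNH_F_UNRESOLVED and return -ENOENT.
 */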
2249 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2250 			       struct mfc6_cache *c, struct rtmsg *rtm)
2251 {
2252 	struct rta_mfc_stats mfcs;
2253 	struct nlattr *mp_attr;
2254 	struct rtnexthop *nhp;
2255 	unsigned long lastuse;
2256 	int ct;
2257 
2258 	/* If the cache entry is unresolved, don't try to fill IIF and OIF */
2259 	if (c->mf6c_parent >= MAXMIFS) {
2260 		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
2261 		return -ENOENT;
2262 	}
2263 
2264 	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2265 	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2266 		return -EMSGSIZE;
2267 	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
2268 	if (!mp_attr)
2269 		return -EMSGSIZE;
2270 
2271 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2272 		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2273 			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
2274 			if (!nhp) {
2275 				nla_nest_cancel(skb, mp_attr);
2276 				return -EMSGSIZE;
2277 			}
2278 
2279 			nhp->rtnh_flags = 0;
2280 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2281 			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2282 			nhp->rtnh_len = sizeof(*nhp);
2283 		}
2284 	}
2285 
2286 	nla_nest_end(skb, mp_attr);
2287 
2288 	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2289 	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2290 
2291 	mfcs.mfcs_packets = c->mfc_un.res.pkt;
2292 	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2293 	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2294 	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
2295 	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
2296 			      RTA_PAD))
2297 		return -EMSGSIZE;
2298 
2299 	rtm->rtm_type = RTN_MULTICAST;
2300 	return 1;
2301 }
2302 
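/*
 * RTM_GETROUTE helper: look up the (src, grp) pair carried in the skb's
 * rt6_info.  If no cache entry exists yet, synthesize a minimal IPv6
 * header and queue it on the unresolved list so the daemon can resolve
 * the route.
 */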
2303 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2304 		    u32 portid)
2305 {
2306 	int err;
2307 	struct mr6_table *mrt;
2308 	struct mfc6_cache *cache;
2309 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2310 
2311 	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2312 	if (!mrt)
2313 		return -ENOENT;
2314 
2315 	read_lock(&mrt_lock);
2316 	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2317 	if (!cache && skb->dev) {
2318 		int vif = ip6mr_find_vif(mrt, skb->dev);
2319 
2320 		if (vif >= 0)
2321 			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2322 						     vif);
2323 	}
2324 
2325 	if (!cache) {
2326 		struct sk_buff *skb2;
2327 		struct ipv6hdr *iph;
2328 		struct net_device *dev;
2329 		int vif;
2330 
2331 		dev = skb->dev;
2332 		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2333 			read_unlock(&mrt_lock);
2334 			return -ENODEV;
2335 		}
2336 
2337 		/* really correct? */
2338 		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2339 		if (!skb2) {
2340 			read_unlock(&mrt_lock);
2341 			return -ENOMEM;
2342 		}
2343 
2344 		NETLINK_CB(skb2).portid = portid;
2345 		skb_reset_transport_header(skb2);
2346 
2347 		skb_put(skb2, sizeof(struct ipv6hdr));
2348 		skb_reset_network_header(skb2);
2349 
2350 		iph = ipv6_hdr(skb2);
2351 		iph->version = 0;
2352 		iph->priority = 0;
2353 		iph->flow_lbl[0] = 0;
2354 		iph->flow_lbl[1] = 0;
2355 		iph->flow_lbl[2] = 0;
2356 		iph->payload_len = 0;
2357 		iph->nexthdr = IPPROTO_NONE;
2358 		iph->hop_limit = 0;
2359 		iph->saddr = rt->rt6i_src.addr;
2360 		iph->daddr = rt->rt6i_dst.addr;
2361 
2362 		err = ip6mr_cache_unresolved(mrt, vif, skb2);
2363 		read_unlock(&mrt_lock);
2364 
2365 		return err;
2366 	}
2367 
2368 	if (rtm->rtm_flags & RTM_F_NOTIFY)
2369 		cache->mfc_flags |= MFC_NOTIFY;
2370 
2371 	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2372 	read_unlock(&mrt_lock);
2373 	return err;
2374 }
2375 
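/*
 * Build one complete RTM_NEWROUTE/RTM_DELROUTE message around
 * __ip6mr_fill_mroute(); shared by the event notifier and the dump
 * callback below.
 */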
2376 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2377 			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2378 			     int flags)
2379 {
2380 	struct nlmsghdr *nlh;
2381 	struct rtmsg *rtm;
2382 	int err;
2383 
2384 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2385 	if (!nlh)
2386 		return -EMSGSIZE;
2387 
2388 	rtm = nlmsg_data(nlh);
2389 	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2390 	rtm->rtm_dst_len  = 128;
2391 	rtm->rtm_src_len  = 128;
2392 	rtm->rtm_tos      = 0;
2393 	rtm->rtm_table    = mrt->id;
2394 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2395 		goto nla_put_failure;
2396 	rtm->rtm_type = RTN_MULTICAST;
2397 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2398 	if (c->mfc_flags & MFC_STATIC)
2399 		rtm->rtm_protocol = RTPROT_STATIC;
2400 	else
2401 		rtm->rtm_protocol = RTPROT_MROUTED;
2402 	rtm->rtm_flags    = 0;
2403 
2404 	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2405 	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2406 		goto nla_put_failure;
2407 	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2408 	/* do not break the dump if cache is unresolved */
2409 	if (err < 0 && err != -ENOENT)
2410 		goto nla_put_failure;
2411 
2412 	nlmsg_end(skb, nlh);
2413 	return 0;
2414 
2415 nla_put_failure:
2416 	nlmsg_cancel(skb, nlh);
2417 	return -EMSGSIZE;
2418 }
2419 
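/*
 * Worst-case netlink message size for one cache entry; unresolved
 * entries omit the IIF, MULTIPATH and MFC_STATS attributes entirely.
 */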
2420 static int mr6_msgsize(bool unresolved, int maxvif)
2421 {
2422 	size_t len =
2423 		NLMSG_ALIGN(sizeof(struct rtmsg))
2424 		+ nla_total_size(4)	/* RTA_TABLE */
2425 		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
2426 		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
2427 		;
2428 
2429 	if (!unresolved)
2430 		len = len
2431 		      + nla_total_size(4)	/* RTA_IIF */
2432 		      + nla_total_size(0)	/* RTA_MULTIPATH */
2433 		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2434 						/* RTA_MFC_STATS */
2435 		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2436 		;
2437 
2438 	return len;
2439 }
2440 
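/*
 * Notify RTNLGRP_IPV6_MROUTE listeners that a cache entry was added or
 * deleted; on failure, set the error on the group so userspace notices.
 */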
2441 static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2442 			      int cmd)
2443 {
2444 	struct net *net = read_pnet(&mrt->net);
2445 	struct sk_buff *skb;
2446 	int err = -ENOBUFS;
2447 
2448 	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2449 			GFP_ATOMIC);
2450 	if (!skb)
2451 		goto errout;
2452 
2453 	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2454 	if (err < 0)
2455 		goto errout;
2456 
2457 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2458 	return;
2459 
2460 errout:
2461 	kfree_skb(skb);
2462 	if (err < 0)
2463 		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2464 }
2465 
2466 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2467 {
2468 	size_t len =
2469 		NLMSG_ALIGN(sizeof(struct rtgenmsg))
2470 		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
2471 		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
2472 					/* IP6MRA_CREPORT_SRC_ADDR */
2473 		+ nla_total_size(sizeof(struct in6_addr))
2474 					/* IP6MRA_CREPORT_DST_ADDR */
2475 		+ nla_total_size(sizeof(struct in6_addr))
2476 					/* IP6MRA_CREPORT_PKT */
2477 		+ nla_total_size(payloadlen)
2478 		;
2479 
2480 	return len;
2481 }
2482 
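/*
 * Mirror a cache report (normally queued to the mroute daemon as an
 * mrt6msg) to RTNLGRP_IPV6_MROUTE_R as RTM_NEWCACHEREPORT, copying the
 * payload behind the mrt6msg header into IP6MRA_CREPORT_PKT.
 */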
2483 static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
2484 {
2485 	struct net *net = read_pnet(&mrt->net);
2486 	struct nlmsghdr *nlh;
2487 	struct rtgenmsg *rtgenm;
2488 	struct mrt6msg *msg;
2489 	struct sk_buff *skb;
2490 	struct nlattr *nla;
2491 	int payloadlen;
2492 
2493 	payloadlen = pkt->len - sizeof(struct mrt6msg);
2494 	msg = (struct mrt6msg *)skb_transport_header(pkt);
2495 
2496 	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2497 	if (!skb)
2498 		goto errout;
2499 
2500 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2501 			sizeof(struct rtgenmsg), 0);
2502 	if (!nlh)
2503 		goto errout;
2504 	rtgenm = nlmsg_data(nlh);
2505 	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2506 	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2507 	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2508 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2509 			     &msg->im6_src) ||
2510 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2511 			     &msg->im6_dst))
2512 		goto nla_put_failure;
2513 
2514 	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2515 	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2516 				  nla_data(nla), payloadlen))
2517 		goto nla_put_failure;
2518 
2519 	nlmsg_end(skb, nlh);
2520 
2521 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2522 	return;
2523 
2524 nla_put_failure:
2525 	nlmsg_cancel(skb, nlh);
2526 errout:
2527 	kfree_skb(skb);
2528 	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2529 }
2530 
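/*
 * RTM_GETROUTE dump callback: walk every table, hash line and entry
 * (resolved entries first, then the unresolved queue), using
 * cb->args[] to resume where the previous skb filled up.
 */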
2531 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2532 {
2533 	struct net *net = sock_net(skb->sk);
2534 	struct mr6_table *mrt;
2535 	struct mfc6_cache *mfc;
2536 	unsigned int t = 0, s_t;
2537 	unsigned int h = 0, s_h;
2538 	unsigned int e = 0, s_e;
2539 
2540 	s_t = cb->args[0];
2541 	s_h = cb->args[1];
2542 	s_e = cb->args[2];
2543 
2544 	read_lock(&mrt_lock);
2545 	ip6mr_for_each_table(mrt, net) {
2546 		if (t < s_t)
2547 			goto next_table;
2548 		if (t > s_t)
2549 			s_h = 0;
2550 		for (h = s_h; h < MFC6_LINES; h++) {
2551 			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2552 				if (e < s_e)
2553 					goto next_entry;
2554 				if (ip6mr_fill_mroute(mrt, skb,
2555 						      NETLINK_CB(cb->skb).portid,
2556 						      cb->nlh->nlmsg_seq,
2557 						      mfc, RTM_NEWROUTE,
2558 						      NLM_F_MULTI) < 0)
2559 					goto done;
2560 next_entry:
2561 				e++;
2562 			}
2563 			e = s_e = 0;
2564 		}
2565 		spin_lock_bh(&mfc_unres_lock);
2566 		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2567 			if (e < s_e)
2568 				goto next_entry2;
2569 			if (ip6mr_fill_mroute(mrt, skb,
2570 					      NETLINK_CB(cb->skb).portid,
2571 					      cb->nlh->nlmsg_seq,
2572 					      mfc, RTM_NEWROUTE,
2573 					      NLM_F_MULTI) < 0) {
2574 				spin_unlock_bh(&mfc_unres_lock);
2575 				goto done;
2576 			}
2577 next_entry2:
2578 			e++;
2579 		}
2580 		spin_unlock_bh(&mfc_unres_lock);
2581 		e = s_e = 0;
2582 		s_h = 0;
2583 next_table:
2584 		t++;
2585 	}
2586 done:
2587 	read_unlock(&mrt_lock);
2588 
2589 	cb->args[2] = e;
2590 	cb->args[1] = h;
2591 	cb->args[0] = t;
2592 
2593 	return skb->len;
2594 }
2595