xref: /openbmc/linux/net/ipv6/ip6mr.c (revision 82e6fdd6)
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>

struct mr6_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   In this case the data path is entirely free of exclusive locks.
 */
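
/* Lock usage in brief (orientation note added by the editor, derived from the
 * code below): the packet forwarding path only ever takes read_lock(&mrt_lock);
 * control-plane updates (mif6_add/mif6_delete, MFC inserts and removals,
 * ip6mr_sk_init) take write_lock_bh(&mrt_lock) for short critical sections,
 * while the unresolved queue is always manipulated under
 * spin_lock_bh(&mfc_unres_lock).
 */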

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
#endif

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

/*
 *	The /proc interfaces to multicast routing:
 *	/proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
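
/* Illustrative sample of the resulting /proc/net/ip6_mr_vif output (the
 * values are hypothetical; the layout follows the format strings above):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0          123456     789    654321     987 00000
 *	 1 pim6reg            0       0         0       0 00001
 */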

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
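
/* Illustrative /proc/net/ip6_mr_cache line (hypothetical addresses; %pI6
 * prints the full uncompressed form, and each trailing "mif:ttl" pair is one
 * outgoing interface):
 *
 *	Group                            Origin                           Iif      Pkts  Bytes     Wrong  Oifs
 *	ff0e:0000:0000:0000:0000:0000:0000:0001 2001:0db8:0000:0000:0000:0000:0000:0001 0         10     1480        0  1:1  2:1
 */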

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr   *encap;
	struct net_device  *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check whether the inner packet is destined to a multicast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
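
/* For orientation (summary added by the editor, not in the original source):
 * a PIM Register packet arrives here laid out as
 *	outer IPv6 | PIM register header | encapsulated IPv6 multicast packet.
 * pim6_rcv() validates the register header and checksum, strips the outer
 * header and pimreghdr, and re-injects the inner multicast packet via the
 * pim6reg device, so it is forwarded as if it had been received there.
 */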

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
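
/* Note (added summary): the default table's register vif is named "pim6reg";
 * additional tables get "pim6reg<id>".  The MRT6_TABLE setsockopt further
 * below rejects table ids that would not fit this name in IFNAMSIZ (16)
 * bytes.
 */
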
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(struct timer_list *t)
{
	struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
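
/* Note (added summary): the timer handler runs in softirq context, so it only
 * trylocks mfc_unres_lock; if the lock is busy it simply re-arms itself for
 * the next jiffy instead of spinning in softirq context.
 */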

/* Fill the oifs list. Called with mrt_lock held for writing. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
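
/* Example (added illustration): with ttls = {255, 1, 1, 255, ...} the entry
 * forwards on mifs 1 and 2 only; minvif becomes 1 and maxvif becomes 3, so
 * only mifs in [minvif, maxvif) are ever consulted when forwarding.  A ttl
 * of 255 means "do not forward on this mif".
 */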

static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is the vif busy? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (!mrt->mroute6_sk) {
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
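
/* Illustrative userspace sketch (added by the editor, not part of the
 * original source): the routing daemon receives these upcalls by reading its
 * mroute socket and switching on im6_msgtype.  Roughly:
 *
 *	struct mrt6msg *m;
 *	n = read(mroute_sock, buf, sizeof(buf));
 *	m = (struct mrt6msg *)buf;	// im6_mbz == 0 marks an upcall
 *	switch (m->im6_msgtype) {
 *	case MRT6MSG_NOCACHE:  // install an MFC entry via MRT6_ADD_MFC
 *	case MRT6MSG_WRONGMIF: // consider sending a PIM assert
 *	case MRT6MSG_WHOLEPKT: // re-send the payload as a PIM Register
 *		...
 *	}
 */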

/*
 *	Queue a packet for resolution. The unresolved cache entry itself is
 *	created (or found) and manipulated under mfc_unres_lock.
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
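
/* Note (added summary): the unresolved queue is deliberately tiny - at most
 * 10 pending (S,G) entries per table, at most 4 queued packets per entry,
 * and each entry expires after 10 s (see ip6mr_cache_alloc_unres) unless the
 * daemon resolves it with MRT6_ADD_MFC first.
 */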

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
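
/* Note (added summary): adding a resolved (S,G) route also completes any
 * matching entry on the unresolved queue - the packets that were parked
 * there by ip6mr_cache_unresolved() are replayed through the new entry via
 * ip6mr_cache_resolve() above.
 */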

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	} else {
		err = -EADDRINUSE;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
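
/* Illustrative userspace sketch (added by the editor; not from the original
 * source, error handling omitted): a daemon becomes the multicast router for
 * a netns roughly like this.  The option level is IPPROTO_IPV6, matching the
 * SOCK_RAW/IPPROTO_ICMPV6 checks in the function below:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 *	struct mif6ctl mc = { .mif6c_mifi = 0,
 *			      .mif6c_pifi = if_nametoindex("eth0") };
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 */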

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (activating PIM also activates assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsockopt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
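
/* Illustrative sketch (added by the editor): fetching per-(S,G) counters from
 * userspace, assuming src/grp already hold the addresses of interest:
 *
 *	struct sioc_sg_req6 req = { .src = src, .grp = grp };
 *	if (ioctl(mroute_sock, SIOCGETSGCNT_IN6, &req) == 0)
 *		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
 *		       req.pktcnt, req.bytecnt, req.wrong_if);
 */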

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program has joined on; otherwise the program would have to
	 * join on all interfaces. On the other hand, a multihomed host
	 * (or router, but not an mrouter) cannot join on more than one
	 * interface - that would result in receiving multiple copies of
	 * each packet.
	 */
2066 	dev = vif->dev;
2067 	skb->dev = dev;
2068 	vif->pkt_out++;
2069 	vif->bytes_out += skb->len;
2070 
2071 	/* We are about to write to the header; make a private, writable copy. */
2072 	/* XXX: extension headers? */
2073 	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2074 		goto out_free;
2075 
2076 	ipv6h = ipv6_hdr(skb);
2077 	ipv6h->hop_limit--;
2078 
2079 	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2080 
2081 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2082 		       net, NULL, skb, skb->dev, dev,
2083 		       ip6mr_forward2_finish);
2084 
2085 out_free:
2086 	kfree_skb(skb);
2087 	return 0;
2088 }
2089 
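/* Return the mif index that @dev is bound to in this table, or -1 if
 * the device is not a mif: the downward scan simply runs ct below zero,
 * and callers treat any negative value as "no vif".
 */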
2090 static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2091 {
2092 	int ct;
2093 
2094 	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2095 		if (mrt->vif6_table[ct].dev == dev)
2096 			break;
2097 	}
2098 	return ct;
2099 }
2100 
2101 static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2102 			   struct sk_buff *skb, struct mfc6_cache *cache)
2103 {
2104 	int psend = -1;
2105 	int vif, ct;
2106 	int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2107 
2108 	vif = cache->mf6c_parent;
2109 	cache->mfc_un.res.pkt++;
2110 	cache->mfc_un.res.bytes += skb->len;
2111 	cache->mfc_un.res.lastuse = jiffies;
2112 
2113 	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2114 		struct mfc6_cache *cache_proxy;
2115 
2116 		/* For an (*,G) entry, we only check that the incoming
2117 		 * interface is part of the static tree.
2118 		 */
2119 		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2120 		if (cache_proxy &&
2121 		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2122 			goto forward;
2123 	}
2124 
2125 	/*
2126 	 * Wrong interface: drop packet and (maybe) send PIM assert.
2127 	 */
2128 	if (mrt->vif6_table[vif].dev != skb->dev) {
2129 		cache->mfc_un.res.wrong_if++;
2130 
2131 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
2132 		    /* PIM-SM uses asserts when switching from the RPT to the SPT,
2133 		       so we cannot check that the packet arrived on an oif.
2134 		       That is bad, but otherwise we would have to move a pretty
2135 		       large chunk of pimd into the kernel. Ough... --ANK
2136 		     */
2137 		    (mrt->mroute_do_pim ||
2138 		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
2139 		    time_after(jiffies,
2140 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2141 			cache->mfc_un.res.last_assert = jiffies;
2142 			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2143 		}
2144 		goto dont_forward;
2145 	}
2146 
2147 forward:
2148 	mrt->vif6_table[vif].pkt_in++;
2149 	mrt->vif6_table[vif].bytes_in += skb->len;
2150 
2151 	/*
2152 	 *	Forward the frame
2153 	 */
2154 	if (ipv6_addr_any(&cache->mf6c_origin) &&
2155 	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2156 		if (true_vifi >= 0 &&
2157 		    true_vifi != cache->mf6c_parent &&
2158 		    ipv6_hdr(skb)->hop_limit >
2159 				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2160 			/* It's an (*,*) entry and the packet is not coming from
2161 			 * the upstream: forward the packet to the upstream
2162 			 * only.
2163 			 */
2164 			psend = cache->mf6c_parent;
2165 			goto last_forward;
2166 		}
2167 		goto dont_forward;
2168 	}
2169 	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2170 		/* For (*,G) entry, don't forward to the incoming interface */
2171 		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2172 		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2173 			if (psend != -1) {
2174 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2175 				if (skb2)
2176 					ip6mr_forward2(net, mrt, skb2, cache, psend);
2177 			}
2178 			psend = ct;
2179 		}
2180 	}
2181 last_forward:
2182 	if (psend != -1) {
2183 		ip6mr_forward2(net, mrt, skb, cache, psend);
2184 		return;
2185 	}
2186 
2187 dont_forward:
2188 	kfree_skb(skb);
2189 }
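
/* Note on the fan-out above: transmission deliberately lags the oif
 * scan by one vif. Each time another eligible oif is found, a clone is
 * sent to the previously recorded vif (psend) and the new vif takes its
 * place; the original skb is finally consumed by the ip6mr_forward2()
 * call at last_forward, saving one skb_clone() per packet.
 */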
2190 
2191 
2192 /*
2193  *	Multicast packets for forwarding arrive here
2194  */
2195 
2196 int ip6_mr_input(struct sk_buff *skb)
2197 {
2198 	struct mfc6_cache *cache;
2199 	struct net *net = dev_net(skb->dev);
2200 	struct mr6_table *mrt;
2201 	struct flowi6 fl6 = {
2202 		.flowi6_iif	= skb->dev->ifindex,
2203 		.flowi6_mark	= skb->mark,
2204 	};
2205 	int err;
2206 
2207 	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2208 	if (err < 0) {
2209 		kfree_skb(skb);
2210 		return err;
2211 	}
2212 
2213 	read_lock(&mrt_lock);
2214 	cache = ip6mr_cache_find(mrt,
2215 				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2216 	if (!cache) {
2217 		int vif = ip6mr_find_vif(mrt, skb->dev);
2218 
2219 		if (vif >= 0)
2220 			cache = ip6mr_cache_find_any(mrt,
2221 						     &ipv6_hdr(skb)->daddr,
2222 						     vif);
2223 	}
2224 
2225 	/*
2226 	 *	No usable cache entry
2227 	 */
2228 	if (!cache) {
2229 		int vif;
2230 
2231 		vif = ip6mr_find_vif(mrt, skb->dev);
2232 		if (vif >= 0) {
2233 			int err = ip6mr_cache_unresolved(mrt, vif, skb);
2234 			read_unlock(&mrt_lock);
2235 
2236 			return err;
2237 		}
2238 		read_unlock(&mrt_lock);
2239 		kfree_skb(skb);
2240 		return -ENODEV;
2241 	}
2242 
2243 	ip6_mr_forward(net, mrt, skb, cache);
2244 
2245 	read_unlock(&mrt_lock);
2246 
2247 	return 0;
2248 }
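
/* ip6_mr_input() is invoked from ip6_mc_input() in net/ipv6/ip6_input.c
 * when mc_forwarding is enabled on the incoming interface, normally on
 * a clone of the skb, so local delivery to multicast listeners proceeds
 * independently of the forwarding done here.
 */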
2249 
2250 
2251 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2252 			       struct mfc6_cache *c, struct rtmsg *rtm)
2253 {
2254 	struct rta_mfc_stats mfcs;
2255 	struct nlattr *mp_attr;
2256 	struct rtnexthop *nhp;
2257 	unsigned long lastuse;
2258 	int ct;
2259 
2260 	/* If cache is unresolved, don't try to parse IIF and OIF */
2261 	if (c->mf6c_parent >= MAXMIFS) {
2262 		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
2263 		return -ENOENT;
2264 	}
2265 
2266 	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2267 	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2268 		return -EMSGSIZE;
2269 	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
2270 	if (!mp_attr)
2271 		return -EMSGSIZE;
2272 
2273 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2274 		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2275 			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
2276 			if (!nhp) {
2277 				nla_nest_cancel(skb, mp_attr);
2278 				return -EMSGSIZE;
2279 			}
2280 
2281 			nhp->rtnh_flags = 0;
2282 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2283 			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2284 			nhp->rtnh_len = sizeof(*nhp);
2285 		}
2286 	}
2287 
2288 	nla_nest_end(skb, mp_attr);
2289 
2290 	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2291 	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2292 
2293 	mfcs.mfcs_packets = c->mfc_un.res.pkt;
2294 	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2295 	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2296 	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
2297 	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
2298 			      RTA_PAD))
2299 		return -EMSGSIZE;
2300 
2301 	rtm->rtm_type = RTN_MULTICAST;
2302 	return 1;
2303 }
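
/* Sketch of the attribute layout emitted above for a resolved entry
 * (the sizes are accounted for in mr6_msgsize() below):
 *
 *	RTA_IIF        u32 ifindex of the parent (input) mif
 *	RTA_MULTIPATH  nest of struct rtnexthop, one per oif whose ttl
 *	               threshold is < 255 (threshold in rtnh_hops)
 *	RTA_MFC_STATS  struct rta_mfc_stats { packets, bytes, wrong_if }
 *	RTA_EXPIRES    u64, clock_t ticks since the entry was last used
 */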
2304 
2305 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2306 		    u32 portid)
2307 {
2308 	int err;
2309 	struct mr6_table *mrt;
2310 	struct mfc6_cache *cache;
2311 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2312 
2313 	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2314 	if (!mrt)
2315 		return -ENOENT;
2316 
2317 	read_lock(&mrt_lock);
2318 	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2319 	if (!cache && skb->dev) {
2320 		int vif = ip6mr_find_vif(mrt, skb->dev);
2321 
2322 		if (vif >= 0)
2323 			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2324 						     vif);
2325 	}
2326 
2327 	if (!cache) {
2328 		struct sk_buff *skb2;
2329 		struct ipv6hdr *iph;
2330 		struct net_device *dev;
2331 		int vif;
2332 
2333 		dev = skb->dev;
2334 		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2335 			read_unlock(&mrt_lock);
2336 			return -ENODEV;
2337 		}
2338 
2339 		/* really correct? */
2340 		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2341 		if (!skb2) {
2342 			read_unlock(&mrt_lock);
2343 			return -ENOMEM;
2344 		}
2345 
2346 		NETLINK_CB(skb2).portid = portid;
2347 		skb_reset_transport_header(skb2);
2348 
2349 		skb_put(skb2, sizeof(struct ipv6hdr));
2350 		skb_reset_network_header(skb2);
2351 
2352 		iph = ipv6_hdr(skb2);
2353 		iph->version = 0;
2354 		iph->priority = 0;
2355 		iph->flow_lbl[0] = 0;
2356 		iph->flow_lbl[1] = 0;
2357 		iph->flow_lbl[2] = 0;
2358 		iph->payload_len = 0;
2359 		iph->nexthdr = IPPROTO_NONE;
2360 		iph->hop_limit = 0;
2361 		iph->saddr = rt->rt6i_src.addr;
2362 		iph->daddr = rt->rt6i_dst.addr;
2363 
2364 		err = ip6mr_cache_unresolved(mrt, vif, skb2);
2365 		read_unlock(&mrt_lock);
2366 
2367 		return err;
2368 	}
2369 
2370 	if (rtm->rtm_flags & RTM_F_NOTIFY)
2371 		cache->mfc_flags |= MFC_NOTIFY;
2372 
2373 	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2374 	read_unlock(&mrt_lock);
2375 	return err;
2376 }
2377 
2378 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2379 			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2380 			     int flags)
2381 {
2382 	struct nlmsghdr *nlh;
2383 	struct rtmsg *rtm;
2384 	int err;
2385 
2386 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2387 	if (!nlh)
2388 		return -EMSGSIZE;
2389 
2390 	rtm = nlmsg_data(nlh);
2391 	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2392 	rtm->rtm_dst_len  = 128;
2393 	rtm->rtm_src_len  = 128;
2394 	rtm->rtm_tos      = 0;
2395 	rtm->rtm_table    = mrt->id;
2396 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2397 		goto nla_put_failure;
2398 	rtm->rtm_type = RTN_MULTICAST;
2399 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2400 	if (c->mfc_flags & MFC_STATIC)
2401 		rtm->rtm_protocol = RTPROT_STATIC;
2402 	else
2403 		rtm->rtm_protocol = RTPROT_MROUTED;
2404 	rtm->rtm_flags    = 0;
2405 
2406 	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2407 	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2408 		goto nla_put_failure;
2409 	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2410 	/* do not break the dump if cache is unresolved */
2411 	if (err < 0 && err != -ENOENT)
2412 		goto nla_put_failure;
2413 
2414 	nlmsg_end(skb, nlh);
2415 	return 0;
2416 
2417 nla_put_failure:
2418 	nlmsg_cancel(skb, nlh);
2419 	return -EMSGSIZE;
2420 }
2421 
2422 static int mr6_msgsize(bool unresolved, int maxvif)
2423 {
2424 	size_t len =
2425 		NLMSG_ALIGN(sizeof(struct rtmsg))
2426 		+ nla_total_size(4)	/* RTA_TABLE */
2427 		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
2428 		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
2429 		;
2430 
2431 	if (!unresolved)
2432 		len = len
2433 		      + nla_total_size(4)	/* RTA_IIF */
2434 		      + nla_total_size(0)	/* RTA_MULTIPATH */
2435 		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2436 						/* RTA_MFC_STATS */
2437 		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2438 		;
2439 
2440 	return len;
2441 }
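
/* Worked example, assuming the usual netlink sizes (NLA_HDRLEN = 4,
 * 4-byte NLA_ALIGN, sizeof(struct rtmsg) = 12, sizeof(struct rtnexthop)
 * = 8): an unresolved entry needs 12 + 8 + 20 + 20 = 60 bytes; a
 * resolved entry with maxvif = 2 adds 8 (RTA_IIF) + 4 (RTA_MULTIPATH
 * nest header) + 2 * 8 (rtnexthops) + nla_total_size_64bit(24) for the
 * three u64 counters in struct rta_mfc_stats (28 bytes, plus a pad
 * attribute on architectures without efficient unaligned access).
 */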
2442 
2443 static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2444 			      int cmd)
2445 {
2446 	struct net *net = read_pnet(&mrt->net);
2447 	struct sk_buff *skb;
2448 	int err = -ENOBUFS;
2449 
2450 	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2451 			GFP_ATOMIC);
2452 	if (!skb)
2453 		goto errout;
2454 
2455 	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2456 	if (err < 0)
2457 		goto errout;
2458 
2459 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2460 	return;
2461 
2462 errout:
2463 	kfree_skb(skb);
2464 	if (err < 0)
2465 		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2466 }
2467 
2468 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2469 {
2470 	size_t len =
2471 		NLMSG_ALIGN(sizeof(struct rtgenmsg))
2472 		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
2473 		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
2474 					/* IP6MRA_CREPORT_SRC_ADDR */
2475 		+ nla_total_size(sizeof(struct in6_addr))
2476 					/* IP6MRA_CREPORT_DST_ADDR */
2477 		+ nla_total_size(sizeof(struct in6_addr))
2478 					/* IP6MRA_CREPORT_PKT */
2479 		+ nla_total_size(payloadlen)
2480 		;
2481 
2482 	return len;
2483 }
2484 
2485 static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
2486 {
2487 	struct net *net = read_pnet(&mrt->net);
2488 	struct nlmsghdr *nlh;
2489 	struct rtgenmsg *rtgenm;
2490 	struct mrt6msg *msg;
2491 	struct sk_buff *skb;
2492 	struct nlattr *nla;
2493 	int payloadlen;
2494 
2495 	payloadlen = pkt->len - sizeof(struct mrt6msg);
2496 	msg = (struct mrt6msg *)skb_transport_header(pkt);
2497 
2498 	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2499 	if (!skb)
2500 		goto errout;
2501 
2502 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2503 			sizeof(struct rtgenmsg), 0);
2504 	if (!nlh)
2505 		goto errout;
2506 	rtgenm = nlmsg_data(nlh);
2507 	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2508 	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2509 	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2510 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2511 			     &msg->im6_src) ||
2512 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2513 			     &msg->im6_dst))
2514 		goto nla_put_failure;
2515 
2516 	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2517 	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2518 				  nla_data(nla), payloadlen))
2519 		goto nla_put_failure;
2520 
2521 	nlmsg_end(skb, nlh);
2522 
2523 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2524 	return;
2525 
2526 nla_put_failure:
2527 	nlmsg_cancel(skb, nlh);
2528 errout:
2529 	kfree_skb(skb);
2530 	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2531 }
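
/* A minimal user-space sketch (an assumption, not kernel code): a
 * monitor can receive the RTM_NEWCACHEREPORT notifications sent above
 * by joining the RTNLGRP_IPV6_MROUTE_R group on an rtnetlink socket:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	unsigned int grp = RTNLGRP_IPV6_MROUTE_R;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 *	// recvmsg() then yields messages carrying IP6MRA_CREPORT_*
 */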
2532 
2533 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2534 {
2535 	struct net *net = sock_net(skb->sk);
2536 	struct mr6_table *mrt;
2537 	struct mfc6_cache *mfc;
2538 	unsigned int t = 0, s_t;
2539 	unsigned int h = 0, s_h;
2540 	unsigned int e = 0, s_e;
2541 
2542 	s_t = cb->args[0];
2543 	s_h = cb->args[1];
2544 	s_e = cb->args[2];
2545 
2546 	read_lock(&mrt_lock);
2547 	ip6mr_for_each_table(mrt, net) {
2548 		if (t < s_t)
2549 			goto next_table;
2550 		if (t > s_t)
2551 			s_h = 0;
2552 		for (h = s_h; h < MFC6_LINES; h++) {
2553 			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2554 				if (e < s_e)
2555 					goto next_entry;
2556 				if (ip6mr_fill_mroute(mrt, skb,
2557 						      NETLINK_CB(cb->skb).portid,
2558 						      cb->nlh->nlmsg_seq,
2559 						      mfc, RTM_NEWROUTE,
2560 						      NLM_F_MULTI) < 0)
2561 					goto done;
2562 next_entry:
2563 				e++;
2564 			}
2565 			e = s_e = 0;
2566 		}
2567 		spin_lock_bh(&mfc_unres_lock);
2568 		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2569 			if (e < s_e)
2570 				goto next_entry2;
2571 			if (ip6mr_fill_mroute(mrt, skb,
2572 					      NETLINK_CB(cb->skb).portid,
2573 					      cb->nlh->nlmsg_seq,
2574 					      mfc, RTM_NEWROUTE,
2575 					      NLM_F_MULTI) < 0) {
2576 				spin_unlock_bh(&mfc_unres_lock);
2577 				goto done;
2578 			}
2579 next_entry2:
2580 			e++;
2581 		}
2582 		spin_unlock_bh(&mfc_unres_lock);
2583 		e = s_e = 0;
2584 		s_h = 0;
2585 next_table:
2586 		t++;
2587 	}
2588 done:
2589 	read_unlock(&mrt_lock);
2590 
2591 	cb->args[2] = e;
2592 	cb->args[1] = h;
2593 	cb->args[0] = t;
2594 
2595 	return skb->len;
2596 }
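
/* The cb->args[] triple saved above is the dump cursor: table index,
 * hash bucket, and entry offset within the bucket (the unresolved queue
 * is walked after the buckets, reusing the same entry counter). When
 * the skb fills up, rtnetlink re-enters this function with those values
 * so the walk resumes at the first entry that did not fit.
 */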
2597