xref: /openbmc/linux/net/batman-adv/multicast.c (revision 3557b3fd)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2014-2019  B.A.T.M.A.N. contributors:
3  *
4  * Linus Lüssing
5  */
6 
7 #include "multicast.h"
8 #include "main.h"
9 
10 #include <linux/atomic.h>
11 #include <linux/bitops.h>
12 #include <linux/bug.h>
13 #include <linux/byteorder/generic.h>
14 #include <linux/errno.h>
15 #include <linux/etherdevice.h>
16 #include <linux/gfp.h>
17 #include <linux/icmpv6.h>
18 #include <linux/if_bridge.h>
19 #include <linux/if_ether.h>
20 #include <linux/igmp.h>
21 #include <linux/in.h>
22 #include <linux/in6.h>
23 #include <linux/ip.h>
24 #include <linux/ipv6.h>
25 #include <linux/jiffies.h>
26 #include <linux/kernel.h>
27 #include <linux/kref.h>
28 #include <linux/list.h>
29 #include <linux/lockdep.h>
30 #include <linux/netdevice.h>
31 #include <linux/netlink.h>
32 #include <linux/printk.h>
33 #include <linux/rculist.h>
34 #include <linux/rcupdate.h>
35 #include <linux/seq_file.h>
36 #include <linux/skbuff.h>
37 #include <linux/slab.h>
38 #include <linux/spinlock.h>
39 #include <linux/stddef.h>
40 #include <linux/string.h>
41 #include <linux/types.h>
42 #include <linux/workqueue.h>
43 #include <net/addrconf.h>
44 #include <net/genetlink.h>
45 #include <net/if_inet6.h>
46 #include <net/ip.h>
47 #include <net/ipv6.h>
48 #include <net/netlink.h>
49 #include <net/sock.h>
50 #include <uapi/linux/batadv_packet.h>
51 #include <uapi/linux/batman_adv.h>
52 
53 #include "hard-interface.h"
54 #include "hash.h"
55 #include "log.h"
56 #include "netlink.h"
57 #include "send.h"
58 #include "soft-interface.h"
59 #include "translation-table.h"
60 #include "tvlv.h"
61 
62 static void batadv_mcast_mla_update(struct work_struct *work);
63 
64 /**
65  * batadv_mcast_start_timer() - schedule the multicast periodic worker
66  * @bat_priv: the bat priv with all the soft interface information
67  */
68 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
69 {
70 	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
71 			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
72 }
73 
74 /**
75  * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
76  * @soft_iface: netdev struct of the mesh interface
77  *
78  * If the given soft interface has a bridge on top then the refcount
79  * of the according net device is increased.
80  *
81  * Return: NULL if no such bridge exists. Otherwise the net device of the
82  * bridge.
83  */
84 static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
85 {
86 	struct net_device *upper = soft_iface;
87 
88 	rcu_read_lock();
89 	do {
90 		upper = netdev_master_upper_dev_get_rcu(upper);
91 	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));
92 
93 	if (upper)
94 		dev_hold(upper);
95 	rcu_read_unlock();
96 
97 	return upper;
98 }
99 
100 /**
101  * batadv_mcast_addr_is_ipv4() - check if multicast MAC is IPv4
102  * @addr: the MAC address to check
103  *
104  * Return: True, if MAC address is one reserved for IPv4 multicast, false
105  * otherwise.
106  */
107 static bool batadv_mcast_addr_is_ipv4(const u8 *addr)
108 {
109 	static const u8 prefix[] = {0x01, 0x00, 0x5E};
110 
111 	return memcmp(prefix, addr, sizeof(prefix)) == 0;
112 }
113 
114 /**
115  * batadv_mcast_addr_is_ipv6() - check if multicast MAC is IPv6
116  * @addr: the MAC address to check
117  *
118  * Return: True, if MAC address is one reserved for IPv6 multicast, false
119  * otherwise.
120  */
121 static bool batadv_mcast_addr_is_ipv6(const u8 *addr)
122 {
123 	static const u8 prefix[] = {0x33, 0x33};
124 
125 	return memcmp(prefix, addr, sizeof(prefix)) == 0;
126 }
127 
/**
 * batadv_mcast_mla_softif_get() - get softif multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 *
 * Collects multicast addresses of multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * If there is a bridge interface on top of dev, collects from that one
 * instead. Just like with IP addresses and routes, multicast listeners
 * will(/should) register to the bridge interface instead of an
 * enslaved bat0.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_softif_get(struct batadv_priv *bat_priv,
				       struct net_device *dev,
				       struct hlist_head *mcast_list)
{
	bool all_ipv4 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV4;
	bool all_ipv6 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV6;
	/* holds a reference on the bridge if one exists, NULL otherwise */
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	struct netdev_hw_addr *mc_list_entry;
	struct batadv_hw_addr *new;
	int ret = 0;

	/* walk the mc list of the bridge (if bridged) or of dev itself,
	 * under its address list lock to keep the list stable
	 */
	netif_addr_lock_bh(bridge ? bridge : dev);
	netdev_for_each_mc_addr(mc_list_entry, bridge ? bridge : dev) {
		/* skip addresses already covered by a want-all flag */
		if (all_ipv4 && batadv_mcast_addr_is_ipv4(mc_list_entry->addr))
			continue;

		if (all_ipv6 && batadv_mcast_addr_is_ipv6(mc_list_entry->addr))
			continue;

		/* GFP_ATOMIC: we are under the BH address list lock here */
		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mc_list_entry->addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	netif_addr_unlock_bh(bridge ? bridge : dev);

	/* drop the reference taken by batadv_mcast_get_bridge() */
	if (bridge)
		dev_put(bridge);

	return ret;
}
183 
184 /**
185  * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
186  * @mcast_addr: the multicast address to check
187  * @mcast_list: the list with multicast addresses to search in
188  *
189  * Return: true if the given address is already in the given list.
190  * Otherwise returns false.
191  */
192 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
193 					  struct hlist_head *mcast_list)
194 {
195 	struct batadv_hw_addr *mcast_entry;
196 
197 	hlist_for_each_entry(mcast_entry, mcast_list, list)
198 		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
199 			return true;
200 
201 	return false;
202 }
203 
204 /**
205  * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
206  * @dst: destination to write to - a multicast MAC address
207  * @src: source to read from - a multicast IP address
208  *
209  * Converts a given multicast IPv4/IPv6 address from a bridge
210  * to its matching multicast MAC address and copies it into the given
211  * destination buffer.
212  *
213  * Caller needs to make sure the destination buffer can hold
214  * at least ETH_ALEN bytes.
215  */
216 static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
217 {
218 	if (src->proto == htons(ETH_P_IP))
219 		ip_eth_mc_map(src->u.ip4, dst);
220 #if IS_ENABLED(CONFIG_IPV6)
221 	else if (src->proto == htons(ETH_P_IPV6))
222 		ipv6_eth_mc_map(&src->u.ip6, dst);
223 #endif
224 	else
225 		eth_zero_addr(dst);
226 }
227 
/**
 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @dev: a bridge slave whose bridge to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 *
 * Collects multicast addresses of multicast listeners residing
 * on foreign, non-mesh devices which we gave access to our mesh via
 * a bridge on top of the given soft interface, dev, in the given
 * mcast_list.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 *
 * NOTE(review): on success this returns the count reported by
 * br_multicast_list_adjacent(), which may exceed the number of entries
 * actually appended below since duplicates and want-all-covered
 * addresses are skipped - confirm callers only test for < 0.
 */
static int batadv_mcast_mla_bridge_get(struct batadv_priv *bat_priv,
				       struct net_device *dev,
				       struct hlist_head *mcast_list)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	bool all_ipv4 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV4;
	bool all_ipv6 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV6;
	struct br_ip_list *br_ip_entry, *tmp;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* we don't need to detect these devices/listeners, the IGMP/MLD
	 * snooping code of the Linux bridge already does that for us
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		/* skip addresses already covered by a want-all flag */
		if (all_ipv4 && br_ip_entry->addr.proto == htons(ETH_P_IP))
			continue;

		if (all_ipv6 && br_ip_entry->addr.proto == htons(ETH_P_IPV6))
			continue;

		/* map the bridge's IP multicast entry to a MAC address and
		 * skip it if the softif pass already collected it
		 */
		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	/* the bridge allocated the br_ip_list entries; free them here */
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}
290 
291 /**
292  * batadv_mcast_mla_list_free() - free a list of multicast addresses
293  * @mcast_list: the list to free
294  *
295  * Removes and frees all items in the given mcast_list.
296  */
297 static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
298 {
299 	struct batadv_hw_addr *mcast_entry;
300 	struct hlist_node *tmp;
301 
302 	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
303 		hlist_del(&mcast_entry->list);
304 		kfree(mcast_entry);
305 	}
306 }
307 
/**
 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which should _not_ be removed
 *
 * Retracts the announcement of any multicast listener from the
 * translation table except the ones listed in the given mcast_list.
 *
 * If mcast_list is NULL then all are retracted.
 *
 * Do not call outside of the mcast worker! (or cancel mcast worker first)
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	/* mla_list may only be touched by the non-reentrant mcast worker */
	WARN_ON(delayed_work_pending(&bat_priv->mcast.work));

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		/* still a listener? then keep its TT announcement */
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}
343 
/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 *
 * Do not call outside of the mcast worker! (or cancel mcast worker first)
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	/* mla_list may only be touched by the non-reentrant mcast worker */
	WARN_ON(delayed_work_pending(&bat_priv->mcast.work));

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		/* already announced in a previous run: nothing to do */
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		/* entries which fail to enter the TT stay on mcast_list and
		 * are freed by the caller via batadv_mcast_mla_list_free()
		 */
		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		/* ownership moves from mcast_list to our mla_list */
		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}
379 
380 /**
381  * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
382  * @bat_priv: the bat priv with all the soft interface information
383  *
384  * Checks whether there is a bridge on top of our soft interface.
385  *
386  * Return: true if there is a bridge, false otherwise.
387  */
388 static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
389 {
390 	struct net_device *upper = bat_priv->soft_iface;
391 
392 	rcu_read_lock();
393 	do {
394 		upper = netdev_master_upper_dev_get_rcu(upper);
395 	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));
396 	rcu_read_unlock();
397 
398 	return upper;
399 }
400 
401 /**
402  * batadv_mcast_querier_log() - debug output regarding the querier status on
403  *  link
404  * @bat_priv: the bat priv with all the soft interface information
405  * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
406  * @old_state: the previous querier state on our link
407  * @new_state: the new querier state on our link
408  *
409  * Outputs debug messages to the logging facility with log level 'mcast'
410  * regarding changes to the querier status on the link which are relevant
411  * to our multicast optimizations.
412  *
413  * Usually this is about whether a querier appeared or vanished in
414  * our mesh or whether the querier is in the suboptimal position of being
415  * behind our local bridge segment: Snooping switches will directly
416  * forward listener reports to the querier, therefore batman-adv and
417  * the bridge will potentially not see these listeners - the querier is
418  * potentially shadowing listeners from us then.
419  *
420  * This is only interesting for nodes with a bridge on top of their
421  * soft interface.
422  */
423 static void
424 batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
425 			 struct batadv_mcast_querier_state *old_state,
426 			 struct batadv_mcast_querier_state *new_state)
427 {
428 	if (!old_state->exists && new_state->exists)
429 		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
430 			    str_proto);
431 	else if (old_state->exists && !new_state->exists)
432 		batadv_info(bat_priv->soft_iface,
433 			    "%s Querier disappeared - multicast optimizations disabled\n",
434 			    str_proto);
435 	else if (!bat_priv->mcast.bridged && !new_state->exists)
436 		batadv_info(bat_priv->soft_iface,
437 			    "No %s Querier present - multicast optimizations disabled\n",
438 			    str_proto);
439 
440 	if (new_state->exists) {
441 		if ((!old_state->shadowing && new_state->shadowing) ||
442 		    (!old_state->exists && new_state->shadowing))
443 			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
444 				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
445 				   str_proto);
446 		else if (old_state->shadowing && !new_state->shadowing)
447 			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
448 				   "%s Querier is not behind our bridged segment\n",
449 				   str_proto);
450 	}
451 }
452 
453 /**
454  * batadv_mcast_bridge_log() - debug output for topology changes in bridged
455  *  setups
456  * @bat_priv: the bat priv with all the soft interface information
457  * @bridged: a flag about whether the soft interface is currently bridged or not
458  * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
459  * @querier_ipv6: (maybe) new status of a potential, selected MLD querier
460  *
461  * If no bridges are ever used on this node, then this function does nothing.
462  *
463  * Otherwise this function outputs debug information to the 'mcast' log level
464  * which might be relevant to our multicast optimizations.
465  *
466  * More precisely, it outputs information when a bridge interface is added or
467  * removed from a soft interface. And when a bridge is present, it further
468  * outputs information about the querier state which is relevant for the
469  * multicast flags this node is going to set.
470  */
471 static void
472 batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
473 			struct batadv_mcast_querier_state *querier_ipv4,
474 			struct batadv_mcast_querier_state *querier_ipv6)
475 {
476 	if (!bat_priv->mcast.bridged && bridged)
477 		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
478 			   "Bridge added: Setting Unsnoopables(U)-flag\n");
479 	else if (bat_priv->mcast.bridged && !bridged)
480 		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
481 			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");
482 
483 	if (bridged) {
484 		batadv_mcast_querier_log(bat_priv, "IGMP",
485 					 &bat_priv->mcast.querier_ipv4,
486 					 querier_ipv4);
487 		batadv_mcast_querier_log(bat_priv, "MLD",
488 					 &bat_priv->mcast.querier_ipv6,
489 					 querier_ipv6);
490 	}
491 }
492 
493 /**
494  * batadv_mcast_flags_logs() - output debug information about mcast flag changes
495  * @bat_priv: the bat priv with all the soft interface information
496  * @flags: flags indicating the new multicast state
497  *
498  * Whenever the multicast flags this nodes announces changes (@mcast_flags vs.
499  * bat_priv->mcast.flags), this notifies userspace via the 'mcast' log level.
500  */
501 static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
502 {
503 	u8 old_flags = bat_priv->mcast.flags;
504 	char str_old_flags[] = "[...]";
505 
506 	sprintf(str_old_flags, "[%c%c%c]",
507 		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
508 		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
509 		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
510 
511 	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
512 		   "Changing multicast flags from '%s' to '[%c%c%c]'\n",
513 		   bat_priv->mcast.enabled ? str_old_flags : "<undefined>",
514 		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
515 		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
516 		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
517 }
518 
/**
 * batadv_mcast_mla_tvlv_update() - update multicast tvlv
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 *
 * Return: false if we want all IPv4 && IPv6 multicast traffic and true
 * otherwise.
 */
static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
{
	struct batadv_tvlv_mcast_data mcast_data;
	struct batadv_mcast_querier_state querier4 = {false, false};
	struct batadv_mcast_querier_state querier6 = {false, false};
	struct net_device *dev = bat_priv->soft_iface;
	bool bridged;

	mcast_data.flags = BATADV_NO_FLAGS;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	/* without a bridge on top no flag needs to be set at all */
	bridged = batadv_mcast_has_bridge(bat_priv);
	if (!bridged)
		goto update;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	/* snapshot the querier situation as seen by the bridge */
	querier4.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	querier4.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	querier6.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	querier6.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	/* bridged-in listeners cannot be snooped via batman-adv itself */
	mcast_data.flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signalize other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!querier4.exists || querier4.shadowing)
		mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV4;

	if (!querier6.exists || querier6.shadowing)
		mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV6;

update:
	/* log transitions before overwriting the stored state below */
	batadv_mcast_bridge_log(bat_priv, bridged, &querier4, &querier6);

	bat_priv->mcast.querier_ipv4.exists = querier4.exists;
	bat_priv->mcast.querier_ipv4.shadowing = querier4.shadowing;

	bat_priv->mcast.querier_ipv6.exists = querier6.exists;
	bat_priv->mcast.querier_ipv6.shadowing = querier6.shadowing;

	bat_priv->mcast.bridged = bridged;

	/* (re)register the tvlv container only when the flags changed or
	 * were never announced before
	 */
	if (!bat_priv->mcast.enabled ||
	    mcast_data.flags != bat_priv->mcast.flags) {
		batadv_mcast_flags_log(bat_priv, mcast_data.flags);
		batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
					       &mcast_data, sizeof(mcast_data));
		bat_priv->mcast.flags = mcast_data.flags;
		bat_priv->mcast.enabled = true;
	}

	return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
		 mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
}
593 
/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	int ret;

	/* if we want all IPv4+IPv6 multicast traffic anyway, skip the
	 * collection and retract all announcements via the still empty
	 * mcast_list
	 */
	if (!batadv_mcast_mla_tvlv_update(bat_priv))
		goto update;

	ret = batadv_mcast_mla_softif_get(bat_priv, soft_iface, &mcast_list);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(bat_priv, soft_iface, &mcast_list);
	if (ret < 0)
		goto out;

update:
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);

out:
	/* frees whatever tt_add() did not move to bat_priv's mla_list */
	batadv_mcast_mla_list_free(&mcast_list);
}
630 
631 /**
632  * batadv_mcast_mla_update() - update the own MLAs
633  * @work: kernel work struct
634  *
635  * Updates the own multicast listener announcements in the translation
636  * table as well as the own, announced multicast tvlv container.
637  *
638  * In the end, reschedules the work timer.
639  */
640 static void batadv_mcast_mla_update(struct work_struct *work)
641 {
642 	struct delayed_work *delayed_work;
643 	struct batadv_priv_mcast *priv_mcast;
644 	struct batadv_priv *bat_priv;
645 
646 	delayed_work = to_delayed_work(work);
647 	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
648 	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
649 
650 	__batadv_mcast_mla_update(bat_priv);
651 	batadv_mcast_start_timer(bat_priv);
652 }
653 
654 /**
655  * batadv_mcast_is_report_ipv4() - check for IGMP reports
656  * @skb: the ethernet frame destined for the mesh
657  *
658  * This call might reallocate skb data.
659  *
660  * Checks whether the given frame is a valid IGMP report.
661  *
662  * Return: If so then true, otherwise false.
663  */
664 static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
665 {
666 	if (ip_mc_check_igmp(skb) < 0)
667 		return false;
668 
669 	switch (igmp_hdr(skb)->type) {
670 	case IGMP_HOST_MEMBERSHIP_REPORT:
671 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
672 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
673 		return true;
674 	}
675 
676 	return false;
677 }
678 
/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	/* IGMP reports must reach all queriers -> keep flooding them */
	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	/* header access is only valid after the pskb_may_pull() above */
	iphdr = ip_hdr(skb);

	/* TODO: Implement Multicast Router Discovery (RFC4286),
	 * then allow scope > link local, too
	 */
	if (!ipv4_is_local_multicast(iphdr->daddr))
		return -EINVAL;

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	*is_unsnoopable = true;

	return 0;
}
720 
721 /**
722  * batadv_mcast_is_report_ipv6() - check for MLD reports
723  * @skb: the ethernet frame destined for the mesh
724  *
725  * This call might reallocate skb data.
726  *
727  * Checks whether the given frame is a valid MLD report.
728  *
729  * Return: If so then true, otherwise false.
730  */
731 static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
732 {
733 	if (ipv6_mc_check_mld(skb) < 0)
734 		return false;
735 
736 	switch (icmp6_hdr(skb)->icmp6_type) {
737 	case ICMPV6_MGM_REPORT:
738 	case ICMPV6_MLD2_REPORT:
739 		return true;
740 	}
741 
742 	return false;
743 }
744 
/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL is or -ENOMEM if we are out of memory
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	/* MLD reports must reach all queriers -> keep flooding them */
	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	/* header access is only valid after the pskb_may_pull() above */
	ip6hdr = ipv6_hdr(skb);

	/* TODO: Implement Multicast Router Discovery (RFC4286),
	 * then allow scope > link local, too
	 */
	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) != IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;

	return 0;
}
786 
787 /**
788  * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
789  * @bat_priv: the bat priv with all the soft interface information
790  * @skb: the multicast frame to check
791  * @is_unsnoopable: stores whether the destination is snoopable
792  *
793  * Checks whether the given multicast ethernet frame has the potential to be
794  * forwarded with a mode more optimal than classic flooding.
795  *
796  * Return: If so then 0. Otherwise -EINVAL is or -ENOMEM if we are out of memory
797  */
798 static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
799 					struct sk_buff *skb,
800 					bool *is_unsnoopable)
801 {
802 	struct ethhdr *ethhdr = eth_hdr(skb);
803 
804 	if (!atomic_read(&bat_priv->multicast_mode))
805 		return -EINVAL;
806 
807 	switch (ntohs(ethhdr->h_proto)) {
808 	case ETH_P_IP:
809 		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
810 							 is_unsnoopable);
811 	case ETH_P_IPV6:
812 		if (!IS_ENABLED(CONFIG_IPV6))
813 			return -EINVAL;
814 
815 		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
816 							 is_unsnoopable);
817 	default:
818 		return -EINVAL;
819 	}
820 }
821 
822 /**
823  * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
824  *  interest
825  * @bat_priv: the bat priv with all the soft interface information
826  * @ethhdr: ethernet header of a packet
827  *
828  * Return: the number of nodes which want all IPv4 multicast traffic if the
829  * given ethhdr is from an IPv4 packet or the number of nodes which want all
830  * IPv6 traffic if it matches an IPv6 packet.
831  */
832 static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
833 					       struct ethhdr *ethhdr)
834 {
835 	switch (ntohs(ethhdr->h_proto)) {
836 	case ETH_P_IP:
837 		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
838 	case ETH_P_IPV6:
839 		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
840 	default:
841 		/* we shouldn't be here... */
842 		return 0;
843 	}
844 }
845 
846 /**
847  * batadv_mcast_forw_tt_node_get() - get a multicast tt node
848  * @bat_priv: the bat priv with all the soft interface information
849  * @ethhdr: the ether header containing the multicast destination
850  *
851  * Return: an orig_node matching the multicast address provided by ethhdr
852  * via a translation table lookup. This increases the returned nodes refcount.
853  */
854 static struct batadv_orig_node *
855 batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
856 			      struct ethhdr *ethhdr)
857 {
858 	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
859 					BATADV_NO_FLAGS);
860 }
861 
862 /**
863  * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
864  * @bat_priv: the bat priv with all the soft interface information
865  *
866  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
867  * increases its refcount.
868  */
869 static struct batadv_orig_node *
870 batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
871 {
872 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
873 
874 	rcu_read_lock();
875 	hlist_for_each_entry_rcu(tmp_orig_node,
876 				 &bat_priv->mcast.want_all_ipv4_list,
877 				 mcast_want_all_ipv4_node) {
878 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
879 			continue;
880 
881 		orig_node = tmp_orig_node;
882 		break;
883 	}
884 	rcu_read_unlock();
885 
886 	return orig_node;
887 }
888 
889 /**
890  * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
891  * @bat_priv: the bat priv with all the soft interface information
892  *
893  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
894  * and increases its refcount.
895  */
896 static struct batadv_orig_node *
897 batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
898 {
899 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
900 
901 	rcu_read_lock();
902 	hlist_for_each_entry_rcu(tmp_orig_node,
903 				 &bat_priv->mcast.want_all_ipv6_list,
904 				 mcast_want_all_ipv6_node) {
905 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
906 			continue;
907 
908 		orig_node = tmp_orig_node;
909 		break;
910 	}
911 	rcu_read_unlock();
912 
913 	return orig_node;
914 }
915 
916 /**
917  * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
918  * @bat_priv: the bat priv with all the soft interface information
919  * @ethhdr: an ethernet header to determine the protocol family from
920  *
921  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
922  * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
923  * increases its refcount.
924  */
925 static struct batadv_orig_node *
926 batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
927 			      struct ethhdr *ethhdr)
928 {
929 	switch (ntohs(ethhdr->h_proto)) {
930 	case ETH_P_IP:
931 		return batadv_mcast_forw_ipv4_node_get(bat_priv);
932 	case ETH_P_IPV6:
933 		return batadv_mcast_forw_ipv6_node_get(bat_priv);
934 	default:
935 		/* we shouldn't be here... */
936 		return NULL;
937 	}
938 }
939 
940 /**
941  * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
942  * @bat_priv: the bat priv with all the soft interface information
943  *
944  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
945  * set and increases its refcount.
946  */
947 static struct batadv_orig_node *
948 batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
949 {
950 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
951 
952 	rcu_read_lock();
953 	hlist_for_each_entry_rcu(tmp_orig_node,
954 				 &bat_priv->mcast.want_all_unsnoopables_list,
955 				 mcast_want_all_unsnoopables_node) {
956 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
957 			continue;
958 
959 		orig_node = tmp_orig_node;
960 		break;
961 	}
962 	rcu_read_unlock();
963 
964 	return orig_node;
965 }
966 
/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
 * should be forwarded to.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       struct batadv_orig_node **orig)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	unsigned int mcast_fanout;
	struct ethhdr *ethhdr;

	/* on allocation failure drop the frame, on any other check failure
	 * fall back to classic flooding
	 */
	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_ALL;

	ethhdr = eth_hdr(skb);

	/* count potential destinations: multicast listeners announced via
	 * the translation table ...
	 */
	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	/* ... plus nodes asking for all IPv4 resp. IPv6 multicast traffic ... */
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	/* ... plus, only for unsnoopable frames, nodes asking for all
	 * unsnoopable multicast traffic
	 */
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);

	total_count = tt_count + ip_count + unsnoop_count;

	switch (total_count) {
	case 1:
		/* exactly one destination: send a single unicast to it */
		if (tt_count)
			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
		else if (ip_count)
			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
		else if (unsnoop_count)
			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);

		if (*orig)
			return BATADV_FORW_SINGLE;

		/* fall through */
	case 0:
		/* nobody interested: drop the frame */
		return BATADV_FORW_NONE;
	default:
		/* several destinations: unicast to each of them, unless
		 * their number exceeds the configured fanout or an
		 * unsnoopable destination is involved - then flood instead
		 */
		mcast_fanout = atomic_read(&bat_priv->multicast_fanout);

		if (!unsnoop_count && total_count <= mcast_fanout)
			return BATADV_FORW_SOME;
	}

	return BATADV_FORW_ALL;
}
1026 
1027 /**
1028  * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
1029  * @bat_priv: the bat priv with all the soft interface information
1030  * @skb: the multicast packet to transmit
1031  * @vid: the vlan identifier
1032  *
1033  * Sends copies of a frame with multicast destination to any multicast
1034  * listener registered in the translation table. A transmission is performed
1035  * via a batman-adv unicast packet for each such destination node.
1036  *
1037  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1038  * otherwise.
1039  */
1040 static int
1041 batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
1042 		     unsigned short vid)
1043 {
1044 	int ret = NET_XMIT_SUCCESS;
1045 	struct sk_buff *newskb;
1046 
1047 	struct batadv_tt_orig_list_entry *orig_entry;
1048 
1049 	struct batadv_tt_global_entry *tt_global;
1050 	const u8 *addr = eth_hdr(skb)->h_dest;
1051 
1052 	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
1053 	if (!tt_global)
1054 		goto out;
1055 
1056 	rcu_read_lock();
1057 	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
1058 		newskb = skb_copy(skb, GFP_ATOMIC);
1059 		if (!newskb) {
1060 			ret = NET_XMIT_DROP;
1061 			break;
1062 		}
1063 
1064 		batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1065 					orig_entry->orig_node, vid);
1066 	}
1067 	rcu_read_unlock();
1068 
1069 	batadv_tt_global_entry_put(tt_global);
1070 
1071 out:
1072 	return ret;
1073 }
1074 
1075 /**
1076  * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
1077  * @bat_priv: the bat priv with all the soft interface information
1078  * @skb: the multicast packet to transmit
1079  * @vid: the vlan identifier
1080  *
1081  * Sends copies of a frame with multicast destination to any node with a
1082  * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
1083  * batman-adv unicast packet for each such destination node.
1084  *
1085  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1086  * otherwise.
1087  */
1088 static int
1089 batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
1090 				struct sk_buff *skb, unsigned short vid)
1091 {
1092 	struct batadv_orig_node *orig_node;
1093 	int ret = NET_XMIT_SUCCESS;
1094 	struct sk_buff *newskb;
1095 
1096 	rcu_read_lock();
1097 	hlist_for_each_entry_rcu(orig_node,
1098 				 &bat_priv->mcast.want_all_ipv4_list,
1099 				 mcast_want_all_ipv4_node) {
1100 		newskb = skb_copy(skb, GFP_ATOMIC);
1101 		if (!newskb) {
1102 			ret = NET_XMIT_DROP;
1103 			break;
1104 		}
1105 
1106 		batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1107 					orig_node, vid);
1108 	}
1109 	rcu_read_unlock();
1110 	return ret;
1111 }
1112 
1113 /**
1114  * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
1115  * @bat_priv: the bat priv with all the soft interface information
1116  * @skb: The multicast packet to transmit
1117  * @vid: the vlan identifier
1118  *
1119  * Sends copies of a frame with multicast destination to any node with a
1120  * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
1121  * batman-adv unicast packet for each such destination node.
1122  *
1123  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1124  * otherwise.
1125  */
1126 static int
1127 batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
1128 				struct sk_buff *skb, unsigned short vid)
1129 {
1130 	struct batadv_orig_node *orig_node;
1131 	int ret = NET_XMIT_SUCCESS;
1132 	struct sk_buff *newskb;
1133 
1134 	rcu_read_lock();
1135 	hlist_for_each_entry_rcu(orig_node,
1136 				 &bat_priv->mcast.want_all_ipv6_list,
1137 				 mcast_want_all_ipv6_node) {
1138 		newskb = skb_copy(skb, GFP_ATOMIC);
1139 		if (!newskb) {
1140 			ret = NET_XMIT_DROP;
1141 			break;
1142 		}
1143 
1144 		batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1145 					orig_node, vid);
1146 	}
1147 	rcu_read_unlock();
1148 	return ret;
1149 }
1150 
1151 /**
1152  * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
1153  * @bat_priv: the bat priv with all the soft interface information
1154  * @skb: the multicast packet to transmit
1155  * @vid: the vlan identifier
1156  *
1157  * Sends copies of a frame with multicast destination to any node with a
1158  * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
1159  * transmission is performed via a batman-adv unicast packet for each such
1160  * destination node.
1161  *
1162  * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1163  * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1164  */
1165 static int
1166 batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
1167 			   struct sk_buff *skb, unsigned short vid)
1168 {
1169 	switch (ntohs(eth_hdr(skb)->h_proto)) {
1170 	case ETH_P_IP:
1171 		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
1172 	case ETH_P_IPV6:
1173 		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
1174 	default:
1175 		/* we shouldn't be here... */
1176 		return NET_XMIT_DROP;
1177 	}
1178 }
1179 
1180 /**
1181  * batadv_mcast_forw_send() - send packet to any detected multicast recpient
1182  * @bat_priv: the bat priv with all the soft interface information
1183  * @skb: the multicast packet to transmit
1184  * @vid: the vlan identifier
1185  *
1186  * Sends copies of a frame with multicast destination to any node that signaled
1187  * interest in it, that is either via the translation table or the according
1188  * want-all flags. A transmission is performed via a batman-adv unicast packet
1189  * for each such destination node.
1190  *
1191  * The given skb is consumed/freed.
1192  *
1193  * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1194  * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1195  */
1196 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
1197 			   unsigned short vid)
1198 {
1199 	int ret;
1200 
1201 	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
1202 	if (ret != NET_XMIT_SUCCESS) {
1203 		kfree_skb(skb);
1204 		return ret;
1205 	}
1206 
1207 	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
1208 	if (ret != NET_XMIT_SUCCESS) {
1209 		kfree_skb(skb);
1210 		return ret;
1211 	}
1212 
1213 	consume_skb(skb);
1214 	return ret;
1215 }
1216 
/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
 * orig, has toggled then this method updates counter and list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
					     struct batadv_orig_node *orig,
					     u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* orig->mcast_flags still holds the previous state here; the caller
	 * writes the new flags only after all want_*_update() calls
	 */
	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
1261 
/**
 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
 * toggled then this method updates counter and list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* orig->mcast_flags still holds the previous state here; the caller
	 * writes the new flags only after all want_*_update() calls
	 */
	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
1306 
/**
 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
 * toggled then this method updates counter and list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* orig->mcast_flags still holds the previous state here; the caller
	 * writes the new flags only after all want_*_update() calls
	 */
	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
1351 
/**
 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node of the ogm
 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
 * @tvlv_value: tvlv buffer containing the multicast data
 * @tvlv_value_len: tvlv buffer length
 */
static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 flags,
					  void *tvlv_value,
					  u16 tvlv_value_len)
{
	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
	u8 mcast_flags = BATADV_NO_FLAGS;

	/* only take the announced flags if the originator actually provided
	 * a multicast TVLV with at least one flags byte
	 */
	if (orig_mcast_enabled && tvlv_value &&
	    tvlv_value_len >= sizeof(mcast_flags))
		mcast_flags = *(u8 *)tvlv_value;

	if (!orig_mcast_enabled) {
		/* a node without a multicast TVLV gets all IPv4/IPv6
		 * multicast traffic forwarded
		 */
		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
	}

	spin_lock_bh(&orig->mcast_handler_lock);

	/* track whether this originator announced multicast support */
	if (orig_mcast_enabled &&
	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	} else if (!orig_mcast_enabled &&
		   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	}

	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);

	/* the want_*_update() helpers compare the new flags against
	 * orig->mcast_flags, so the latter must only be updated afterwards
	 */
	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);

	orig->mcast_flags = mcast_flags;
	spin_unlock_bh(&orig->mcast_handler_lock);
}
1397 
/**
 * batadv_mcast_init() - initialize the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_init(struct batadv_priv *bat_priv)
{
	/* register for multicast TVLV containers (version 2) in OGMs;
	 * OGM_CIFNOTFND lets the handler notice nodes without this TVLV
	 */
	batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
				     NULL, BATADV_TVLV_MCAST, 2,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

	/* start the periodic multicast listener announcement worker */
	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
	batadv_mcast_start_timer(bat_priv);
}
1411 
1412 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
1413 /**
1414  * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
1415  * @bat_priv: the bat priv with all the soft interface information
1416  * @seq: debugfs table seq_file struct
1417  *
1418  * Prints our own multicast flags including a more specific reason why
1419  * they are set, that is prints the bridge and querier state too, to
1420  * the debugfs table specified via @seq.
1421  */
1422 static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
1423 					    struct seq_file *seq)
1424 {
1425 	u8 flags = bat_priv->mcast.flags;
1426 	char querier4, querier6, shadowing4, shadowing6;
1427 	bool bridged = bat_priv->mcast.bridged;
1428 
1429 	if (bridged) {
1430 		querier4 = bat_priv->mcast.querier_ipv4.exists ? '.' : '4';
1431 		querier6 = bat_priv->mcast.querier_ipv6.exists ? '.' : '6';
1432 		shadowing4 = bat_priv->mcast.querier_ipv4.shadowing ? '4' : '.';
1433 		shadowing6 = bat_priv->mcast.querier_ipv6.shadowing ? '6' : '.';
1434 	} else {
1435 		querier4 = '?';
1436 		querier6 = '?';
1437 		shadowing4 = '?';
1438 		shadowing6 = '?';
1439 	}
1440 
1441 	seq_printf(seq, "Multicast flags (own flags: [%c%c%c])\n",
1442 		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
1443 		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
1444 		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
1445 	seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.');
1446 	seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n",
1447 		   querier4, querier6);
1448 	seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n",
1449 		   shadowing4, shadowing6);
1450 	seq_puts(seq, "-------------------------------------------\n");
1451 	seq_printf(seq, "       %-10s %s\n", "Originator", "Flags");
1452 }
1453 
/**
 * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
 * @seq: seq file to print on
 * @offset: not used
 *
 * This prints a table of (primary) originators and their according
 * multicast flags, including (in the header) our own.
 *
 * Return: always 0
 */
int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct batadv_orig_node *orig_node;
	struct hlist_head *head;
	u8 flags;
	u32 i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	batadv_mcast_flags_print_header(bat_priv, seq);

	/* walk all buckets of the originator hash under RCU */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* skip originators we have not received multicast
			 * capability information about yet
			 */
			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
				      &orig_node->capa_initialized))
				continue;

			/* nodes without multicast optimizations support are
			 * marked with a dash instead of a flags field
			 */
			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
				      &orig_node->capabilities)) {
				seq_printf(seq, "%pM -\n", orig_node->orig);
				continue;
			}

			flags = orig_node->mcast_flags;

			seq_printf(seq, "%pM [%c%c%c]\n", orig_node->orig,
				   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)
				   ? 'U' : '.',
				   (flags & BATADV_MCAST_WANT_ALL_IPV4)
				   ? '4' : '.',
				   (flags & BATADV_MCAST_WANT_ALL_IPV6)
				   ? '6' : '.');
		}
		rcu_read_unlock();
	}

	batadv_hardif_put(primary_if);

	return 0;
}
1513 #endif
1514 
1515 /**
1516  * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
1517  * @msg: buffer for the message
1518  * @bat_priv: the bat priv with all the soft interface information
1519  *
1520  * Return: 0 or error code.
1521  */
1522 int batadv_mcast_mesh_info_put(struct sk_buff *msg,
1523 			       struct batadv_priv *bat_priv)
1524 {
1525 	u32 flags = bat_priv->mcast.flags;
1526 	u32 flags_priv = BATADV_NO_FLAGS;
1527 
1528 	if (bat_priv->mcast.bridged) {
1529 		flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
1530 
1531 		if (bat_priv->mcast.querier_ipv4.exists)
1532 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
1533 		if (bat_priv->mcast.querier_ipv6.exists)
1534 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
1535 		if (bat_priv->mcast.querier_ipv4.shadowing)
1536 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
1537 		if (bat_priv->mcast.querier_ipv6.shadowing)
1538 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
1539 	}
1540 
1541 	if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
1542 	    nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
1543 		return -EMSGSIZE;
1544 
1545 	return 0;
1546 }
1547 
1548 /**
1549  * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
1550  *  to a netlink socket
1551  * @msg: buffer for the message
1552  * @portid: netlink port
1553  * @cb: Control block containing additional options
1554  * @orig_node: originator to dump the multicast flags of
1555  *
1556  * Return: 0 or error code.
1557  */
1558 static int
1559 batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
1560 			      struct netlink_callback *cb,
1561 			      struct batadv_orig_node *orig_node)
1562 {
1563 	void *hdr;
1564 
1565 	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
1566 			  &batadv_netlink_family, NLM_F_MULTI,
1567 			  BATADV_CMD_GET_MCAST_FLAGS);
1568 	if (!hdr)
1569 		return -ENOBUFS;
1570 
1571 	genl_dump_check_consistent(cb, hdr);
1572 
1573 	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
1574 		    orig_node->orig)) {
1575 		genlmsg_cancel(msg, hdr);
1576 		return -EMSGSIZE;
1577 	}
1578 
1579 	if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1580 		     &orig_node->capabilities)) {
1581 		if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
1582 				orig_node->mcast_flags)) {
1583 			genlmsg_cancel(msg, hdr);
1584 			return -EMSGSIZE;
1585 		}
1586 	}
1587 
1588 	genlmsg_end(msg, hdr);
1589 	return 0;
1590 }
1591 
/**
 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
 *  table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
			       struct netlink_callback *cb,
			       struct batadv_hashtable *hash,
			       unsigned int bucket, long *idx_skip)
{
	struct batadv_orig_node *orig_node;
	long idx = 0;

	spin_lock_bh(&hash->list_locks[bucket]);
	/* derive the dump sequence number from the hash generation so a
	 * resumed dump can detect concurrent changes (lowest bit set to
	 * keep the sequence number non-zero)
	 */
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
		/* skip originators we have not received multicast capability
		 * information about yet
		 */
		if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
			      &orig_node->capa_initialized))
			continue;

		/* skip entries already dumped in a previous message */
		if (idx < *idx_skip)
			goto skip;

		if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
			spin_unlock_bh(&hash->list_locks[bucket]);
			/* remember where to resume with the next message */
			*idx_skip = idx;

			return -EMSGSIZE;
		}

skip:
		idx++;
	}
	spin_unlock_bh(&hash->list_locks[bucket]);

	return 0;
}
1638 
1639 /**
1640  * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
1641  * @msg: buffer for the message
1642  * @portid: netlink port
1643  * @cb: Control block containing additional options
1644  * @bat_priv: the bat priv with all the soft interface information
1645  * @bucket: current bucket to dump
1646  * @idx: index in current bucket to the next entry to dump
1647  *
1648  * Return: 0 or error code.
1649  */
1650 static int
1651 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
1652 			  struct netlink_callback *cb,
1653 			  struct batadv_priv *bat_priv, long *bucket, long *idx)
1654 {
1655 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1656 	long bucket_tmp = *bucket;
1657 	long idx_tmp = *idx;
1658 
1659 	while (bucket_tmp < hash->size) {
1660 		if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
1661 						   *bucket, &idx_tmp))
1662 			break;
1663 
1664 		bucket_tmp++;
1665 		idx_tmp = 0;
1666 	}
1667 
1668 	*bucket = bucket_tmp;
1669 	*idx = idx_tmp;
1670 
1671 	return msg->len;
1672 }
1673 
/**
 * batadv_mcast_netlink_get_primary() - get primary interface from netlink
 *  callback
 * @cb: netlink callback structure
 * @primary_if: the primary interface pointer to return the result in
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
				 struct batadv_hard_iface **primary_if)
{
	struct batadv_hard_iface *hard_iface = NULL;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	int ifindex;
	int ret = 0;

	/* the mesh interface to operate on is given via netlink attribute */
	ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);

	hard_iface = batadv_primary_if_get_selected(bat_priv);
	if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

out:
	if (soft_iface)
		dev_put(soft_iface);

	/* on success the acquired hard_iface reference is handed over to the
	 * caller via *primary_if; otherwise it is released here
	 */
	if (!ret && primary_if)
		*primary_if = hard_iface;
	else if (hard_iface)
		batadv_hardif_put(hard_iface);

	return ret;
}
1722 
1723 /**
1724  * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
1725  * @msg: buffer for the message
1726  * @cb: callback structure containing arguments
1727  *
1728  * Return: message length.
1729  */
1730 int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
1731 {
1732 	struct batadv_hard_iface *primary_if = NULL;
1733 	int portid = NETLINK_CB(cb->skb).portid;
1734 	struct batadv_priv *bat_priv;
1735 	long *bucket = &cb->args[0];
1736 	long *idx = &cb->args[1];
1737 	int ret;
1738 
1739 	ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
1740 	if (ret)
1741 		return ret;
1742 
1743 	bat_priv = netdev_priv(primary_if->soft_iface);
1744 	ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
1745 
1746 	batadv_hardif_put(primary_if);
1747 	return ret;
1748 }
1749 
/**
 * batadv_mcast_free() - free the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_free(struct batadv_priv *bat_priv)
{
	/* stop the periodic worker first so it cannot re-arm itself or race
	 * with the teardown below
	 */
	cancel_delayed_work_sync(&bat_priv->mcast.work);

	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);

	/* safely calling outside of worker, as worker was canceled above */
	batadv_mcast_mla_tt_retract(bat_priv, NULL);
}
1764 
/**
 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
 * @orig: the originator which is going to get purged
 */
void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
	struct batadv_priv *bat_priv = orig->bat_priv;

	spin_lock_bh(&orig->mcast_handler_lock);

	/* passing BATADV_NO_FLAGS makes the helpers remove this originator
	 * from any want-all list it is on and decrement the counters
	 */
	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);

	spin_unlock_bh(&orig->mcast_handler_lock);
}
1781