xref: /openbmc/linux/net/batman-adv/multicast.c (revision 74ba9207)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2014-2019  B.A.T.M.A.N. contributors:
3  *
4  * Linus Lüssing
5  */
6 
7 #include "multicast.h"
8 #include "main.h"
9 
10 #include <linux/atomic.h>
11 #include <linux/bitops.h>
12 #include <linux/bug.h>
13 #include <linux/byteorder/generic.h>
14 #include <linux/errno.h>
15 #include <linux/etherdevice.h>
16 #include <linux/gfp.h>
17 #include <linux/icmpv6.h>
18 #include <linux/if_bridge.h>
19 #include <linux/if_ether.h>
20 #include <linux/igmp.h>
21 #include <linux/in.h>
22 #include <linux/in6.h>
23 #include <linux/ip.h>
24 #include <linux/ipv6.h>
25 #include <linux/jiffies.h>
26 #include <linux/kernel.h>
27 #include <linux/kref.h>
28 #include <linux/list.h>
29 #include <linux/lockdep.h>
30 #include <linux/netdevice.h>
31 #include <linux/netlink.h>
32 #include <linux/printk.h>
33 #include <linux/rculist.h>
34 #include <linux/rcupdate.h>
35 #include <linux/seq_file.h>
36 #include <linux/skbuff.h>
37 #include <linux/slab.h>
38 #include <linux/spinlock.h>
39 #include <linux/stddef.h>
40 #include <linux/string.h>
41 #include <linux/types.h>
42 #include <linux/workqueue.h>
43 #include <net/addrconf.h>
44 #include <net/genetlink.h>
45 #include <net/if_inet6.h>
46 #include <net/ip.h>
47 #include <net/ipv6.h>
48 #include <net/netlink.h>
49 #include <net/sock.h>
50 #include <uapi/linux/batadv_packet.h>
51 #include <uapi/linux/batman_adv.h>
52 
53 #include "hard-interface.h"
54 #include "hash.h"
55 #include "log.h"
56 #include "netlink.h"
57 #include "send.h"
58 #include "soft-interface.h"
59 #include "translation-table.h"
60 #include "tvlv.h"
61 
62 static void batadv_mcast_mla_update(struct work_struct *work);
63 
64 /**
65  * batadv_mcast_start_timer() - schedule the multicast periodic worker
66  * @bat_priv: the bat priv with all the soft interface information
67  */
68 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
69 {
70 	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
71 			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
72 }
73 
74 /**
75  * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
76  * @soft_iface: netdev struct of the mesh interface
77  *
78  * If the given soft interface has a bridge on top then the refcount
79  * of the according net device is increased.
80  *
81  * Return: NULL if no such bridge exists. Otherwise the net device of the
82  * bridge.
83  */
84 static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
85 {
86 	struct net_device *upper = soft_iface;
87 
88 	rcu_read_lock();
89 	do {
90 		upper = netdev_master_upper_dev_get_rcu(upper);
91 	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));
92 
93 	if (upper)
94 		dev_hold(upper);
95 	rcu_read_unlock();
96 
97 	return upper;
98 }
99 
100 /**
101  * batadv_mcast_addr_is_ipv4() - check if multicast MAC is IPv4
102  * @addr: the MAC address to check
103  *
104  * Return: True, if MAC address is one reserved for IPv4 multicast, false
105  * otherwise.
106  */
107 static bool batadv_mcast_addr_is_ipv4(const u8 *addr)
108 {
109 	static const u8 prefix[] = {0x01, 0x00, 0x5E};
110 
111 	return memcmp(prefix, addr, sizeof(prefix)) == 0;
112 }
113 
114 /**
115  * batadv_mcast_addr_is_ipv6() - check if multicast MAC is IPv6
116  * @addr: the MAC address to check
117  *
118  * Return: True, if MAC address is one reserved for IPv6 multicast, false
119  * otherwise.
120  */
121 static bool batadv_mcast_addr_is_ipv6(const u8 *addr)
122 {
123 	static const u8 prefix[] = {0x33, 0x33};
124 
125 	return memcmp(prefix, addr, sizeof(prefix)) == 0;
126 }
127 
128 /**
129  * batadv_mcast_mla_softif_get() - get softif multicast listeners
130  * @bat_priv: the bat priv with all the soft interface information
131  * @dev: the device to collect multicast addresses from
132  * @mcast_list: a list to put found addresses into
133  *
134  * Collects multicast addresses of multicast listeners residing
135  * on this kernel on the given soft interface, dev, in
136  * the given mcast_list. In general, multicast listeners provided by
137  * your multicast receiving applications run directly on this node.
138  *
139  * If there is a bridge interface on top of dev, collects from that one
140  * instead. Just like with IP addresses and routes, multicast listeners
141  * will(/should) register to the bridge interface instead of an
142  * enslaved bat0.
143  *
144  * Return: -ENOMEM on memory allocation error or the number of
145  * items added to the mcast_list otherwise.
146  */
147 static int batadv_mcast_mla_softif_get(struct batadv_priv *bat_priv,
148 				       struct net_device *dev,
149 				       struct hlist_head *mcast_list)
150 {
151 	bool all_ipv4 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV4;
152 	bool all_ipv6 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV6;
153 	struct net_device *bridge = batadv_mcast_get_bridge(dev);
154 	struct netdev_hw_addr *mc_list_entry;
155 	struct batadv_hw_addr *new;
156 	int ret = 0;
157 
158 	netif_addr_lock_bh(bridge ? bridge : dev);
159 	netdev_for_each_mc_addr(mc_list_entry, bridge ? bridge : dev) {
160 		if (all_ipv4 && batadv_mcast_addr_is_ipv4(mc_list_entry->addr))
161 			continue;
162 
163 		if (all_ipv6 && batadv_mcast_addr_is_ipv6(mc_list_entry->addr))
164 			continue;
165 
166 		new = kmalloc(sizeof(*new), GFP_ATOMIC);
167 		if (!new) {
168 			ret = -ENOMEM;
169 			break;
170 		}
171 
172 		ether_addr_copy(new->addr, mc_list_entry->addr);
173 		hlist_add_head(&new->list, mcast_list);
174 		ret++;
175 	}
176 	netif_addr_unlock_bh(bridge ? bridge : dev);
177 
178 	if (bridge)
179 		dev_put(bridge);
180 
181 	return ret;
182 }
183 
184 /**
185  * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
186  * @mcast_addr: the multicast address to check
187  * @mcast_list: the list with multicast addresses to search in
188  *
189  * Return: true if the given address is already in the given list.
190  * Otherwise returns false.
191  */
192 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
193 					  struct hlist_head *mcast_list)
194 {
195 	struct batadv_hw_addr *mcast_entry;
196 
197 	hlist_for_each_entry(mcast_entry, mcast_list, list)
198 		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
199 			return true;
200 
201 	return false;
202 }
203 
204 /**
205  * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
206  * @dst: destination to write to - a multicast MAC address
207  * @src: source to read from - a multicast IP address
208  *
209  * Converts a given multicast IPv4/IPv6 address from a bridge
210  * to its matching multicast MAC address and copies it into the given
211  * destination buffer.
212  *
213  * Caller needs to make sure the destination buffer can hold
214  * at least ETH_ALEN bytes.
215  */
216 static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
217 {
218 	if (src->proto == htons(ETH_P_IP))
219 		ip_eth_mc_map(src->u.ip4, dst);
220 #if IS_ENABLED(CONFIG_IPV6)
221 	else if (src->proto == htons(ETH_P_IPV6))
222 		ipv6_eth_mc_map(&src->u.ip6, dst);
223 #endif
224 	else
225 		eth_zero_addr(dst);
226 }
227 
228 /**
229  * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
230  * @bat_priv: the bat priv with all the soft interface information
231  * @dev: a bridge slave whose bridge to collect multicast addresses from
232  * @mcast_list: a list to put found addresses into
233  *
234  * Collects multicast addresses of multicast listeners residing
235  * on foreign, non-mesh devices which we gave access to our mesh via
236  * a bridge on top of the given soft interface, dev, in the given
237  * mcast_list.
238  *
239  * Return: -ENOMEM on memory allocation error or the number of
240  * items added to the mcast_list otherwise.
241  */
242 static int batadv_mcast_mla_bridge_get(struct batadv_priv *bat_priv,
243 				       struct net_device *dev,
244 				       struct hlist_head *mcast_list)
245 {
246 	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
247 	bool all_ipv4 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV4;
248 	bool all_ipv6 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV6;
249 	struct br_ip_list *br_ip_entry, *tmp;
250 	struct batadv_hw_addr *new;
251 	u8 mcast_addr[ETH_ALEN];
252 	int ret;
253 
254 	/* we don't need to detect these devices/listeners, the IGMP/MLD
255 	 * snooping code of the Linux bridge already does that for us
256 	 */
257 	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
258 	if (ret < 0)
259 		goto out;
260 
261 	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
262 		if (all_ipv4 && br_ip_entry->addr.proto == htons(ETH_P_IP))
263 			continue;
264 
265 		if (all_ipv6 && br_ip_entry->addr.proto == htons(ETH_P_IPV6))
266 			continue;
267 
268 		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
269 		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
270 			continue;
271 
272 		new = kmalloc(sizeof(*new), GFP_ATOMIC);
273 		if (!new) {
274 			ret = -ENOMEM;
275 			break;
276 		}
277 
278 		ether_addr_copy(new->addr, mcast_addr);
279 		hlist_add_head(&new->list, mcast_list);
280 	}
281 
282 out:
283 	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
284 		list_del(&br_ip_entry->list);
285 		kfree(br_ip_entry);
286 	}
287 
288 	return ret;
289 }
290 
291 /**
292  * batadv_mcast_mla_list_free() - free a list of multicast addresses
293  * @mcast_list: the list to free
294  *
295  * Removes and frees all items in the given mcast_list.
296  */
297 static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
298 {
299 	struct batadv_hw_addr *mcast_entry;
300 	struct hlist_node *tmp;
301 
302 	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
303 		hlist_del(&mcast_entry->list);
304 		kfree(mcast_entry);
305 	}
306 }
307 
308 /**
309  * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
310  * @bat_priv: the bat priv with all the soft interface information
311  * @mcast_list: a list of addresses which should _not_ be removed
312  *
313  * Retracts the announcement of any multicast listener from the
314  * translation table except the ones listed in the given mcast_list.
315  *
316  * If mcast_list is NULL then all are retracted.
317  */
318 static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
319 					struct hlist_head *mcast_list)
320 {
321 	struct batadv_hw_addr *mcast_entry;
322 	struct hlist_node *tmp;
323 
324 	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
325 				  list) {
326 		if (mcast_list &&
327 		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
328 						  mcast_list))
329 			continue;
330 
331 		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
332 				       BATADV_NO_FLAGS,
333 				       "mcast TT outdated", false);
334 
335 		hlist_del(&mcast_entry->list);
336 		kfree(mcast_entry);
337 	}
338 }
339 
340 /**
341  * batadv_mcast_mla_tt_add() - add multicast listener announcements
342  * @bat_priv: the bat priv with all the soft interface information
343  * @mcast_list: a list of addresses which are going to get added
344  *
345  * Adds multicast listener announcements from the given mcast_list to the
346  * translation table if they have not been added yet.
347  */
348 static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
349 				    struct hlist_head *mcast_list)
350 {
351 	struct batadv_hw_addr *mcast_entry;
352 	struct hlist_node *tmp;
353 
354 	if (!mcast_list)
355 		return;
356 
357 	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
358 		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
359 						  &bat_priv->mcast.mla_list))
360 			continue;
361 
362 		if (!batadv_tt_local_add(bat_priv->soft_iface,
363 					 mcast_entry->addr, BATADV_NO_FLAGS,
364 					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
365 			continue;
366 
367 		hlist_del(&mcast_entry->list);
368 		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
369 	}
370 }
371 
372 /**
373  * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
374  * @bat_priv: the bat priv with all the soft interface information
375  *
376  * Checks whether there is a bridge on top of our soft interface.
377  *
378  * Return: true if there is a bridge, false otherwise.
379  */
380 static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
381 {
382 	struct net_device *upper = bat_priv->soft_iface;
383 
384 	rcu_read_lock();
385 	do {
386 		upper = netdev_master_upper_dev_get_rcu(upper);
387 	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));
388 	rcu_read_unlock();
389 
390 	return upper;
391 }
392 
393 /**
394  * batadv_mcast_querier_log() - debug output regarding the querier status on
395  *  link
396  * @bat_priv: the bat priv with all the soft interface information
397  * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
398  * @old_state: the previous querier state on our link
399  * @new_state: the new querier state on our link
400  *
401  * Outputs debug messages to the logging facility with log level 'mcast'
402  * regarding changes to the querier status on the link which are relevant
403  * to our multicast optimizations.
404  *
405  * Usually this is about whether a querier appeared or vanished in
406  * our mesh or whether the querier is in the suboptimal position of being
407  * behind our local bridge segment: Snooping switches will directly
408  * forward listener reports to the querier, therefore batman-adv and
409  * the bridge will potentially not see these listeners - the querier is
410  * potentially shadowing listeners from us then.
411  *
412  * This is only interesting for nodes with a bridge on top of their
413  * soft interface.
414  */
415 static void
416 batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
417 			 struct batadv_mcast_querier_state *old_state,
418 			 struct batadv_mcast_querier_state *new_state)
419 {
420 	if (!old_state->exists && new_state->exists)
421 		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
422 			    str_proto);
423 	else if (old_state->exists && !new_state->exists)
424 		batadv_info(bat_priv->soft_iface,
425 			    "%s Querier disappeared - multicast optimizations disabled\n",
426 			    str_proto);
427 	else if (!bat_priv->mcast.bridged && !new_state->exists)
428 		batadv_info(bat_priv->soft_iface,
429 			    "No %s Querier present - multicast optimizations disabled\n",
430 			    str_proto);
431 
432 	if (new_state->exists) {
433 		if ((!old_state->shadowing && new_state->shadowing) ||
434 		    (!old_state->exists && new_state->shadowing))
435 			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
436 				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
437 				   str_proto);
438 		else if (old_state->shadowing && !new_state->shadowing)
439 			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
440 				   "%s Querier is not behind our bridged segment\n",
441 				   str_proto);
442 	}
443 }
444 
445 /**
446  * batadv_mcast_bridge_log() - debug output for topology changes in bridged
447  *  setups
448  * @bat_priv: the bat priv with all the soft interface information
449  * @bridged: a flag about whether the soft interface is currently bridged or not
450  * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
451  * @querier_ipv6: (maybe) new status of a potential, selected MLD querier
452  *
453  * If no bridges are ever used on this node, then this function does nothing.
454  *
455  * Otherwise this function outputs debug information to the 'mcast' log level
456  * which might be relevant to our multicast optimizations.
457  *
458  * More precisely, it outputs information when a bridge interface is added or
459  * removed from a soft interface. And when a bridge is present, it further
460  * outputs information about the querier state which is relevant for the
461  * multicast flags this node is going to set.
462  */
463 static void
464 batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
465 			struct batadv_mcast_querier_state *querier_ipv4,
466 			struct batadv_mcast_querier_state *querier_ipv6)
467 {
468 	if (!bat_priv->mcast.bridged && bridged)
469 		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
470 			   "Bridge added: Setting Unsnoopables(U)-flag\n");
471 	else if (bat_priv->mcast.bridged && !bridged)
472 		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
473 			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");
474 
475 	if (bridged) {
476 		batadv_mcast_querier_log(bat_priv, "IGMP",
477 					 &bat_priv->mcast.querier_ipv4,
478 					 querier_ipv4);
479 		batadv_mcast_querier_log(bat_priv, "MLD",
480 					 &bat_priv->mcast.querier_ipv6,
481 					 querier_ipv6);
482 	}
483 }
484 
485 /**
486  * batadv_mcast_flags_logs() - output debug information about mcast flag changes
487  * @bat_priv: the bat priv with all the soft interface information
488  * @flags: flags indicating the new multicast state
489  *
490  * Whenever the multicast flags this nodes announces changes (@mcast_flags vs.
491  * bat_priv->mcast.flags), this notifies userspace via the 'mcast' log level.
492  */
493 static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
494 {
495 	u8 old_flags = bat_priv->mcast.flags;
496 	char str_old_flags[] = "[...]";
497 
498 	sprintf(str_old_flags, "[%c%c%c]",
499 		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
500 		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
501 		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
502 
503 	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
504 		   "Changing multicast flags from '%s' to '[%c%c%c]'\n",
505 		   bat_priv->mcast.enabled ? str_old_flags : "<undefined>",
506 		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
507 		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
508 		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
509 }
510 
511 /**
512  * batadv_mcast_mla_tvlv_update() - update multicast tvlv
513  * @bat_priv: the bat priv with all the soft interface information
514  *
515  * Updates the own multicast tvlv with our current multicast related settings,
516  * capabilities and inabilities.
517  *
518  * Return: false if we want all IPv4 && IPv6 multicast traffic and true
519  * otherwise.
520  */
521 static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
522 {
523 	struct batadv_tvlv_mcast_data mcast_data;
524 	struct batadv_mcast_querier_state querier4 = {false, false};
525 	struct batadv_mcast_querier_state querier6 = {false, false};
526 	struct net_device *dev = bat_priv->soft_iface;
527 	bool bridged;
528 
529 	mcast_data.flags = BATADV_NO_FLAGS;
530 	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
531 
532 	bridged = batadv_mcast_has_bridge(bat_priv);
533 	if (!bridged)
534 		goto update;
535 
536 	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
537 		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
538 
539 	querier4.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
540 	querier4.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
541 
542 	querier6.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
543 	querier6.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
544 
545 	mcast_data.flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;
546 
547 	/* 1) If no querier exists at all, then multicast listeners on
548 	 *    our local TT clients behind the bridge will keep silent.
549 	 * 2) If the selected querier is on one of our local TT clients,
550 	 *    behind the bridge, then this querier might shadow multicast
551 	 *    listeners on our local TT clients, behind this bridge.
552 	 *
553 	 * In both cases, we will signalize other batman nodes that
554 	 * we need all multicast traffic of the according protocol.
555 	 */
556 	if (!querier4.exists || querier4.shadowing)
557 		mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV4;
558 
559 	if (!querier6.exists || querier6.shadowing)
560 		mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV6;
561 
562 update:
563 	batadv_mcast_bridge_log(bat_priv, bridged, &querier4, &querier6);
564 
565 	bat_priv->mcast.querier_ipv4.exists = querier4.exists;
566 	bat_priv->mcast.querier_ipv4.shadowing = querier4.shadowing;
567 
568 	bat_priv->mcast.querier_ipv6.exists = querier6.exists;
569 	bat_priv->mcast.querier_ipv6.shadowing = querier6.shadowing;
570 
571 	bat_priv->mcast.bridged = bridged;
572 
573 	if (!bat_priv->mcast.enabled ||
574 	    mcast_data.flags != bat_priv->mcast.flags) {
575 		batadv_mcast_flags_log(bat_priv, mcast_data.flags);
576 		batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
577 					       &mcast_data, sizeof(mcast_data));
578 		bat_priv->mcast.flags = mcast_data.flags;
579 		bat_priv->mcast.enabled = true;
580 	}
581 
582 	return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
583 		 mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
584 }
585 
586 /**
587  * __batadv_mcast_mla_update() - update the own MLAs
588  * @bat_priv: the bat priv with all the soft interface information
589  *
590  * Updates the own multicast listener announcements in the translation
591  * table as well as the own, announced multicast tvlv container.
592  *
593  * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
594  * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
595  * ensured by the non-parallel execution of the worker this function
596  * belongs to.
597  */
598 static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
599 {
600 	struct net_device *soft_iface = bat_priv->soft_iface;
601 	struct hlist_head mcast_list = HLIST_HEAD_INIT;
602 	int ret;
603 
604 	if (!batadv_mcast_mla_tvlv_update(bat_priv))
605 		goto update;
606 
607 	ret = batadv_mcast_mla_softif_get(bat_priv, soft_iface, &mcast_list);
608 	if (ret < 0)
609 		goto out;
610 
611 	ret = batadv_mcast_mla_bridge_get(bat_priv, soft_iface, &mcast_list);
612 	if (ret < 0)
613 		goto out;
614 
615 update:
616 	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
617 	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
618 
619 out:
620 	batadv_mcast_mla_list_free(&mcast_list);
621 }
622 
623 /**
624  * batadv_mcast_mla_update() - update the own MLAs
625  * @work: kernel work struct
626  *
627  * Updates the own multicast listener announcements in the translation
628  * table as well as the own, announced multicast tvlv container.
629  *
630  * In the end, reschedules the work timer.
631  */
632 static void batadv_mcast_mla_update(struct work_struct *work)
633 {
634 	struct delayed_work *delayed_work;
635 	struct batadv_priv_mcast *priv_mcast;
636 	struct batadv_priv *bat_priv;
637 
638 	delayed_work = to_delayed_work(work);
639 	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
640 	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
641 
642 	spin_lock(&bat_priv->mcast.mla_lock);
643 	__batadv_mcast_mla_update(bat_priv);
644 	spin_unlock(&bat_priv->mcast.mla_lock);
645 
646 	batadv_mcast_start_timer(bat_priv);
647 }
648 
649 /**
650  * batadv_mcast_is_report_ipv4() - check for IGMP reports
651  * @skb: the ethernet frame destined for the mesh
652  *
653  * This call might reallocate skb data.
654  *
655  * Checks whether the given frame is a valid IGMP report.
656  *
657  * Return: If so then true, otherwise false.
658  */
659 static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
660 {
661 	if (ip_mc_check_igmp(skb) < 0)
662 		return false;
663 
664 	switch (igmp_hdr(skb)->type) {
665 	case IGMP_HOST_MEMBERSHIP_REPORT:
666 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
667 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
668 		return true;
669 	}
670 
671 	return false;
672 }
673 
674 /**
675  * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
676  *  potential
677  * @bat_priv: the bat priv with all the soft interface information
678  * @skb: the IPv4 packet to check
679  * @is_unsnoopable: stores whether the destination is snoopable
680  *
681  * Checks whether the given IPv4 packet has the potential to be forwarded with a
682  * mode more optimal than classic flooding.
683  *
684  * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
685  * allocation failure.
686  */
687 static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
688 					     struct sk_buff *skb,
689 					     bool *is_unsnoopable)
690 {
691 	struct iphdr *iphdr;
692 
693 	/* We might fail due to out-of-memory -> drop it */
694 	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
695 		return -ENOMEM;
696 
697 	if (batadv_mcast_is_report_ipv4(skb))
698 		return -EINVAL;
699 
700 	iphdr = ip_hdr(skb);
701 
702 	/* TODO: Implement Multicast Router Discovery (RFC4286),
703 	 * then allow scope > link local, too
704 	 */
705 	if (!ipv4_is_local_multicast(iphdr->daddr))
706 		return -EINVAL;
707 
708 	/* link-local multicast listeners behind a bridge are
709 	 * not snoopable (see RFC4541, section 2.1.2.2)
710 	 */
711 	*is_unsnoopable = true;
712 
713 	return 0;
714 }
715 
716 /**
717  * batadv_mcast_is_report_ipv6() - check for MLD reports
718  * @skb: the ethernet frame destined for the mesh
719  *
720  * This call might reallocate skb data.
721  *
722  * Checks whether the given frame is a valid MLD report.
723  *
724  * Return: If so then true, otherwise false.
725  */
726 static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
727 {
728 	if (ipv6_mc_check_mld(skb) < 0)
729 		return false;
730 
731 	switch (icmp6_hdr(skb)->icmp6_type) {
732 	case ICMPV6_MGM_REPORT:
733 	case ICMPV6_MLD2_REPORT:
734 		return true;
735 	}
736 
737 	return false;
738 }
739 
740 /**
741  * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
742  *  potential
743  * @bat_priv: the bat priv with all the soft interface information
744  * @skb: the IPv6 packet to check
745  * @is_unsnoopable: stores whether the destination is snoopable
746  *
747  * Checks whether the given IPv6 packet has the potential to be forwarded with a
748  * mode more optimal than classic flooding.
749  *
750  * Return: If so then 0. Otherwise -EINVAL is or -ENOMEM if we are out of memory
751  */
752 static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
753 					     struct sk_buff *skb,
754 					     bool *is_unsnoopable)
755 {
756 	struct ipv6hdr *ip6hdr;
757 
758 	/* We might fail due to out-of-memory -> drop it */
759 	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
760 		return -ENOMEM;
761 
762 	if (batadv_mcast_is_report_ipv6(skb))
763 		return -EINVAL;
764 
765 	ip6hdr = ipv6_hdr(skb);
766 
767 	/* TODO: Implement Multicast Router Discovery (RFC4286),
768 	 * then allow scope > link local, too
769 	 */
770 	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) != IPV6_ADDR_SCOPE_LINKLOCAL)
771 		return -EINVAL;
772 
773 	/* link-local-all-nodes multicast listeners behind a bridge are
774 	 * not snoopable (see RFC4541, section 3, paragraph 3)
775 	 */
776 	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
777 		*is_unsnoopable = true;
778 
779 	return 0;
780 }
781 
782 /**
783  * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
784  * @bat_priv: the bat priv with all the soft interface information
785  * @skb: the multicast frame to check
786  * @is_unsnoopable: stores whether the destination is snoopable
787  *
788  * Checks whether the given multicast ethernet frame has the potential to be
789  * forwarded with a mode more optimal than classic flooding.
790  *
791  * Return: If so then 0. Otherwise -EINVAL is or -ENOMEM if we are out of memory
792  */
793 static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
794 					struct sk_buff *skb,
795 					bool *is_unsnoopable)
796 {
797 	struct ethhdr *ethhdr = eth_hdr(skb);
798 
799 	if (!atomic_read(&bat_priv->multicast_mode))
800 		return -EINVAL;
801 
802 	switch (ntohs(ethhdr->h_proto)) {
803 	case ETH_P_IP:
804 		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
805 							 is_unsnoopable);
806 	case ETH_P_IPV6:
807 		if (!IS_ENABLED(CONFIG_IPV6))
808 			return -EINVAL;
809 
810 		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
811 							 is_unsnoopable);
812 	default:
813 		return -EINVAL;
814 	}
815 }
816 
817 /**
818  * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
819  *  interest
820  * @bat_priv: the bat priv with all the soft interface information
821  * @ethhdr: ethernet header of a packet
822  *
823  * Return: the number of nodes which want all IPv4 multicast traffic if the
824  * given ethhdr is from an IPv4 packet or the number of nodes which want all
825  * IPv6 traffic if it matches an IPv6 packet.
826  */
827 static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
828 					       struct ethhdr *ethhdr)
829 {
830 	switch (ntohs(ethhdr->h_proto)) {
831 	case ETH_P_IP:
832 		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
833 	case ETH_P_IPV6:
834 		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
835 	default:
836 		/* we shouldn't be here... */
837 		return 0;
838 	}
839 }
840 
841 /**
842  * batadv_mcast_forw_tt_node_get() - get a multicast tt node
843  * @bat_priv: the bat priv with all the soft interface information
844  * @ethhdr: the ether header containing the multicast destination
845  *
846  * Return: an orig_node matching the multicast address provided by ethhdr
847  * via a translation table lookup. This increases the returned nodes refcount.
848  */
849 static struct batadv_orig_node *
850 batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
851 			      struct ethhdr *ethhdr)
852 {
853 	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
854 					BATADV_NO_FLAGS);
855 }
856 
857 /**
858  * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
859  * @bat_priv: the bat priv with all the soft interface information
860  *
861  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
862  * increases its refcount.
863  */
864 static struct batadv_orig_node *
865 batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
866 {
867 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
868 
869 	rcu_read_lock();
870 	hlist_for_each_entry_rcu(tmp_orig_node,
871 				 &bat_priv->mcast.want_all_ipv4_list,
872 				 mcast_want_all_ipv4_node) {
873 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
874 			continue;
875 
876 		orig_node = tmp_orig_node;
877 		break;
878 	}
879 	rcu_read_unlock();
880 
881 	return orig_node;
882 }
883 
884 /**
885  * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
886  * @bat_priv: the bat priv with all the soft interface information
887  *
888  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
889  * and increases its refcount.
890  */
891 static struct batadv_orig_node *
892 batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
893 {
894 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
895 
896 	rcu_read_lock();
897 	hlist_for_each_entry_rcu(tmp_orig_node,
898 				 &bat_priv->mcast.want_all_ipv6_list,
899 				 mcast_want_all_ipv6_node) {
900 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
901 			continue;
902 
903 		orig_node = tmp_orig_node;
904 		break;
905 	}
906 	rcu_read_unlock();
907 
908 	return orig_node;
909 }
910 
911 /**
912  * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
913  * @bat_priv: the bat priv with all the soft interface information
914  * @ethhdr: an ethernet header to determine the protocol family from
915  *
916  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
917  * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
918  * increases its refcount.
919  */
920 static struct batadv_orig_node *
921 batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
922 			      struct ethhdr *ethhdr)
923 {
924 	switch (ntohs(ethhdr->h_proto)) {
925 	case ETH_P_IP:
926 		return batadv_mcast_forw_ipv4_node_get(bat_priv);
927 	case ETH_P_IPV6:
928 		return batadv_mcast_forw_ipv6_node_get(bat_priv);
929 	default:
930 		/* we shouldn't be here... */
931 		return NULL;
932 	}
933 }
934 
935 /**
936  * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
937  * @bat_priv: the bat priv with all the soft interface information
938  *
939  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
940  * set and increases its refcount.
941  */
942 static struct batadv_orig_node *
943 batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
944 {
945 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
946 
947 	rcu_read_lock();
948 	hlist_for_each_entry_rcu(tmp_orig_node,
949 				 &bat_priv->mcast.want_all_unsnoopables_list,
950 				 mcast_want_all_unsnoopables_node) {
951 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
952 			continue;
953 
954 		orig_node = tmp_orig_node;
955 		break;
956 	}
957 	rcu_read_unlock();
958 
959 	return orig_node;
960 }
961 
962 /**
963  * batadv_mcast_forw_mode() - check on how to forward a multicast packet
964  * @bat_priv: the bat priv with all the soft interface information
965  * @skb: The multicast packet to check
966  * @orig: an originator to be set to forward the skb to
967  *
968  * Return: the forwarding mode as enum batadv_forw_mode and in case of
969  * BATADV_FORW_SINGLE set the orig to the single originator the skb
970  * should be forwarded to.
971  */
972 enum batadv_forw_mode
973 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
974 		       struct batadv_orig_node **orig)
975 {
976 	int ret, tt_count, ip_count, unsnoop_count, total_count;
977 	bool is_unsnoopable = false;
978 	unsigned int mcast_fanout;
979 	struct ethhdr *ethhdr;
980 
981 	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable);
982 	if (ret == -ENOMEM)
983 		return BATADV_FORW_NONE;
984 	else if (ret < 0)
985 		return BATADV_FORW_ALL;
986 
987 	ethhdr = eth_hdr(skb);
988 
989 	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
990 					       BATADV_NO_FLAGS);
991 	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
992 	unsnoop_count = !is_unsnoopable ? 0 :
993 			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
994 
995 	total_count = tt_count + ip_count + unsnoop_count;
996 
997 	switch (total_count) {
998 	case 1:
999 		if (tt_count)
1000 			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
1001 		else if (ip_count)
1002 			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
1003 		else if (unsnoop_count)
1004 			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
1005 
1006 		if (*orig)
1007 			return BATADV_FORW_SINGLE;
1008 
1009 		/* fall through */
1010 	case 0:
1011 		return BATADV_FORW_NONE;
1012 	default:
1013 		mcast_fanout = atomic_read(&bat_priv->multicast_fanout);
1014 
1015 		if (!unsnoop_count && total_count <= mcast_fanout)
1016 			return BATADV_FORW_SOME;
1017 	}
1018 
1019 	return BATADV_FORW_ALL;
1020 }
1021 
1022 /**
1023  * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
1024  * @bat_priv: the bat priv with all the soft interface information
1025  * @skb: the multicast packet to transmit
1026  * @vid: the vlan identifier
1027  *
1028  * Sends copies of a frame with multicast destination to any multicast
1029  * listener registered in the translation table. A transmission is performed
1030  * via a batman-adv unicast packet for each such destination node.
1031  *
1032  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1033  * otherwise.
1034  */
1035 static int
1036 batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
1037 		     unsigned short vid)
1038 {
1039 	int ret = NET_XMIT_SUCCESS;
1040 	struct sk_buff *newskb;
1041 
1042 	struct batadv_tt_orig_list_entry *orig_entry;
1043 
1044 	struct batadv_tt_global_entry *tt_global;
1045 	const u8 *addr = eth_hdr(skb)->h_dest;
1046 
1047 	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
1048 	if (!tt_global)
1049 		goto out;
1050 
1051 	rcu_read_lock();
1052 	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
1053 		newskb = skb_copy(skb, GFP_ATOMIC);
1054 		if (!newskb) {
1055 			ret = NET_XMIT_DROP;
1056 			break;
1057 		}
1058 
1059 		batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1060 					orig_entry->orig_node, vid);
1061 	}
1062 	rcu_read_unlock();
1063 
1064 	batadv_tt_global_entry_put(tt_global);
1065 
1066 out:
1067 	return ret;
1068 }
1069 
1070 /**
1071  * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
1072  * @bat_priv: the bat priv with all the soft interface information
1073  * @skb: the multicast packet to transmit
1074  * @vid: the vlan identifier
1075  *
1076  * Sends copies of a frame with multicast destination to any node with a
1077  * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
1078  * batman-adv unicast packet for each such destination node.
1079  *
1080  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1081  * otherwise.
1082  */
1083 static int
1084 batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
1085 				struct sk_buff *skb, unsigned short vid)
1086 {
1087 	struct batadv_orig_node *orig_node;
1088 	int ret = NET_XMIT_SUCCESS;
1089 	struct sk_buff *newskb;
1090 
1091 	rcu_read_lock();
1092 	hlist_for_each_entry_rcu(orig_node,
1093 				 &bat_priv->mcast.want_all_ipv4_list,
1094 				 mcast_want_all_ipv4_node) {
1095 		newskb = skb_copy(skb, GFP_ATOMIC);
1096 		if (!newskb) {
1097 			ret = NET_XMIT_DROP;
1098 			break;
1099 		}
1100 
1101 		batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1102 					orig_node, vid);
1103 	}
1104 	rcu_read_unlock();
1105 	return ret;
1106 }
1107 
1108 /**
1109  * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
1110  * @bat_priv: the bat priv with all the soft interface information
1111  * @skb: The multicast packet to transmit
1112  * @vid: the vlan identifier
1113  *
1114  * Sends copies of a frame with multicast destination to any node with a
1115  * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
1116  * batman-adv unicast packet for each such destination node.
1117  *
1118  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1119  * otherwise.
1120  */
1121 static int
1122 batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
1123 				struct sk_buff *skb, unsigned short vid)
1124 {
1125 	struct batadv_orig_node *orig_node;
1126 	int ret = NET_XMIT_SUCCESS;
1127 	struct sk_buff *newskb;
1128 
1129 	rcu_read_lock();
1130 	hlist_for_each_entry_rcu(orig_node,
1131 				 &bat_priv->mcast.want_all_ipv6_list,
1132 				 mcast_want_all_ipv6_node) {
1133 		newskb = skb_copy(skb, GFP_ATOMIC);
1134 		if (!newskb) {
1135 			ret = NET_XMIT_DROP;
1136 			break;
1137 		}
1138 
1139 		batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1140 					orig_node, vid);
1141 	}
1142 	rcu_read_unlock();
1143 	return ret;
1144 }
1145 
1146 /**
1147  * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
1148  * @bat_priv: the bat priv with all the soft interface information
1149  * @skb: the multicast packet to transmit
1150  * @vid: the vlan identifier
1151  *
1152  * Sends copies of a frame with multicast destination to any node with a
1153  * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
1154  * transmission is performed via a batman-adv unicast packet for each such
1155  * destination node.
1156  *
1157  * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1158  * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1159  */
1160 static int
1161 batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
1162 			   struct sk_buff *skb, unsigned short vid)
1163 {
1164 	switch (ntohs(eth_hdr(skb)->h_proto)) {
1165 	case ETH_P_IP:
1166 		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
1167 	case ETH_P_IPV6:
1168 		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
1169 	default:
1170 		/* we shouldn't be here... */
1171 		return NET_XMIT_DROP;
1172 	}
1173 }
1174 
1175 /**
1176  * batadv_mcast_forw_send() - send packet to any detected multicast recpient
1177  * @bat_priv: the bat priv with all the soft interface information
1178  * @skb: the multicast packet to transmit
1179  * @vid: the vlan identifier
1180  *
1181  * Sends copies of a frame with multicast destination to any node that signaled
1182  * interest in it, that is either via the translation table or the according
1183  * want-all flags. A transmission is performed via a batman-adv unicast packet
1184  * for each such destination node.
1185  *
1186  * The given skb is consumed/freed.
1187  *
1188  * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1189  * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1190  */
1191 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
1192 			   unsigned short vid)
1193 {
1194 	int ret;
1195 
1196 	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
1197 	if (ret != NET_XMIT_SUCCESS) {
1198 		kfree_skb(skb);
1199 		return ret;
1200 	}
1201 
1202 	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
1203 	if (ret != NET_XMIT_SUCCESS) {
1204 		kfree_skb(skb);
1205 		return ret;
1206 	}
1207 
1208 	consume_skb(skb);
1209 	return ret;
1210 }
1211 
1212 /**
1213  * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
1214  * @bat_priv: the bat priv with all the soft interface information
1215  * @orig: the orig_node which multicast state might have changed of
1216  * @mcast_flags: flags indicating the new multicast state
1217  *
1218  * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
1219  * orig, has toggled then this method updates counter and list accordingly.
1220  *
1221  * Caller needs to hold orig->mcast_handler_lock.
1222  */
1223 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
1224 					     struct batadv_orig_node *orig,
1225 					     u8 mcast_flags)
1226 {
1227 	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
1228 	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
1229 
1230 	lockdep_assert_held(&orig->mcast_handler_lock);
1231 
1232 	/* switched from flag unset to set */
1233 	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
1234 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
1235 		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
1236 
1237 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1238 		/* flag checks above + mcast_handler_lock prevents this */
1239 		WARN_ON(!hlist_unhashed(node));
1240 
1241 		hlist_add_head_rcu(node, head);
1242 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1243 	/* switched from flag set to unset */
1244 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
1245 		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
1246 		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
1247 
1248 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1249 		/* flag checks above + mcast_handler_lock prevents this */
1250 		WARN_ON(hlist_unhashed(node));
1251 
1252 		hlist_del_init_rcu(node);
1253 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1254 	}
1255 }
1256 
1257 /**
1258  * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
1259  * @bat_priv: the bat priv with all the soft interface information
1260  * @orig: the orig_node which multicast state might have changed of
1261  * @mcast_flags: flags indicating the new multicast state
1262  *
1263  * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
1264  * toggled then this method updates counter and list accordingly.
1265  *
1266  * Caller needs to hold orig->mcast_handler_lock.
1267  */
1268 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
1269 					  struct batadv_orig_node *orig,
1270 					  u8 mcast_flags)
1271 {
1272 	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
1273 	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
1274 
1275 	lockdep_assert_held(&orig->mcast_handler_lock);
1276 
1277 	/* switched from flag unset to set */
1278 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
1279 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
1280 		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
1281 
1282 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1283 		/* flag checks above + mcast_handler_lock prevents this */
1284 		WARN_ON(!hlist_unhashed(node));
1285 
1286 		hlist_add_head_rcu(node, head);
1287 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1288 	/* switched from flag set to unset */
1289 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
1290 		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
1291 		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
1292 
1293 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1294 		/* flag checks above + mcast_handler_lock prevents this */
1295 		WARN_ON(hlist_unhashed(node));
1296 
1297 		hlist_del_init_rcu(node);
1298 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1299 	}
1300 }
1301 
1302 /**
1303  * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
1304  * @bat_priv: the bat priv with all the soft interface information
1305  * @orig: the orig_node which multicast state might have changed of
1306  * @mcast_flags: flags indicating the new multicast state
1307  *
1308  * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
1309  * toggled then this method updates counter and list accordingly.
1310  *
1311  * Caller needs to hold orig->mcast_handler_lock.
1312  */
1313 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
1314 					  struct batadv_orig_node *orig,
1315 					  u8 mcast_flags)
1316 {
1317 	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
1318 	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
1319 
1320 	lockdep_assert_held(&orig->mcast_handler_lock);
1321 
1322 	/* switched from flag unset to set */
1323 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
1324 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
1325 		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
1326 
1327 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1328 		/* flag checks above + mcast_handler_lock prevents this */
1329 		WARN_ON(!hlist_unhashed(node));
1330 
1331 		hlist_add_head_rcu(node, head);
1332 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1333 	/* switched from flag set to unset */
1334 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
1335 		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
1336 		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
1337 
1338 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1339 		/* flag checks above + mcast_handler_lock prevents this */
1340 		WARN_ON(hlist_unhashed(node));
1341 
1342 		hlist_del_init_rcu(node);
1343 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1344 	}
1345 }
1346 
1347 /**
1348  * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
1349  * @bat_priv: the bat priv with all the soft interface information
1350  * @orig: the orig_node of the ogm
1351  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
1352  * @tvlv_value: tvlv buffer containing the multicast data
1353  * @tvlv_value_len: tvlv buffer length
1354  */
1355 static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
1356 					  struct batadv_orig_node *orig,
1357 					  u8 flags,
1358 					  void *tvlv_value,
1359 					  u16 tvlv_value_len)
1360 {
1361 	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1362 	u8 mcast_flags = BATADV_NO_FLAGS;
1363 
1364 	if (orig_mcast_enabled && tvlv_value &&
1365 	    tvlv_value_len >= sizeof(mcast_flags))
1366 		mcast_flags = *(u8 *)tvlv_value;
1367 
1368 	if (!orig_mcast_enabled) {
1369 		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
1370 		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
1371 	}
1372 
1373 	spin_lock_bh(&orig->mcast_handler_lock);
1374 
1375 	if (orig_mcast_enabled &&
1376 	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
1377 		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
1378 	} else if (!orig_mcast_enabled &&
1379 		   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
1380 		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
1381 	}
1382 
1383 	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
1384 
1385 	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
1386 	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
1387 	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
1388 
1389 	orig->mcast_flags = mcast_flags;
1390 	spin_unlock_bh(&orig->mcast_handler_lock);
1391 }
1392 
1393 /**
1394  * batadv_mcast_init() - initialize the multicast optimizations structures
1395  * @bat_priv: the bat priv with all the soft interface information
1396  */
1397 void batadv_mcast_init(struct batadv_priv *bat_priv)
1398 {
1399 	batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
1400 				     NULL, BATADV_TVLV_MCAST, 2,
1401 				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1402 
1403 	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
1404 	batadv_mcast_start_timer(bat_priv);
1405 }
1406 
1407 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
1408 /**
1409  * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
1410  * @bat_priv: the bat priv with all the soft interface information
1411  * @seq: debugfs table seq_file struct
1412  *
1413  * Prints our own multicast flags including a more specific reason why
1414  * they are set, that is prints the bridge and querier state too, to
1415  * the debugfs table specified via @seq.
1416  */
1417 static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
1418 					    struct seq_file *seq)
1419 {
1420 	u8 flags = bat_priv->mcast.flags;
1421 	char querier4, querier6, shadowing4, shadowing6;
1422 	bool bridged = bat_priv->mcast.bridged;
1423 
1424 	if (bridged) {
1425 		querier4 = bat_priv->mcast.querier_ipv4.exists ? '.' : '4';
1426 		querier6 = bat_priv->mcast.querier_ipv6.exists ? '.' : '6';
1427 		shadowing4 = bat_priv->mcast.querier_ipv4.shadowing ? '4' : '.';
1428 		shadowing6 = bat_priv->mcast.querier_ipv6.shadowing ? '6' : '.';
1429 	} else {
1430 		querier4 = '?';
1431 		querier6 = '?';
1432 		shadowing4 = '?';
1433 		shadowing6 = '?';
1434 	}
1435 
1436 	seq_printf(seq, "Multicast flags (own flags: [%c%c%c])\n",
1437 		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
1438 		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
1439 		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
1440 	seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.');
1441 	seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n",
1442 		   querier4, querier6);
1443 	seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n",
1444 		   shadowing4, shadowing6);
1445 	seq_puts(seq, "-------------------------------------------\n");
1446 	seq_printf(seq, "       %-10s %s\n", "Originator", "Flags");
1447 }
1448 
1449 /**
1450  * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
1451  * @seq: seq file to print on
1452  * @offset: not used
1453  *
1454  * This prints a table of (primary) originators and their according
1455  * multicast flags, including (in the header) our own.
1456  *
1457  * Return: always 0
1458  */
1459 int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
1460 {
1461 	struct net_device *net_dev = (struct net_device *)seq->private;
1462 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
1463 	struct batadv_hard_iface *primary_if;
1464 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1465 	struct batadv_orig_node *orig_node;
1466 	struct hlist_head *head;
1467 	u8 flags;
1468 	u32 i;
1469 
1470 	primary_if = batadv_seq_print_text_primary_if_get(seq);
1471 	if (!primary_if)
1472 		return 0;
1473 
1474 	batadv_mcast_flags_print_header(bat_priv, seq);
1475 
1476 	for (i = 0; i < hash->size; i++) {
1477 		head = &hash->table[i];
1478 
1479 		rcu_read_lock();
1480 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1481 			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1482 				      &orig_node->capa_initialized))
1483 				continue;
1484 
1485 			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1486 				      &orig_node->capabilities)) {
1487 				seq_printf(seq, "%pM -\n", orig_node->orig);
1488 				continue;
1489 			}
1490 
1491 			flags = orig_node->mcast_flags;
1492 
1493 			seq_printf(seq, "%pM [%c%c%c]\n", orig_node->orig,
1494 				   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)
1495 				   ? 'U' : '.',
1496 				   (flags & BATADV_MCAST_WANT_ALL_IPV4)
1497 				   ? '4' : '.',
1498 				   (flags & BATADV_MCAST_WANT_ALL_IPV6)
1499 				   ? '6' : '.');
1500 		}
1501 		rcu_read_unlock();
1502 	}
1503 
1504 	batadv_hardif_put(primary_if);
1505 
1506 	return 0;
1507 }
1508 #endif
1509 
1510 /**
1511  * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
1512  * @msg: buffer for the message
1513  * @bat_priv: the bat priv with all the soft interface information
1514  *
1515  * Return: 0 or error code.
1516  */
1517 int batadv_mcast_mesh_info_put(struct sk_buff *msg,
1518 			       struct batadv_priv *bat_priv)
1519 {
1520 	u32 flags = bat_priv->mcast.flags;
1521 	u32 flags_priv = BATADV_NO_FLAGS;
1522 
1523 	if (bat_priv->mcast.bridged) {
1524 		flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
1525 
1526 		if (bat_priv->mcast.querier_ipv4.exists)
1527 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
1528 		if (bat_priv->mcast.querier_ipv6.exists)
1529 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
1530 		if (bat_priv->mcast.querier_ipv4.shadowing)
1531 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
1532 		if (bat_priv->mcast.querier_ipv6.shadowing)
1533 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
1534 	}
1535 
1536 	if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
1537 	    nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
1538 		return -EMSGSIZE;
1539 
1540 	return 0;
1541 }
1542 
1543 /**
1544  * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
1545  *  to a netlink socket
1546  * @msg: buffer for the message
1547  * @portid: netlink port
1548  * @cb: Control block containing additional options
1549  * @orig_node: originator to dump the multicast flags of
1550  *
1551  * Return: 0 or error code.
1552  */
1553 static int
1554 batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
1555 			      struct netlink_callback *cb,
1556 			      struct batadv_orig_node *orig_node)
1557 {
1558 	void *hdr;
1559 
1560 	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
1561 			  &batadv_netlink_family, NLM_F_MULTI,
1562 			  BATADV_CMD_GET_MCAST_FLAGS);
1563 	if (!hdr)
1564 		return -ENOBUFS;
1565 
1566 	genl_dump_check_consistent(cb, hdr);
1567 
1568 	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
1569 		    orig_node->orig)) {
1570 		genlmsg_cancel(msg, hdr);
1571 		return -EMSGSIZE;
1572 	}
1573 
1574 	if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1575 		     &orig_node->capabilities)) {
1576 		if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
1577 				orig_node->mcast_flags)) {
1578 			genlmsg_cancel(msg, hdr);
1579 			return -EMSGSIZE;
1580 		}
1581 	}
1582 
1583 	genlmsg_end(msg, hdr);
1584 	return 0;
1585 }
1586 
1587 /**
1588  * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
1589  *  table to a netlink socket
1590  * @msg: buffer for the message
1591  * @portid: netlink port
1592  * @cb: Control block containing additional options
1593  * @hash: hash to dump
1594  * @bucket: bucket index to dump
1595  * @idx_skip: How many entries to skip
1596  *
1597  * Return: 0 or error code.
1598  */
1599 static int
1600 batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
1601 			       struct netlink_callback *cb,
1602 			       struct batadv_hashtable *hash,
1603 			       unsigned int bucket, long *idx_skip)
1604 {
1605 	struct batadv_orig_node *orig_node;
1606 	long idx = 0;
1607 
1608 	spin_lock_bh(&hash->list_locks[bucket]);
1609 	cb->seq = atomic_read(&hash->generation) << 1 | 1;
1610 
1611 	hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
1612 		if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1613 			      &orig_node->capa_initialized))
1614 			continue;
1615 
1616 		if (idx < *idx_skip)
1617 			goto skip;
1618 
1619 		if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
1620 			spin_unlock_bh(&hash->list_locks[bucket]);
1621 			*idx_skip = idx;
1622 
1623 			return -EMSGSIZE;
1624 		}
1625 
1626 skip:
1627 		idx++;
1628 	}
1629 	spin_unlock_bh(&hash->list_locks[bucket]);
1630 
1631 	return 0;
1632 }
1633 
1634 /**
1635  * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
1636  * @msg: buffer for the message
1637  * @portid: netlink port
1638  * @cb: Control block containing additional options
1639  * @bat_priv: the bat priv with all the soft interface information
1640  * @bucket: current bucket to dump
1641  * @idx: index in current bucket to the next entry to dump
1642  *
1643  * Return: 0 or error code.
1644  */
1645 static int
1646 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
1647 			  struct netlink_callback *cb,
1648 			  struct batadv_priv *bat_priv, long *bucket, long *idx)
1649 {
1650 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1651 	long bucket_tmp = *bucket;
1652 	long idx_tmp = *idx;
1653 
1654 	while (bucket_tmp < hash->size) {
1655 		if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
1656 						   *bucket, &idx_tmp))
1657 			break;
1658 
1659 		bucket_tmp++;
1660 		idx_tmp = 0;
1661 	}
1662 
1663 	*bucket = bucket_tmp;
1664 	*idx = idx_tmp;
1665 
1666 	return msg->len;
1667 }
1668 
1669 /**
1670  * batadv_mcast_netlink_get_primary() - get primary interface from netlink
1671  *  callback
1672  * @cb: netlink callback structure
1673  * @primary_if: the primary interface pointer to return the result in
1674  *
1675  * Return: 0 or error code.
1676  */
1677 static int
1678 batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
1679 				 struct batadv_hard_iface **primary_if)
1680 {
1681 	struct batadv_hard_iface *hard_iface = NULL;
1682 	struct net *net = sock_net(cb->skb->sk);
1683 	struct net_device *soft_iface;
1684 	struct batadv_priv *bat_priv;
1685 	int ifindex;
1686 	int ret = 0;
1687 
1688 	ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
1689 	if (!ifindex)
1690 		return -EINVAL;
1691 
1692 	soft_iface = dev_get_by_index(net, ifindex);
1693 	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
1694 		ret = -ENODEV;
1695 		goto out;
1696 	}
1697 
1698 	bat_priv = netdev_priv(soft_iface);
1699 
1700 	hard_iface = batadv_primary_if_get_selected(bat_priv);
1701 	if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
1702 		ret = -ENOENT;
1703 		goto out;
1704 	}
1705 
1706 out:
1707 	if (soft_iface)
1708 		dev_put(soft_iface);
1709 
1710 	if (!ret && primary_if)
1711 		*primary_if = hard_iface;
1712 	else if (hard_iface)
1713 		batadv_hardif_put(hard_iface);
1714 
1715 	return ret;
1716 }
1717 
1718 /**
1719  * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
1720  * @msg: buffer for the message
1721  * @cb: callback structure containing arguments
1722  *
1723  * Return: message length.
1724  */
1725 int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
1726 {
1727 	struct batadv_hard_iface *primary_if = NULL;
1728 	int portid = NETLINK_CB(cb->skb).portid;
1729 	struct batadv_priv *bat_priv;
1730 	long *bucket = &cb->args[0];
1731 	long *idx = &cb->args[1];
1732 	int ret;
1733 
1734 	ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
1735 	if (ret)
1736 		return ret;
1737 
1738 	bat_priv = netdev_priv(primary_if->soft_iface);
1739 	ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
1740 
1741 	batadv_hardif_put(primary_if);
1742 	return ret;
1743 }
1744 
1745 /**
1746  * batadv_mcast_free() - free the multicast optimizations structures
1747  * @bat_priv: the bat priv with all the soft interface information
1748  */
1749 void batadv_mcast_free(struct batadv_priv *bat_priv)
1750 {
1751 	cancel_delayed_work_sync(&bat_priv->mcast.work);
1752 
1753 	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
1754 	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
1755 
1756 	/* safely calling outside of worker, as worker was canceled above */
1757 	batadv_mcast_mla_tt_retract(bat_priv, NULL);
1758 }
1759 
1760 /**
1761  * batadv_mcast_purge_orig() - reset originator global mcast state modifications
1762  * @orig: the originator which is going to get purged
1763  */
1764 void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
1765 {
1766 	struct batadv_priv *bat_priv = orig->bat_priv;
1767 
1768 	spin_lock_bh(&orig->mcast_handler_lock);
1769 
1770 	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
1771 	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
1772 	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
1773 
1774 	spin_unlock_bh(&orig->mcast_handler_lock);
1775 }
1776