xref: /openbmc/linux/net/batman-adv/multicast.c (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
1  // SPDX-License-Identifier: GPL-2.0
2  /* Copyright (C) B.A.T.M.A.N. contributors:
3   *
4   * Linus Lüssing
5   */
6  
7  #include "multicast.h"
8  #include "main.h"
9  
10  #include <linux/atomic.h>
11  #include <linux/bitops.h>
12  #include <linux/bug.h>
13  #include <linux/byteorder/generic.h>
14  #include <linux/container_of.h>
15  #include <linux/errno.h>
16  #include <linux/etherdevice.h>
17  #include <linux/gfp.h>
18  #include <linux/icmpv6.h>
19  #include <linux/if_bridge.h>
20  #include <linux/if_ether.h>
21  #include <linux/igmp.h>
22  #include <linux/in.h>
23  #include <linux/in6.h>
24  #include <linux/inetdevice.h>
25  #include <linux/ip.h>
26  #include <linux/ipv6.h>
27  #include <linux/jiffies.h>
28  #include <linux/kernel.h>
29  #include <linux/list.h>
30  #include <linux/lockdep.h>
31  #include <linux/netdevice.h>
32  #include <linux/netlink.h>
33  #include <linux/printk.h>
34  #include <linux/rculist.h>
35  #include <linux/rcupdate.h>
36  #include <linux/skbuff.h>
37  #include <linux/slab.h>
38  #include <linux/spinlock.h>
39  #include <linux/stddef.h>
40  #include <linux/string.h>
41  #include <linux/types.h>
42  #include <linux/workqueue.h>
43  #include <net/addrconf.h>
44  #include <net/genetlink.h>
45  #include <net/if_inet6.h>
46  #include <net/ip.h>
47  #include <net/ipv6.h>
48  #include <net/netlink.h>
49  #include <net/sock.h>
50  #include <uapi/linux/batadv_packet.h>
51  #include <uapi/linux/batman_adv.h>
52  
53  #include "bridge_loop_avoidance.h"
54  #include "hard-interface.h"
55  #include "hash.h"
56  #include "log.h"
57  #include "netlink.h"
58  #include "send.h"
59  #include "soft-interface.h"
60  #include "translation-table.h"
61  #include "tvlv.h"
62  
63  static void batadv_mcast_mla_update(struct work_struct *work);
64  
65  /**
66   * batadv_mcast_start_timer() - schedule the multicast periodic worker
67   * @bat_priv: the bat priv with all the soft interface information
68   */
69  static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
70  {
71  	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
72  			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
73  }
74  
75  /**
76   * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
77   * @soft_iface: netdev struct of the mesh interface
78   *
79   * If the given soft interface has a bridge on top then the refcount
80   * of the corresponding net device is increased.
81   *
82   * Return: NULL if no such bridge exists. Otherwise the net device of the
83   * bridge.
84   */
85  static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
86  {
87  	struct net_device *upper = soft_iface;
88  
89  	rcu_read_lock();
90  	do {
91  		upper = netdev_master_upper_dev_get_rcu(upper);
92  	} while (upper && !netif_is_bridge_master(upper));
93  
94  	dev_hold(upper);
95  	rcu_read_unlock();
96  
97  	return upper;
98  }
99  
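/* Usage sketch (illustrative only; it mirrors how the helper is used in
 * batadv_mcast_mla_softif_get() further below): the caller owns the bridge
 * reference and has to release it with dev_put() once done.
 *
 *	struct net_device *bridge = batadv_mcast_get_bridge(soft_iface);
 *
 *	if (bridge) {
 *		... inspect bridge / querier state ...
 *		dev_put(bridge);
 *	}
 */
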
100  /**
101   * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
102   *  node for IPv4
103   * @dev: the interface to check
104   *
105   * Checks the presence of an IPv4 multicast router on this node.
106   *
107   * Caller needs to hold rcu read lock.
108   *
109   * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
110   */
111  static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
112  {
113  	struct in_device *in_dev = __in_dev_get_rcu(dev);
114  
115  	if (in_dev && IN_DEV_MFORWARD(in_dev))
116  		return BATADV_NO_FLAGS;
117  	else
118  		return BATADV_MCAST_WANT_NO_RTR4;
119  }
120  
121  /**
122   * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
123   *  node for IPv6
124   * @dev: the interface to check
125   *
126   * Checks the presence of an IPv6 multicast router on this node.
127   *
128   * Caller needs to hold rcu read lock.
129   *
130   * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
131   */
132  #if IS_ENABLED(CONFIG_IPV6_MROUTE)
133  static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
134  {
135  	struct inet6_dev *in6_dev = __in6_dev_get(dev);
136  
137  	if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
138  		return BATADV_NO_FLAGS;
139  	else
140  		return BATADV_MCAST_WANT_NO_RTR6;
141  }
142  #else
143  static inline u8
144  batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
145  {
146  	return BATADV_MCAST_WANT_NO_RTR6;
147  }
148  #endif
149  
150  /**
151   * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
152   * @bat_priv: the bat priv with all the soft interface information
153   * @bridge: bridge interface on top of the soft_iface if present,
154   *  otherwise pass NULL
155   *
156   * Checks the presence of IPv4 and IPv6 multicast routers on this
157   * node.
158   *
159   * Return:
160   *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
161   *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
162   *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
163   *	The former two OR'd: no multicast router is present
164   */
165  static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
166  						struct net_device *bridge)
167  {
168  	struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
169  	u8 flags = BATADV_NO_FLAGS;
170  
171  	rcu_read_lock();
172  
173  	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
174  	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);
175  
176  	rcu_read_unlock();
177  
178  	return flags;
179  }
180  
181  /**
182   * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
183   * @bat_priv: the bat priv with all the soft interface information
184   * @bridge: bridge interface on top of the soft_iface if present,
185   *  otherwise pass NULL
186   *
187   * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
188   *
189   * Return:
190   *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
191   *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
192   *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
193   *	The former two OR'd: no multicast router is present
194   */
195  static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
196  						struct net_device *bridge)
197  {
198  	struct net_device *dev = bat_priv->soft_iface;
199  	u8 flags = BATADV_NO_FLAGS;
200  
201  	if (!bridge)
202  		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
203  
204  	if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
205  		flags |= BATADV_MCAST_WANT_NO_RTR4;
206  	if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
207  		flags |= BATADV_MCAST_WANT_NO_RTR6;
208  
209  	return flags;
210  }
211  
212  /**
213   * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
214   * @bat_priv: the bat priv with all the soft interface information
215   * @bridge: bridge interface on top of the soft_iface if present,
216   *  otherwise pass NULL
217   *
218   * Checks the presence of IPv4 and IPv6 multicast routers on this
219   * node or behind its bridge.
220   *
221   * Return:
222   *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
223   *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
224   *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
225   *	The former two OR'd: no multicast router is present
226   */
227  static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
228  					 struct net_device *bridge)
229  {
230  	u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
231  
232  	flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
233  	flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);
234  
235  	return flags;
236  }
237  
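/* Worked example (illustrative only): with an IPv4 multicast routing daemon
 * running on this node and an IPv6 multicast router detected behind the
 * bridge, the two helpers above return
 *
 *	softif_flags = BATADV_MCAST_WANT_NO_RTR6;
 *	bridge_flags = BATADV_MCAST_WANT_NO_RTR4;
 *
 * and the two AND operations reduce the initial
 * BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6 to BATADV_NO_FLAGS:
 * neither "want no router" flag is announced, since a multicast router is
 * reachable for each protocol family.
 */
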
238  /**
239   * batadv_mcast_mla_flags_get() - get the new multicast flags
240   * @bat_priv: the bat priv with all the soft interface information
241   *
242   * Return: A set of flags for the current/next TVLV, querier and
243   * bridge state.
244   */
245  static struct batadv_mcast_mla_flags
246  batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
247  {
248  	struct net_device *dev = bat_priv->soft_iface;
249  	struct batadv_mcast_querier_state *qr4, *qr6;
250  	struct batadv_mcast_mla_flags mla_flags;
251  	struct net_device *bridge;
252  
253  	bridge = batadv_mcast_get_bridge(dev);
254  
255  	memset(&mla_flags, 0, sizeof(mla_flags));
256  	mla_flags.enabled = 1;
257  	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
258  							       bridge);
259  
260  	if (!bridge)
261  		return mla_flags;
262  
263  	dev_put(bridge);
264  
265  	mla_flags.bridged = 1;
266  	qr4 = &mla_flags.querier_ipv4;
267  	qr6 = &mla_flags.querier_ipv6;
268  
269  	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
270  		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
271  
272  	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
273  	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
274  
275  	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
276  	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
277  
278  	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;
279  
280  	/* 1) If no querier exists at all, then multicast listeners on
281  	 *    our local TT clients behind the bridge will keep silent.
282  	 * 2) If the selected querier is on one of our local TT clients,
283  	 *    behind the bridge, then this querier might shadow multicast
284  	 *    listeners on our local TT clients, behind this bridge.
285  	 *
286  	 * In both cases, we will signalize other batman nodes that
287  	 * we need all multicast traffic of the according protocol.
288  	 */
289  	if (!qr4->exists || qr4->shadowing) {
290  		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
291  		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
292  	}
293  
294  	if (!qr6->exists || qr6->shadowing) {
295  		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
296  		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
297  	}
298  
299  	return mla_flags;
300  }
301  
302  /**
303   * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
304   * @mcast_addr: the multicast address to check
305   * @mcast_list: the list with multicast addresses to search in
306   *
307   * Return: true if the given address is already in the given list.
308   * Otherwise returns false.
309   */
310  static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
311  					  struct hlist_head *mcast_list)
312  {
313  	struct batadv_hw_addr *mcast_entry;
314  
315  	hlist_for_each_entry(mcast_entry, mcast_list, list)
316  		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
317  			return true;
318  
319  	return false;
320  }
321  
322  /**
323   * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
324   * @dev: the device to collect multicast addresses from
325   * @mcast_list: a list to put found addresses into
326   * @flags: flags indicating the new multicast state
327   *
328   * Collects multicast addresses of IPv4 multicast listeners residing
329   * on this kernel on the given soft interface, dev, in
330   * the given mcast_list. In general, multicast listeners provided by
331   * your multicast receiving applications run directly on this node.
332   *
333   * Return: -ENOMEM on memory allocation error or the number of
334   * items added to the mcast_list otherwise.
335   */
336  static int
337  batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
338  				 struct hlist_head *mcast_list,
339  				 struct batadv_mcast_mla_flags *flags)
340  {
341  	struct batadv_hw_addr *new;
342  	struct in_device *in_dev;
343  	u8 mcast_addr[ETH_ALEN];
344  	struct ip_mc_list *pmc;
345  	int ret = 0;
346  
347  	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
348  		return 0;
349  
350  	rcu_read_lock();
351  
352  	in_dev = __in_dev_get_rcu(dev);
353  	if (!in_dev) {
354  		rcu_read_unlock();
355  		return 0;
356  	}
357  
358  	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
359  	     pmc = rcu_dereference(pmc->next_rcu)) {
360  		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
361  		    ipv4_is_local_multicast(pmc->multiaddr))
362  			continue;
363  
364  		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
365  		    !ipv4_is_local_multicast(pmc->multiaddr))
366  			continue;
367  
368  		ip_eth_mc_map(pmc->multiaddr, mcast_addr);
369  
370  		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
371  			continue;
372  
373  		new = kmalloc(sizeof(*new), GFP_ATOMIC);
374  		if (!new) {
375  			ret = -ENOMEM;
376  			break;
377  		}
378  
379  		ether_addr_copy(new->addr, mcast_addr);
380  		hlist_add_head(&new->list, mcast_list);
381  		ret++;
382  	}
383  	rcu_read_unlock();
384  
385  	return ret;
386  }
387  
388  /**
389   * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
390   * @dev: the device to collect multicast addresses from
391   * @mcast_list: a list to put found addresses into
392   * @flags: flags indicating the new multicast state
393   *
394   * Collects multicast addresses of IPv6 multicast listeners residing
395   * on this kernel on the given soft interface, dev, in
396   * the given mcast_list. In general, multicast listeners provided by
397   * your multicast receiving applications run directly on this node.
398   *
399   * Return: -ENOMEM on memory allocation error or the number of
400   * items added to the mcast_list otherwise.
401   */
402  #if IS_ENABLED(CONFIG_IPV6)
403  static int
404  batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
405  				 struct hlist_head *mcast_list,
406  				 struct batadv_mcast_mla_flags *flags)
407  {
408  	struct batadv_hw_addr *new;
409  	struct inet6_dev *in6_dev;
410  	u8 mcast_addr[ETH_ALEN];
411  	struct ifmcaddr6 *pmc6;
412  	int ret = 0;
413  
414  	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
415  		return 0;
416  
417  	rcu_read_lock();
418  
419  	in6_dev = __in6_dev_get(dev);
420  	if (!in6_dev) {
421  		rcu_read_unlock();
422  		return 0;
423  	}
424  
425  	for (pmc6 = rcu_dereference(in6_dev->mc_list);
426  	     pmc6;
427  	     pmc6 = rcu_dereference(pmc6->next)) {
428  		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
429  		    IPV6_ADDR_SCOPE_LINKLOCAL)
430  			continue;
431  
432  		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
433  		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
434  			continue;
435  
436  		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
437  		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
438  		    IPV6_ADDR_SCOPE_LINKLOCAL)
439  			continue;
440  
441  		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);
442  
443  		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
444  			continue;
445  
446  		new = kmalloc(sizeof(*new), GFP_ATOMIC);
447  		if (!new) {
448  			ret = -ENOMEM;
449  			break;
450  		}
451  
452  		ether_addr_copy(new->addr, mcast_addr);
453  		hlist_add_head(&new->list, mcast_list);
454  		ret++;
455  	}
456  	rcu_read_unlock();
457  
458  	return ret;
459  }
460  #else
461  static inline int
462  batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
463  				 struct hlist_head *mcast_list,
464  				 struct batadv_mcast_mla_flags *flags)
465  {
466  	return 0;
467  }
468  #endif
469  
470  /**
471   * batadv_mcast_mla_softif_get() - get softif multicast listeners
472   * @dev: the device to collect multicast addresses from
473   * @mcast_list: a list to put found addresses into
474   * @flags: flags indicating the new multicast state
475   *
476   * Collects multicast addresses of multicast listeners residing
477   * on this kernel on the given soft interface, dev, in
478   * the given mcast_list. In general, multicast listeners provided by
479   * your multicast receiving applications run directly on this node.
480   *
481   * If there is a bridge interface on top of dev, collect from that one
482   * instead. Just like with IP addresses and routes, multicast listeners
483   * will(/should) register to the bridge interface instead of an
484   * enslaved bat0.
485   *
486   * Return: -ENOMEM on memory allocation error or the number of
487   * items added to the mcast_list otherwise.
488   */
489  static int
490  batadv_mcast_mla_softif_get(struct net_device *dev,
491  			    struct hlist_head *mcast_list,
492  			    struct batadv_mcast_mla_flags *flags)
493  {
494  	struct net_device *bridge = batadv_mcast_get_bridge(dev);
495  	int ret4, ret6 = 0;
496  
497  	if (bridge)
498  		dev = bridge;
499  
500  	ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
501  	if (ret4 < 0)
502  		goto out;
503  
504  	ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
505  	if (ret6 < 0) {
506  		ret4 = 0;
507  		goto out;
508  	}
509  
510  out:
511  	dev_put(bridge);
512  
513  	return ret4 + ret6;
514  }
515  
516  /**
517   * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
518   * @dst: destination to write to - a multicast MAC address
519   * @src: source to read from - a multicast IP address
520   *
521   * Converts a given multicast IPv4/IPv6 address from a bridge
522   * to its matching multicast MAC address and copies it into the given
523   * destination buffer.
524   *
525   * Caller needs to make sure the destination buffer can hold
526   * at least ETH_ALEN bytes.
527   */
528  static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
529  {
530  	if (src->proto == htons(ETH_P_IP))
531  		ip_eth_mc_map(src->dst.ip4, dst);
532  #if IS_ENABLED(CONFIG_IPV6)
533  	else if (src->proto == htons(ETH_P_IPV6))
534  		ipv6_eth_mc_map(&src->dst.ip6, dst);
535  #endif
536  	else
537  		eth_zero_addr(dst);
538  }
539  
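/* Mapping examples (illustrative only, the standard IP to Ethernet
 * multicast mapping performed by ip_eth_mc_map()/ipv6_eth_mc_map()):
 *
 *	224.0.0.1 -> 01:00:5e:00:00:01
 *	239.1.2.3 -> 01:00:5e:01:02:03
 *	ff02::1   -> 33:33:00:00:00:01
 */
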
540  /**
541   * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
542   * @dev: a bridge slave whose bridge to collect multicast addresses from
543   * @mcast_list: a list to put found addresses into
544   * @flags: flags indicating the new multicast state
545   *
546   * Collects multicast addresses of multicast listeners residing
547   * on foreign, non-mesh devices that were given access to our mesh via
548   * a bridge on top of the given soft interface, dev, in the given
549   * mcast_list.
550   *
551   * Return: -ENOMEM on memory allocation error or the number of
552   * items added to the mcast_list otherwise.
553   */
554  static int batadv_mcast_mla_bridge_get(struct net_device *dev,
555  				       struct hlist_head *mcast_list,
556  				       struct batadv_mcast_mla_flags *flags)
557  {
558  	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
559  	struct br_ip_list *br_ip_entry, *tmp;
560  	u8 tvlv_flags = flags->tvlv_flags;
561  	struct batadv_hw_addr *new;
562  	u8 mcast_addr[ETH_ALEN];
563  	int ret;
564  
565  	/* we don't need to detect these devices/listeners, the IGMP/MLD
566  	 * snooping code of the Linux bridge already does that for us
567  	 */
568  	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
569  	if (ret < 0)
570  		goto out;
571  
572  	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
573  		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
574  			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
575  				continue;
576  
577  			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
578  			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
579  				continue;
580  
581  			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
582  			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
583  				continue;
584  		}
585  
586  #if IS_ENABLED(CONFIG_IPV6)
587  		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
588  			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
589  				continue;
590  
591  			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
592  			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
593  				continue;
594  
595  			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
596  			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
597  			    IPV6_ADDR_SCOPE_LINKLOCAL)
598  				continue;
599  		}
600  #endif
601  
602  		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
603  		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
604  			continue;
605  
606  		new = kmalloc(sizeof(*new), GFP_ATOMIC);
607  		if (!new) {
608  			ret = -ENOMEM;
609  			break;
610  		}
611  
612  		ether_addr_copy(new->addr, mcast_addr);
613  		hlist_add_head(&new->list, mcast_list);
614  	}
615  
616  out:
617  	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
618  		list_del(&br_ip_entry->list);
619  		kfree(br_ip_entry);
620  	}
621  
622  	return ret;
623  }
624  
625  /**
626   * batadv_mcast_mla_list_free() - free a list of multicast addresses
627   * @mcast_list: the list to free
628   *
629   * Removes and frees all items in the given mcast_list.
630   */
631  static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
632  {
633  	struct batadv_hw_addr *mcast_entry;
634  	struct hlist_node *tmp;
635  
636  	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
637  		hlist_del(&mcast_entry->list);
638  		kfree(mcast_entry);
639  	}
640  }
641  
642  /**
643   * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
644   * @bat_priv: the bat priv with all the soft interface information
645   * @mcast_list: a list of addresses which should _not_ be removed
646   *
647   * Retracts the announcement of any multicast listener from the
648   * translation table except the ones listed in the given mcast_list.
649   *
650   * If mcast_list is NULL then all are retracted.
651   */
652  static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
653  					struct hlist_head *mcast_list)
654  {
655  	struct batadv_hw_addr *mcast_entry;
656  	struct hlist_node *tmp;
657  
658  	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
659  				  list) {
660  		if (mcast_list &&
661  		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
662  						  mcast_list))
663  			continue;
664  
665  		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
666  				       BATADV_NO_FLAGS,
667  				       "mcast TT outdated", false);
668  
669  		hlist_del(&mcast_entry->list);
670  		kfree(mcast_entry);
671  	}
672  }
673  
674  /**
675   * batadv_mcast_mla_tt_add() - add multicast listener announcements
676   * @bat_priv: the bat priv with all the soft interface information
677   * @mcast_list: a list of addresses which are going to get added
678   *
679   * Adds multicast listener announcements from the given mcast_list to the
680   * translation table if they have not been added yet.
681   */
682  static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
683  				    struct hlist_head *mcast_list)
684  {
685  	struct batadv_hw_addr *mcast_entry;
686  	struct hlist_node *tmp;
687  
688  	if (!mcast_list)
689  		return;
690  
691  	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
692  		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
693  						  &bat_priv->mcast.mla_list))
694  			continue;
695  
696  		if (!batadv_tt_local_add(bat_priv->soft_iface,
697  					 mcast_entry->addr, BATADV_NO_FLAGS,
698  					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
699  			continue;
700  
701  		hlist_del(&mcast_entry->list);
702  		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
703  	}
704  }
705  
706  /**
707   * batadv_mcast_querier_log() - debug output regarding the querier status on
708   *  link
709   * @bat_priv: the bat priv with all the soft interface information
710   * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
711   * @old_state: the previous querier state on our link
712   * @new_state: the new querier state on our link
713   *
714   * Outputs debug messages to the logging facility with log level 'mcast'
715   * regarding changes to the querier status on the link which are relevant
716   * to our multicast optimizations.
717   *
718   * Usually this is about whether a querier appeared or vanished in
719   * our mesh or whether the querier is in the suboptimal position of being
720   * behind our local bridge segment: Snooping switches will directly
721   * forward listener reports to the querier, therefore batman-adv and
722   * the bridge will potentially not see these listeners - the querier is
723   * potentially shadowing listeners from us then.
724   *
725   * This is only interesting for nodes with a bridge on top of their
726   * soft interface.
727   */
728  static void
729  batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
730  			 struct batadv_mcast_querier_state *old_state,
731  			 struct batadv_mcast_querier_state *new_state)
732  {
733  	if (!old_state->exists && new_state->exists)
734  		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
735  			    str_proto);
736  	else if (old_state->exists && !new_state->exists)
737  		batadv_info(bat_priv->soft_iface,
738  			    "%s Querier disappeared - multicast optimizations disabled\n",
739  			    str_proto);
740  	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
741  		batadv_info(bat_priv->soft_iface,
742  			    "No %s Querier present - multicast optimizations disabled\n",
743  			    str_proto);
744  
745  	if (new_state->exists) {
746  		if ((!old_state->shadowing && new_state->shadowing) ||
747  		    (!old_state->exists && new_state->shadowing))
748  			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
749  				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
750  				   str_proto);
751  		else if (old_state->shadowing && !new_state->shadowing)
752  			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
753  				   "%s Querier is not behind our bridged segment\n",
754  				   str_proto);
755  	}
756  }
757  
758  /**
759   * batadv_mcast_bridge_log() - debug output for topology changes in bridged
760   *  setups
761   * @bat_priv: the bat priv with all the soft interface information
762   * @new_flags: flags indicating the new multicast state
763   *
764   * If no bridges are ever used on this node, then this function does nothing.
765   *
766   * Otherwise this function outputs debug information to the 'mcast' log level
767   * which might be relevant to our multicast optimizations.
768   *
769   * More precisely, it outputs information when a bridge interface is added or
770   * removed from a soft interface. And when a bridge is present, it further
771   * outputs information about the querier state which is relevant for the
772   * multicast flags this node is going to set.
773   */
774  static void
775  batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
776  			struct batadv_mcast_mla_flags *new_flags)
777  {
778  	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;
779  
780  	if (!old_flags->bridged && new_flags->bridged)
781  		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
782  			   "Bridge added: Setting Unsnoopables(U)-flag\n");
783  	else if (old_flags->bridged && !new_flags->bridged)
784  		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
785  			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");
786  
787  	if (new_flags->bridged) {
788  		batadv_mcast_querier_log(bat_priv, "IGMP",
789  					 &old_flags->querier_ipv4,
790  					 &new_flags->querier_ipv4);
791  		batadv_mcast_querier_log(bat_priv, "MLD",
792  					 &old_flags->querier_ipv6,
793  					 &new_flags->querier_ipv6);
794  	}
795  }
796  
797  /**
798   * batadv_mcast_flags_log() - output debug information about mcast flag changes
799   * @bat_priv: the bat priv with all the soft interface information
800   * @flags: TVLV flags indicating the new multicast state
801   *
802   * Whenever the multicast TVLV flags this node announces change, this function
803   * should be used to notify userspace about the change.
804   */
805  static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
806  {
807  	bool old_enabled = bat_priv->mcast.mla_flags.enabled;
808  	u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
809  	char str_old_flags[] = "[.... . ]";
810  
811  	sprintf(str_old_flags, "[%c%c%c%s%s]",
812  		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
813  		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
814  		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
815  		!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
816  		!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
817  
818  	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
819  		   "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
820  		   old_enabled ? str_old_flags : "<undefined>",
821  		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
822  		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
823  		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
824  		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
825  		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
826  }
827  
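/* Example (illustrative only): a newly bridged node without an IGMP/MLD
 * querier announces BATADV_MCAST_WANT_ALL_UNSNOOPABLES,
 * BATADV_MCAST_WANT_ALL_IPV4 and BATADV_MCAST_WANT_ALL_IPV6 and, because of
 * the missing queriers, also clears both BATADV_MCAST_WANT_NO_RTR* bits,
 * which the function above renders as
 *
 *	Changing multicast flags from '<undefined>' to '[U46R4R6]'
 */
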
828  /**
829   * batadv_mcast_mla_flags_update() - update multicast flags
830   * @bat_priv: the bat priv with all the soft interface information
831   * @flags: flags indicating the new multicast state
832   *
833   * Updates the own multicast tvlv with our current multicast related settings,
834   * capabilities and inabilities.
835   */
836  static void
837  batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
838  			      struct batadv_mcast_mla_flags *flags)
839  {
840  	struct batadv_tvlv_mcast_data mcast_data;
841  
842  	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
843  		return;
844  
845  	batadv_mcast_bridge_log(bat_priv, flags);
846  	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);
847  
848  	mcast_data.flags = flags->tvlv_flags;
849  	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
850  
851  	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
852  				       &mcast_data, sizeof(mcast_data));
853  
854  	bat_priv->mcast.mla_flags = *flags;
855  }
856  
857  /**
858   * __batadv_mcast_mla_update() - update the own MLAs
859   * @bat_priv: the bat priv with all the soft interface information
860   *
861   * Updates the own multicast listener announcements in the translation
862   * table as well as the own, announced multicast tvlv container.
863   *
864   * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
865   * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
866   * ensured by the non-parallel execution of the worker this function
867   * belongs to.
868   */
869  static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
870  {
871  	struct net_device *soft_iface = bat_priv->soft_iface;
872  	struct hlist_head mcast_list = HLIST_HEAD_INIT;
873  	struct batadv_mcast_mla_flags flags;
874  	int ret;
875  
876  	flags = batadv_mcast_mla_flags_get(bat_priv);
877  
878  	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
879  	if (ret < 0)
880  		goto out;
881  
882  	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
883  	if (ret < 0)
884  		goto out;
885  
886  	spin_lock(&bat_priv->mcast.mla_lock);
887  	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
888  	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
889  	batadv_mcast_mla_flags_update(bat_priv, &flags);
890  	spin_unlock(&bat_priv->mcast.mla_lock);
891  
892  out:
893  	batadv_mcast_mla_list_free(&mcast_list);
894  }
895  
896  /**
897   * batadv_mcast_mla_update() - update the own MLAs
898   * @work: kernel work struct
899   *
900   * Updates the own multicast listener announcements in the translation
901   * table as well as the own, announced multicast tvlv container.
902   *
903   * In the end, reschedules the work timer.
904   */
905  static void batadv_mcast_mla_update(struct work_struct *work)
906  {
907  	struct delayed_work *delayed_work;
908  	struct batadv_priv_mcast *priv_mcast;
909  	struct batadv_priv *bat_priv;
910  
911  	delayed_work = to_delayed_work(work);
912  	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
913  	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
914  
915  	__batadv_mcast_mla_update(bat_priv);
916  	batadv_mcast_start_timer(bat_priv);
917  }
918  
919  /**
920   * batadv_mcast_is_report_ipv4() - check for IGMP reports
921   * @skb: the ethernet frame destined for the mesh
922   *
923   * This call might reallocate skb data.
924   *
925   * Checks whether the given frame is a valid IGMP report.
926   *
927   * Return: If so then true, otherwise false.
928   */
929  static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
930  {
931  	if (ip_mc_check_igmp(skb) < 0)
932  		return false;
933  
934  	switch (igmp_hdr(skb)->type) {
935  	case IGMP_HOST_MEMBERSHIP_REPORT:
936  	case IGMPV2_HOST_MEMBERSHIP_REPORT:
937  	case IGMPV3_HOST_MEMBERSHIP_REPORT:
938  		return true;
939  	}
940  
941  	return false;
942  }
943  
944  /**
945   * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
946   *  potential
947   * @bat_priv: the bat priv with all the soft interface information
948   * @skb: the IPv4 packet to check
949   * @is_unsnoopable: stores whether the destination is snoopable
950   * @is_routable: stores whether the destination is routable
951   *
952   * Checks whether the given IPv4 packet has the potential to be forwarded with a
953   * mode more optimal than classic flooding.
954   *
955   * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
956   * allocation failure.
957   */
958  static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
959  					     struct sk_buff *skb,
960  					     bool *is_unsnoopable,
961  					     int *is_routable)
962  {
963  	struct iphdr *iphdr;
964  
965  	/* We might fail due to out-of-memory -> drop it */
966  	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
967  		return -ENOMEM;
968  
969  	if (batadv_mcast_is_report_ipv4(skb))
970  		return -EINVAL;
971  
972  	iphdr = ip_hdr(skb);
973  
974  	/* link-local multicast listeners behind a bridge are
975  	 * not snoopable (see RFC4541, section 2.1.2.2)
976  	 */
977  	if (ipv4_is_local_multicast(iphdr->daddr))
978  		*is_unsnoopable = true;
979  	else
980  		*is_routable = ETH_P_IP;
981  
982  	return 0;
983  }
984  
985  /**
986   * batadv_mcast_is_report_ipv6() - check for MLD reports
987   * @skb: the ethernet frame destined for the mesh
988   *
989   * This call might reallocate skb data.
990   *
991   * Checks whether the given frame is a valid MLD report.
992   *
993   * Return: If so then true, otherwise false.
994   */
995  static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
996  {
997  	if (ipv6_mc_check_mld(skb) < 0)
998  		return false;
999  
1000  	switch (icmp6_hdr(skb)->icmp6_type) {
1001  	case ICMPV6_MGM_REPORT:
1002  	case ICMPV6_MLD2_REPORT:
1003  		return true;
1004  	}
1005  
1006  	return false;
1007  }
1008  
1009  /**
1010   * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
1011   *  potential
1012   * @bat_priv: the bat priv with all the soft interface information
1013   * @skb: the IPv6 packet to check
1014   * @is_unsnoopable: stores whether the destination is snoopable
1015   * @is_routable: stores whether the destination is routable
1016   *
1017   * Checks whether the given IPv6 packet has the potential to be forwarded with a
1018   * mode more optimal than classic flooding.
1019   *
1020   * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory
1021   */
1022  static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
1023  					     struct sk_buff *skb,
1024  					     bool *is_unsnoopable,
1025  					     int *is_routable)
1026  {
1027  	struct ipv6hdr *ip6hdr;
1028  
1029  	/* We might fail due to out-of-memory -> drop it */
1030  	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
1031  		return -ENOMEM;
1032  
1033  	if (batadv_mcast_is_report_ipv6(skb))
1034  		return -EINVAL;
1035  
1036  	ip6hdr = ipv6_hdr(skb);
1037  
1038  	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1039  		return -EINVAL;
1040  
1041  	/* link-local-all-nodes multicast listeners behind a bridge are
1042  	 * not snoopable (see RFC4541, section 3, paragraph 3)
1043  	 */
1044  	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
1045  		*is_unsnoopable = true;
1046  	else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
1047  		*is_routable = ETH_P_IPV6;
1048  
1049  	return 0;
1050  }
1051  
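/* Scope examples (illustrative only): an interface-local destination such
 * as ff01::1 is rejected above with -EINVAL, the link-local all-nodes
 * address ff02::1 is marked unsnoopable, and a wider scoped group such as
 * ff0e::123 (global scope) is marked routable via ETH_P_IPV6.
 */
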
1052  /**
1053   * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
1054   * @bat_priv: the bat priv with all the soft interface information
1055   * @skb: the multicast frame to check
1056   * @is_unsnoopable: stores whether the destination is snoopable
1057   * @is_routable: stores whether the destination is routable
1058   *
1059   * Checks whether the given multicast ethernet frame has the potential to be
1060   * forwarded with a mode more optimal than classic flooding.
1061   *
1062   * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory
1063   */
1064  static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
1065  					struct sk_buff *skb,
1066  					bool *is_unsnoopable,
1067  					int *is_routable)
1068  {
1069  	struct ethhdr *ethhdr = eth_hdr(skb);
1070  
1071  	if (!atomic_read(&bat_priv->multicast_mode))
1072  		return -EINVAL;
1073  
1074  	switch (ntohs(ethhdr->h_proto)) {
1075  	case ETH_P_IP:
1076  		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
1077  							 is_unsnoopable,
1078  							 is_routable);
1079  	case ETH_P_IPV6:
1080  		if (!IS_ENABLED(CONFIG_IPV6))
1081  			return -EINVAL;
1082  
1083  		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
1084  							 is_unsnoopable,
1085  							 is_routable);
1086  	default:
1087  		return -EINVAL;
1088  	}
1089  }
1090  
1091  /**
1092   * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
1093   *  interest
1094   * @bat_priv: the bat priv with all the soft interface information
1095   * @ethhdr: ethernet header of a packet
1096   *
1097   * Return: the number of nodes which want all IPv4 multicast traffic if the
1098   * given ethhdr is from an IPv4 packet or the number of nodes which want all
1099   * IPv6 traffic if it matches an IPv6 packet.
1100   */
1101  static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
1102  					       struct ethhdr *ethhdr)
1103  {
1104  	switch (ntohs(ethhdr->h_proto)) {
1105  	case ETH_P_IP:
1106  		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
1107  	case ETH_P_IPV6:
1108  		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
1109  	default:
1110  		/* we shouldn't be here... */
1111  		return 0;
1112  	}
1113  }
1114  
1115  /**
1116   * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
1117   * @bat_priv: the bat priv with all the soft interface information
1118   * @protocol: the ethernet protocol type to count multicast routers for
1119   *
1120   * Return: the number of nodes which want all routable IPv4 multicast traffic
1121   * if the protocol is ETH_P_IP or the number of nodes which want all routable
1122   * IPv6 traffic if the protocol is ETH_P_IPV6. Otherwise returns 0.
1123   */
1124  
1125  static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
1126  				       int protocol)
1127  {
1128  	switch (protocol) {
1129  	case ETH_P_IP:
1130  		return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
1131  	case ETH_P_IPV6:
1132  		return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
1133  	default:
1134  		return 0;
1135  	}
1136  }
1137  
1138  /**
1139   * batadv_mcast_forw_mode() - check on how to forward a multicast packet
1140   * @bat_priv: the bat priv with all the soft interface information
1141   * @skb: the multicast packet to check
1142   * @is_routable: stores whether the destination is routable
1143   *
1144   * Return: The forwarding mode as enum batadv_forw_mode.
1145   */
1146  enum batadv_forw_mode
1147  batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
1148  		       int *is_routable)
1149  {
1150  	int ret, tt_count, ip_count, unsnoop_count, total_count;
1151  	bool is_unsnoopable = false;
1152  	struct ethhdr *ethhdr;
1153  	int rtr_count = 0;
1154  
1155  	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
1156  					   is_routable);
1157  	if (ret == -ENOMEM)
1158  		return BATADV_FORW_NONE;
1159  	else if (ret < 0)
1160  		return BATADV_FORW_BCAST;
1161  
1162  	ethhdr = eth_hdr(skb);
1163  
1164  	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
1165  					       BATADV_NO_FLAGS);
1166  	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
1167  	unsnoop_count = !is_unsnoopable ? 0 :
1168  			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
1169  	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
1170  
1171  	total_count = tt_count + ip_count + unsnoop_count + rtr_count;
1172  
1173  	if (!total_count)
1174  		return BATADV_FORW_NONE;
1175  	else if (unsnoop_count)
1176  		return BATADV_FORW_BCAST;
1177  
1178  	if (total_count <= atomic_read(&bat_priv->multicast_fanout))
1179  		return BATADV_FORW_UCASTS;
1180  
1181  	return BATADV_FORW_BCAST;
1182  }
1183  
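/* Decision example (illustrative only, assuming the default
 * multicast_fanout of 16): for a routable IPv4 group with tt_count = 3,
 * ip_count = 0, unsnoop_count = 0 and rtr_count = 1, total_count is 4 and
 * BATADV_FORW_UCASTS is returned, i.e. four individual unicast copies get
 * sent. A link-local destination with at least one node announcing
 * BATADV_MCAST_WANT_ALL_UNSNOOPABLES (unsnoop_count > 0) instead falls back
 * to classic flooding (BATADV_FORW_BCAST).
 */
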
1184  /**
1185   * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
1186   * @bat_priv: the bat priv with all the soft interface information
1187   * @skb: the multicast packet to send
1188   * @vid: the vlan identifier
1189   * @orig_node: the originator to send the packet to
1190   *
1191   * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
1192   */
1193  static int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
1194  				       struct sk_buff *skb,
1195  				       unsigned short vid,
1196  				       struct batadv_orig_node *orig_node)
1197  {
1198  	/* Avoid sending multicast-in-unicast packets to other BLA
1199  	 * gateways - they already got the frame from the LAN side
1200  	 * we share with them.
1201  	 * TODO: Refactor to take BLA into account earlier, to avoid
1202  	 * reducing the mcast_fanout count.
1203  	 */
1204  	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
1205  		dev_kfree_skb(skb);
1206  		return NET_XMIT_SUCCESS;
1207  	}
1208  
1209  	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
1210  				       orig_node, vid);
1211  }
1212  
1213  /**
1214   * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
1215   * @bat_priv: the bat priv with all the soft interface information
1216   * @skb: the multicast packet to transmit
1217   * @vid: the vlan identifier
1218   *
1219   * Sends copies of a frame with multicast destination to any multicast
1220   * listener registered in the translation table. A transmission is performed
1221   * via a batman-adv unicast packet for each such destination node.
1222   *
1223   * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1224   * otherwise.
1225   */
1226  static int
1227  batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
1228  		     unsigned short vid)
1229  {
1230  	int ret = NET_XMIT_SUCCESS;
1231  	struct sk_buff *newskb;
1232  
1233  	struct batadv_tt_orig_list_entry *orig_entry;
1234  
1235  	struct batadv_tt_global_entry *tt_global;
1236  	const u8 *addr = eth_hdr(skb)->h_dest;
1237  
1238  	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
1239  	if (!tt_global)
1240  		goto out;
1241  
1242  	rcu_read_lock();
1243  	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
1244  		newskb = skb_copy(skb, GFP_ATOMIC);
1245  		if (!newskb) {
1246  			ret = NET_XMIT_DROP;
1247  			break;
1248  		}
1249  
1250  		batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
1251  					    orig_entry->orig_node);
1252  	}
1253  	rcu_read_unlock();
1254  
1255  	batadv_tt_global_entry_put(tt_global);
1256  
1257  out:
1258  	return ret;
1259  }
1260  
1261  /**
1262   * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
1263   * @bat_priv: the bat priv with all the soft interface information
1264   * @skb: the multicast packet to transmit
1265   * @vid: the vlan identifier
1266   *
1267   * Sends copies of a frame with multicast destination to any node with a
1268   * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
1269   * batman-adv unicast packet for each such destination node.
1270   *
1271   * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1272   * otherwise.
1273   */
1274  static int
1275  batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
1276  				struct sk_buff *skb, unsigned short vid)
1277  {
1278  	struct batadv_orig_node *orig_node;
1279  	int ret = NET_XMIT_SUCCESS;
1280  	struct sk_buff *newskb;
1281  
1282  	rcu_read_lock();
1283  	hlist_for_each_entry_rcu(orig_node,
1284  				 &bat_priv->mcast.want_all_ipv4_list,
1285  				 mcast_want_all_ipv4_node) {
1286  		newskb = skb_copy(skb, GFP_ATOMIC);
1287  		if (!newskb) {
1288  			ret = NET_XMIT_DROP;
1289  			break;
1290  		}
1291  
1292  		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1293  	}
1294  	rcu_read_unlock();
1295  	return ret;
1296  }
1297  
1298  /**
1299   * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
1300   * @bat_priv: the bat priv with all the soft interface information
1301   * @skb: The multicast packet to transmit
1302   * @vid: the vlan identifier
1303   *
1304   * Sends copies of a frame with multicast destination to any node with a
1305   * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
1306   * batman-adv unicast packet for each such destination node.
1307   *
1308   * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1309   * otherwise.
1310   */
1311  static int
1312  batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
1313  				struct sk_buff *skb, unsigned short vid)
1314  {
1315  	struct batadv_orig_node *orig_node;
1316  	int ret = NET_XMIT_SUCCESS;
1317  	struct sk_buff *newskb;
1318  
1319  	rcu_read_lock();
1320  	hlist_for_each_entry_rcu(orig_node,
1321  				 &bat_priv->mcast.want_all_ipv6_list,
1322  				 mcast_want_all_ipv6_node) {
1323  		newskb = skb_copy(skb, GFP_ATOMIC);
1324  		if (!newskb) {
1325  			ret = NET_XMIT_DROP;
1326  			break;
1327  		}
1328  
1329  		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1330  	}
1331  	rcu_read_unlock();
1332  	return ret;
1333  }
1334  
1335  /**
1336   * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
1337   * @bat_priv: the bat priv with all the soft interface information
1338   * @skb: the multicast packet to transmit
1339   * @vid: the vlan identifier
1340   *
1341   * Sends copies of a frame with multicast destination to any node with a
1342   * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
1343   * transmission is performed via a batman-adv unicast packet for each such
1344   * destination node.
1345   *
1346   * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1347   * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1348   */
1349  static int
1350  batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
1351  			   struct sk_buff *skb, unsigned short vid)
1352  {
1353  	switch (ntohs(eth_hdr(skb)->h_proto)) {
1354  	case ETH_P_IP:
1355  		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
1356  	case ETH_P_IPV6:
1357  		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
1358  	default:
1359  		/* we shouldn't be here... */
1360  		return NET_XMIT_DROP;
1361  	}
1362  }
1363  
1364  /**
1365   * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
1366   * @bat_priv: the bat priv with all the soft interface information
1367   * @skb: the multicast packet to transmit
1368   * @vid: the vlan identifier
1369   *
1370   * Sends copies of a frame with multicast destination to any node with a
1371   * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
1372   * batman-adv unicast packet for each such destination node.
1373   *
1374   * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1375   * otherwise.
1376   */
1377  static int
1378  batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
1379  				struct sk_buff *skb, unsigned short vid)
1380  {
1381  	struct batadv_orig_node *orig_node;
1382  	int ret = NET_XMIT_SUCCESS;
1383  	struct sk_buff *newskb;
1384  
1385  	rcu_read_lock();
1386  	hlist_for_each_entry_rcu(orig_node,
1387  				 &bat_priv->mcast.want_all_rtr4_list,
1388  				 mcast_want_all_rtr4_node) {
1389  		newskb = skb_copy(skb, GFP_ATOMIC);
1390  		if (!newskb) {
1391  			ret = NET_XMIT_DROP;
1392  			break;
1393  		}
1394  
1395  		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1396  	}
1397  	rcu_read_unlock();
1398  	return ret;
1399  }
1400  
1401  /**
1402   * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
1403   * @bat_priv: the bat priv with all the soft interface information
1404   * @skb: The multicast packet to transmit
1405   * @vid: the vlan identifier
1406   *
1407   * Sends copies of a frame with multicast destination to any node with a
1408   * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
1409   * batman-adv unicast packet for each such destination node.
1410   *
1411   * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1412   * otherwise.
1413   */
1414  static int
1415  batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
1416  				struct sk_buff *skb, unsigned short vid)
1417  {
1418  	struct batadv_orig_node *orig_node;
1419  	int ret = NET_XMIT_SUCCESS;
1420  	struct sk_buff *newskb;
1421  
1422  	rcu_read_lock();
1423  	hlist_for_each_entry_rcu(orig_node,
1424  				 &bat_priv->mcast.want_all_rtr6_list,
1425  				 mcast_want_all_rtr6_node) {
1426  		newskb = skb_copy(skb, GFP_ATOMIC);
1427  		if (!newskb) {
1428  			ret = NET_XMIT_DROP;
1429  			break;
1430  		}
1431  
1432  		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1433  	}
1434  	rcu_read_unlock();
1435  	return ret;
1436  }
1437  
1438  /**
1439   * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
1440   * @bat_priv: the bat priv with all the soft interface information
1441   * @skb: the multicast packet to transmit
1442   * @vid: the vlan identifier
1443   *
1444   * Sends copies of a frame with multicast destination to any node with a
1445   * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
1446   * transmission is performed via a batman-adv unicast packet for each such
1447   * destination node.
1448   *
1449   * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1450   * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1451   */
1452  static int
1453  batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
1454  			   struct sk_buff *skb, unsigned short vid)
1455  {
1456  	switch (ntohs(eth_hdr(skb)->h_proto)) {
1457  	case ETH_P_IP:
1458  		return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
1459  	case ETH_P_IPV6:
1460  		return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
1461  	default:
1462  		/* we shouldn't be here... */
1463  		return NET_XMIT_DROP;
1464  	}
1465  }
1466  
1467  /**
1468   * batadv_mcast_forw_send() - send packet to any detected multicast recipient
1469   * @bat_priv: the bat priv with all the soft interface information
1470   * @skb: the multicast packet to transmit
1471   * @vid: the vlan identifier
1472   * @is_routable: stores whether the destination is routable
1473   *
1474   * Sends copies of a frame with multicast destination to any node that signaled
1475   * interest in it, that is either via the translation table or the according
1476   * want-all flags. A transmission is performed via a batman-adv unicast packet
1477   * for each such destination node.
1478   *
1479   * The given skb is consumed/freed.
1480   *
1481   * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1482   * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1483   */
1484  int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
1485  			   unsigned short vid, int is_routable)
1486  {
1487  	int ret;
1488  
1489  	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
1490  	if (ret != NET_XMIT_SUCCESS) {
1491  		kfree_skb(skb);
1492  		return ret;
1493  	}
1494  
1495  	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
1496  	if (ret != NET_XMIT_SUCCESS) {
1497  		kfree_skb(skb);
1498  		return ret;
1499  	}
1500  
1501  	if (!is_routable)
1502  		goto skip_mc_router;
1503  
1504  	ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
1505  	if (ret != NET_XMIT_SUCCESS) {
1506  		kfree_skb(skb);
1507  		return ret;
1508  	}
1509  
1510  skip_mc_router:
1511  	consume_skb(skb);
1512  	return ret;
1513  }
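/* Usage sketch (hypothetical caller, not part of the original source):
 * batadv_mcast_forw_send() always consumes the skb, so a caller must not
 * touch the buffer again, whatever the return value:
 *
 *	ret = batadv_mcast_forw_send(bat_priv, skb, vid, is_routable);
 *	// skb must not be freed or accessed here - ownership was passed on
 *	return ret;
 *
 * Note that on an intermediate failure the copies already handed to
 * batadv_mcast_forw_send_orig() are still transmitted; only the remaining
 * destinations are skipped.
 */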
1514  
1515  /**
1516   * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
1517   * @bat_priv: the bat priv with all the soft interface information
1518   * @orig: the orig_node whose multicast state might have changed
1519   * @mcast_flags: flags indicating the new multicast state
1520   *
1521   * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
1522   * orig, has toggled then this method updates the counter and the list
1523   * accordingly.
1524   *
1525   * Caller needs to hold orig->mcast_handler_lock.
1526   */
1527  static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
1528  					     struct batadv_orig_node *orig,
1529  					     u8 mcast_flags)
1530  {
1531  	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
1532  	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
1533  
1534  	lockdep_assert_held(&orig->mcast_handler_lock);
1535  
1536  	/* switched from flag unset to set */
1537  	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
1538  	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
1539  		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
1540  
1541  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1542  		/* flag checks above + mcast_handler_lock prevents this */
1543  		WARN_ON(!hlist_unhashed(node));
1544  
1545  		hlist_add_head_rcu(node, head);
1546  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1547  	/* switched from flag set to unset */
1548  	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
1549  		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
1550  		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
1551  
1552  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1553  		/* flag checks above + mcast_handler_lock prevents this */
1554  		WARN_ON(hlist_unhashed(node));
1555  
1556  		hlist_del_init_rcu(node);
1557  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1558  	}
1559  }
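/* batadv_mcast_want_unsnoop_update() and the sibling helpers below share one
 * edge-detection pattern: compare the newly received flags against the state
 * last stored in orig->mcast_flags and only touch counter and RCU list when
 * the relevant bit actually toggles. A condensed sketch (hypothetical names):
 *
 *	bool was_set = orig->mcast_flags & FLAG;
 *	bool is_set = mcast_flags & FLAG;
 *
 *	if (is_set && !was_set) {
 *		atomic_inc(counter);
 *		hlist_add_head_rcu(node, head);
 *	} else if (!is_set && was_set) {
 *		atomic_dec(counter);
 *		hlist_del_init_rcu(node);
 *	}
 *
 * The rtr4/rtr6 variants invert the test because their flags
 * (BATADV_MCAST_WANT_NO_RTR4/6) express the opposite interest: a node is
 * listed while its "want no router traffic" bit is cleared.
 */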
1560  
1561  /**
1562   * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
1563   * @bat_priv: the bat priv with all the soft interface information
1564   * @orig: the orig_node whose multicast state might have changed
1565   * @mcast_flags: flags indicating the new multicast state
1566   *
1567   * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
1568   * toggled then this method updates the counter and the list accordingly.
1569   *
1570   * Caller needs to hold orig->mcast_handler_lock.
1571   */
1572  static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
1573  					  struct batadv_orig_node *orig,
1574  					  u8 mcast_flags)
1575  {
1576  	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
1577  	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
1578  
1579  	lockdep_assert_held(&orig->mcast_handler_lock);
1580  
1581  	/* switched from flag unset to set */
1582  	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
1583  	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
1584  		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
1585  
1586  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1587  		/* flag checks above + mcast_handler_lock prevents this */
1588  		WARN_ON(!hlist_unhashed(node));
1589  
1590  		hlist_add_head_rcu(node, head);
1591  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1592  	/* switched from flag set to unset */
1593  	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
1594  		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
1595  		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
1596  
1597  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1598  		/* flag checks above + mcast_handler_lock prevents this */
1599  		WARN_ON(hlist_unhashed(node));
1600  
1601  		hlist_del_init_rcu(node);
1602  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1603  	}
1604  }
1605  
1606  /**
1607   * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
1608   * @bat_priv: the bat priv with all the soft interface information
1609   * @orig: the orig_node whose multicast state might have changed
1610   * @mcast_flags: flags indicating the new multicast state
1611   *
1612   * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
1613   * toggled then this method updates the counter and the list accordingly.
1614   *
1615   * Caller needs to hold orig->mcast_handler_lock.
1616   */
1617  static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
1618  					  struct batadv_orig_node *orig,
1619  					  u8 mcast_flags)
1620  {
1621  	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
1622  	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
1623  
1624  	lockdep_assert_held(&orig->mcast_handler_lock);
1625  
1626  	/* switched from flag unset to set */
1627  	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
1628  	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
1629  		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
1630  
1631  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1632  		/* flag checks above + mcast_handler_lock prevents this */
1633  		WARN_ON(!hlist_unhashed(node));
1634  
1635  		hlist_add_head_rcu(node, head);
1636  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1637  	/* switched from flag set to unset */
1638  	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
1639  		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
1640  		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
1641  
1642  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1643  		/* flag checks above + mcast_handler_lock prevents this */
1644  		WARN_ON(hlist_unhashed(node));
1645  
1646  		hlist_del_init_rcu(node);
1647  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1648  	}
1649  }
1650  
1651  /**
1652   * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
1653   * @bat_priv: the bat priv with all the soft interface information
1654   * @orig: the orig_node whose multicast state might have changed
1655   * @mcast_flags: flags indicating the new multicast state
1656   *
1657   * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
1658   * toggled then this method updates the counter and the list accordingly.
1659   *
1660   * Caller needs to hold orig->mcast_handler_lock.
1661   */
1662  static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
1663  					  struct batadv_orig_node *orig,
1664  					  u8 mcast_flags)
1665  {
1666  	struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
1667  	struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;
1668  
1669  	lockdep_assert_held(&orig->mcast_handler_lock);
1670  
1671  	/* switched from flag set to unset */
1672  	if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
1673  	    orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
1674  		atomic_inc(&bat_priv->mcast.num_want_all_rtr4);
1675  
1676  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1677  		/* flag checks above + mcast_handler_lock prevents this */
1678  		WARN_ON(!hlist_unhashed(node));
1679  
1680  		hlist_add_head_rcu(node, head);
1681  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1682  	/* switched from flag unset to set */
1683  	} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
1684  		   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
1685  		atomic_dec(&bat_priv->mcast.num_want_all_rtr4);
1686  
1687  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1688  		/* flag checks above + mcast_handler_lock prevents this */
1689  		WARN_ON(hlist_unhashed(node));
1690  
1691  		hlist_del_init_rcu(node);
1692  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1693  	}
1694  }
1695  
1696  /**
1697   * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
1698   * @bat_priv: the bat priv with all the soft interface information
1699   * @orig: the orig_node whose multicast state might have changed
1700   * @mcast_flags: flags indicating the new multicast state
1701   *
1702   * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
1703   * toggled then this method updates the counter and the list accordingly.
1704   *
1705   * Caller needs to hold orig->mcast_handler_lock.
1706   */
1707  static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
1708  					  struct batadv_orig_node *orig,
1709  					  u8 mcast_flags)
1710  {
1711  	struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
1712  	struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;
1713  
1714  	lockdep_assert_held(&orig->mcast_handler_lock);
1715  
1716  	/* switched from flag set to unset */
1717  	if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
1718  	    orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
1719  		atomic_inc(&bat_priv->mcast.num_want_all_rtr6);
1720  
1721  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1722  		/* flag checks above + mcast_handler_lock prevents this */
1723  		WARN_ON(!hlist_unhashed(node));
1724  
1725  		hlist_add_head_rcu(node, head);
1726  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1727  	/* switched from flag unset to set */
1728  	} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
1729  		   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
1730  		atomic_dec(&bat_priv->mcast.num_want_all_rtr6);
1731  
1732  		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1733  		/* flag checks above + mcast_handler_lock prevents this */
1734  		WARN_ON(hlist_unhashed(node));
1735  
1736  		hlist_del_init_rcu(node);
1737  		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1738  	}
1739  }
1740  
1741  /**
1742   * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV
1743   * @enabled: whether the originator has multicast TVLV support enabled
1744   * @tvlv_value: tvlv buffer containing the multicast flags
1745   * @tvlv_value_len: tvlv buffer length
1746   *
1747   * Return: multicast flags for the given tvlv buffer
1748   */
1749  static u8
1750  batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
1751  {
1752  	u8 mcast_flags = BATADV_NO_FLAGS;
1753  
1754  	if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
1755  		mcast_flags = *(u8 *)tvlv_value;
1756  
1757  	if (!enabled) {
1758  		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
1759  		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
1760  	}
1761  
1762  	/* remove redundant flags to avoid sending duplicate packets later */
1763  	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
1764  		mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;
1765  
1766  	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
1767  		mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
1768  
1769  	return mcast_flags;
1770  }
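/* Worked example (derived from the logic above): for an originator without
 * multicast TVLV support (enabled == false) the returned flags are
 *
 *	BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6 |
 *	BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6
 *
 * i.e. such a node already receives all IPv4/IPv6 multicast traffic via the
 * want-all lists, so also adding it to the router lists would only produce
 * duplicate packets.
 */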
1771  
1772  /**
1773   * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
1774   * @bat_priv: the bat priv with all the soft interface information
1775   * @orig: the orig_node of the ogm
1776   * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
1777   * @tvlv_value: tvlv buffer containing the multicast data
1778   * @tvlv_value_len: tvlv buffer length
1779   */
1780  static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
1781  					  struct batadv_orig_node *orig,
1782  					  u8 flags,
1783  					  void *tvlv_value,
1784  					  u16 tvlv_value_len)
1785  {
1786  	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1787  	u8 mcast_flags;
1788  
1789  	mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
1790  						  tvlv_value, tvlv_value_len);
1791  
1792  	spin_lock_bh(&orig->mcast_handler_lock);
1793  
1794  	if (orig_mcast_enabled &&
1795  	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
1796  		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
1797  	} else if (!orig_mcast_enabled &&
1798  		   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
1799  		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
1800  	}
1801  
1802  	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
1803  
1804  	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
1805  	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
1806  	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
1807  	batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
1808  	batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);
1809  
1810  	orig->mcast_flags = mcast_flags;
1811  	spin_unlock_bh(&orig->mcast_handler_lock);
1812  }
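/* Note: the want_*_update() calls above must run before orig->mcast_flags is
 * overwritten, since they detect toggles by comparing the freshly parsed
 * flags against the previously stored value; orig->mcast_handler_lock
 * serializes concurrent updates for the same originator.
 */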
1813  
1814  /**
1815   * batadv_mcast_init() - initialize the multicast optimizations structures
1816   * @bat_priv: the bat priv with all the soft interface information
1817   */
1818  void batadv_mcast_init(struct batadv_priv *bat_priv)
1819  {
1820  	batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
1821  				     NULL, NULL, BATADV_TVLV_MCAST, 2,
1822  				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1823  
1824  	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
1825  	batadv_mcast_start_timer(bat_priv);
1826  }
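/* Sketch (hypothetical, generic delayed-work pattern): a periodic worker set
 * up via INIT_DELAYED_WORK() usually re-arms itself at the end of each run,
 * roughly
 *
 *	static void example_update(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct example_priv *priv =
 *			container_of(dwork, struct example_priv, work);
 *
 *		do_periodic_update(priv);
 *		queue_delayed_work(example_wq, &priv->work,
 *				   msecs_to_jiffies(500));
 *	}
 *
 * example_update, example_priv, do_periodic_update and example_wq are
 * placeholder names; the MLA update worker scheduled above presumably
 * follows the same re-arming scheme via its periodic timer helper.
 */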
1827  
1828  /**
1829   * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
1830   * @msg: buffer for the message
1831   * @bat_priv: the bat priv with all the soft interface information
1832   *
1833   * Return: 0 or error code.
1834   */
1835  int batadv_mcast_mesh_info_put(struct sk_buff *msg,
1836  			       struct batadv_priv *bat_priv)
1837  {
1838  	u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
1839  	u32 flags_priv = BATADV_NO_FLAGS;
1840  
1841  	if (bat_priv->mcast.mla_flags.bridged) {
1842  		flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
1843  
1844  		if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
1845  			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
1846  		if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
1847  			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
1848  		if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
1849  			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
1850  		if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
1851  			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
1852  	}
1853  
1854  	if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
1855  	    nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
1856  		return -EMSGSIZE;
1857  
1858  	return 0;
1859  }
1860  
1861  /**
1862   * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
1863   *  to a netlink socket
1864   * @msg: buffer for the message
1865   * @portid: netlink port
1866   * @cb: Control block containing additional options
1867   * @orig_node: originator to dump the multicast flags of
1868   *
1869   * Return: 0 or error code.
1870   */
1871  static int
1872  batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
1873  			      struct netlink_callback *cb,
1874  			      struct batadv_orig_node *orig_node)
1875  {
1876  	void *hdr;
1877  
1878  	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
1879  			  &batadv_netlink_family, NLM_F_MULTI,
1880  			  BATADV_CMD_GET_MCAST_FLAGS);
1881  	if (!hdr)
1882  		return -ENOBUFS;
1883  
1884  	genl_dump_check_consistent(cb, hdr);
1885  
1886  	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
1887  		    orig_node->orig)) {
1888  		genlmsg_cancel(msg, hdr);
1889  		return -EMSGSIZE;
1890  	}
1891  
1892  	if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1893  		     &orig_node->capabilities)) {
1894  		if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
1895  				orig_node->mcast_flags)) {
1896  			genlmsg_cancel(msg, hdr);
1897  			return -EMSGSIZE;
1898  		}
1899  	}
1900  
1901  	genlmsg_end(msg, hdr);
1902  	return 0;
1903  }
1904  
1905  /**
1906   * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
1907   *  table to a netlink socket
1908   * @msg: buffer for the message
1909   * @portid: netlink port
1910   * @cb: Control block containing additional options
1911   * @hash: hash to dump
1912   * @bucket: bucket index to dump
1913   * @idx_skip: How many entries to skip
1914   *
1915   * Return: 0 or error code.
1916   */
1917  static int
1918  batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
1919  			       struct netlink_callback *cb,
1920  			       struct batadv_hashtable *hash,
1921  			       unsigned int bucket, long *idx_skip)
1922  {
1923  	struct batadv_orig_node *orig_node;
1924  	long idx = 0;
1925  
1926  	spin_lock_bh(&hash->list_locks[bucket]);
1927  	cb->seq = atomic_read(&hash->generation) << 1 | 1;
1928  
1929  	hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
1930  		if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1931  			      &orig_node->capa_initialized))
1932  			continue;
1933  
1934  		if (idx < *idx_skip)
1935  			goto skip;
1936  
1937  		if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
1938  			spin_unlock_bh(&hash->list_locks[bucket]);
1939  			*idx_skip = idx;
1940  
1941  			return -EMSGSIZE;
1942  		}
1943  
1944  skip:
1945  		idx++;
1946  	}
1947  	spin_unlock_bh(&hash->list_locks[bucket]);
1948  
1949  	return 0;
1950  }
1951  
1952  /**
1953   * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
1954   * @msg: buffer for the message
1955   * @portid: netlink port
1956   * @cb: Control block containing additional options
1957   * @bat_priv: the bat priv with all the soft interface information
1958   * @bucket: current bucket to dump
1959   * @idx: index in current bucket to the next entry to dump
1960   *
1961   * Return: 0 or error code.
1962   */
1963  static int
1964  __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
1965  			  struct netlink_callback *cb,
1966  			  struct batadv_priv *bat_priv, long *bucket, long *idx)
1967  {
1968  	struct batadv_hashtable *hash = bat_priv->orig_hash;
1969  	long bucket_tmp = *bucket;
1970  	long idx_tmp = *idx;
1971  
1972  	while (bucket_tmp < hash->size) {
1973  		if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
1974  						   bucket_tmp, &idx_tmp))
1975  			break;
1976  
1977  		bucket_tmp++;
1978  		idx_tmp = 0;
1979  	}
1980  
1981  	*bucket = bucket_tmp;
1982  	*idx = idx_tmp;
1983  
1984  	return msg->len;
1985  }
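/* Sketch (condensed from the loop above, with the bucket helper abbreviated):
 * this is the usual resumable netlink dump pattern. Both cursors live in the
 * callback state so a message that ran out of space can be continued exactly
 * where it stopped:
 *
 *	while (*bucket < hash->size) {
 *		if (dump_bucket(msg, ..., *bucket, idx))
 *			break;		// skb full, resume here next call
 *		(*bucket)++;
 *		*idx = 0;
 *	}
 *
 * The caller keeps bucket and idx in cb->args[0] and cb->args[1] between
 * invocations (see batadv_mcast_flags_dump() below).
 */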
1986  
1987  /**
1988   * batadv_mcast_netlink_get_primary() - get primary interface from netlink
1989   *  callback
1990   * @cb: netlink callback structure
1991   * @primary_if: the primary interface pointer to return the result in
1992   *
1993   * Return: 0 or error code.
1994   */
1995  static int
1996  batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
1997  				 struct batadv_hard_iface **primary_if)
1998  {
1999  	struct batadv_hard_iface *hard_iface = NULL;
2000  	struct net *net = sock_net(cb->skb->sk);
2001  	struct net_device *soft_iface;
2002  	struct batadv_priv *bat_priv;
2003  	int ifindex;
2004  	int ret = 0;
2005  
2006  	ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
2007  	if (!ifindex)
2008  		return -EINVAL;
2009  
2010  	soft_iface = dev_get_by_index(net, ifindex);
2011  	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2012  		ret = -ENODEV;
2013  		goto out;
2014  	}
2015  
2016  	bat_priv = netdev_priv(soft_iface);
2017  
2018  	hard_iface = batadv_primary_if_get_selected(bat_priv);
2019  	if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
2020  		ret = -ENOENT;
2021  		goto out;
2022  	}
2023  
2024  out:
2025  	dev_put(soft_iface);
2026  
2027  	if (!ret && primary_if)
2028  		*primary_if = hard_iface;
2029  	else
2030  		batadv_hardif_put(hard_iface);
2031  
2032  	return ret;
2033  }
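/* Note on reference counting (derived from the function above): both
 * dev_get_by_index() and batadv_primary_if_get_selected() take a reference.
 * The soft interface reference is always dropped via dev_put() before
 * returning, while the hard interface reference is either handed over to the
 * caller through *primary_if or released with batadv_hardif_put() on the
 * error paths.
 */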
2034  
2035  /**
2036   * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
2037   * @msg: buffer for the message
2038   * @cb: callback structure containing arguments
2039   *
2040   * Return: message length.
2041   */
2042  int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
2043  {
2044  	struct batadv_hard_iface *primary_if = NULL;
2045  	int portid = NETLINK_CB(cb->skb).portid;
2046  	struct batadv_priv *bat_priv;
2047  	long *bucket = &cb->args[0];
2048  	long *idx = &cb->args[1];
2049  	int ret;
2050  
2051  	ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
2052  	if (ret)
2053  		return ret;
2054  
2055  	bat_priv = netdev_priv(primary_if->soft_iface);
2056  	ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
2057  
2058  	batadv_hardif_put(primary_if);
2059  	return ret;
2060  }
2061  
2062  /**
2063   * batadv_mcast_free() - free the multicast optimizations structures
2064   * @bat_priv: the bat priv with all the soft interface information
2065   */
2066  void batadv_mcast_free(struct batadv_priv *bat_priv)
2067  {
2068  	cancel_delayed_work_sync(&bat_priv->mcast.work);
2069  
2070  	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
2071  	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
2072  
2073  	/* safely calling outside of worker, as worker was canceled above */
2074  	batadv_mcast_mla_tt_retract(bat_priv, NULL);
2075  }
2076  
2077  /**
2078   * batadv_mcast_purge_orig() - reset originator global mcast state modifications
2079   * @orig: the originator which is going to get purged
2080   */
2081  void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
2082  {
2083  	struct batadv_priv *bat_priv = orig->bat_priv;
2084  
2085  	spin_lock_bh(&orig->mcast_handler_lock);
2086  
2087  	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
2088  	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
2089  	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
2090  	batadv_mcast_want_rtr4_update(bat_priv, orig,
2091  				      BATADV_MCAST_WANT_NO_RTR4);
2092  	batadv_mcast_want_rtr6_update(bat_priv, orig,
2093  				      BATADV_MCAST_WANT_NO_RTR6);
2094  
2095  	spin_unlock_bh(&orig->mcast_handler_lock);
2096  }
2097