1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2014-2017 B.A.T.M.A.N. contributors: 3 * 4 * Linus Lüssing 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of version 2 of the GNU General Public 8 * License as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, but 11 * WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 * General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, see <http://www.gnu.org/licenses/>. 17 */ 18 19 #include "multicast.h" 20 #include "main.h" 21 22 #include <linux/atomic.h> 23 #include <linux/bitops.h> 24 #include <linux/bug.h> 25 #include <linux/byteorder/generic.h> 26 #include <linux/errno.h> 27 #include <linux/etherdevice.h> 28 #include <linux/gfp.h> 29 #include <linux/icmpv6.h> 30 #include <linux/if_bridge.h> 31 #include <linux/if_ether.h> 32 #include <linux/igmp.h> 33 #include <linux/in.h> 34 #include <linux/in6.h> 35 #include <linux/ip.h> 36 #include <linux/ipv6.h> 37 #include <linux/jiffies.h> 38 #include <linux/kernel.h> 39 #include <linux/kref.h> 40 #include <linux/list.h> 41 #include <linux/lockdep.h> 42 #include <linux/netdevice.h> 43 #include <linux/printk.h> 44 #include <linux/rculist.h> 45 #include <linux/rcupdate.h> 46 #include <linux/seq_file.h> 47 #include <linux/skbuff.h> 48 #include <linux/slab.h> 49 #include <linux/spinlock.h> 50 #include <linux/stddef.h> 51 #include <linux/string.h> 52 #include <linux/types.h> 53 #include <linux/workqueue.h> 54 #include <net/addrconf.h> 55 #include <net/if_inet6.h> 56 #include <net/ip.h> 57 #include <net/ipv6.h> 58 #include <uapi/linux/batadv_packet.h> 59 60 #include "hard-interface.h" 61 #include "hash.h" 62 #include "log.h" 63 #include "translation-table.h" 64 #include "tvlv.h" 65 66 
static void batadv_mcast_mla_update(struct work_struct *work); 67 68 /** 69 * batadv_mcast_start_timer() - schedule the multicast periodic worker 70 * @bat_priv: the bat priv with all the soft interface information 71 */ 72 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv) 73 { 74 queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work, 75 msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD)); 76 } 77 78 /** 79 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists 80 * @soft_iface: netdev struct of the mesh interface 81 * 82 * If the given soft interface has a bridge on top then the refcount 83 * of the according net device is increased. 84 * 85 * Return: NULL if no such bridge exists. Otherwise the net device of the 86 * bridge. 87 */ 88 static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface) 89 { 90 struct net_device *upper = soft_iface; 91 92 rcu_read_lock(); 93 do { 94 upper = netdev_master_upper_dev_get_rcu(upper); 95 } while (upper && !(upper->priv_flags & IFF_EBRIDGE)); 96 97 if (upper) 98 dev_hold(upper); 99 rcu_read_unlock(); 100 101 return upper; 102 } 103 104 /** 105 * batadv_mcast_mla_softif_get() - get softif multicast listeners 106 * @dev: the device to collect multicast addresses from 107 * @mcast_list: a list to put found addresses into 108 * 109 * Collects multicast addresses of multicast listeners residing 110 * on this kernel on the given soft interface, dev, in 111 * the given mcast_list. In general, multicast listeners provided by 112 * your multicast receiving applications run directly on this node. 113 * 114 * If there is a bridge interface on top of dev, collects from that one 115 * instead. Just like with IP addresses and routes, multicast listeners 116 * will(/should) register to the bridge interface instead of an 117 * enslaved bat0. 118 * 119 * Return: -ENOMEM on memory allocation error or the number of 120 * items added to the mcast_list otherwise. 
121 */ 122 static int batadv_mcast_mla_softif_get(struct net_device *dev, 123 struct hlist_head *mcast_list) 124 { 125 struct net_device *bridge = batadv_mcast_get_bridge(dev); 126 struct netdev_hw_addr *mc_list_entry; 127 struct batadv_hw_addr *new; 128 int ret = 0; 129 130 netif_addr_lock_bh(bridge ? bridge : dev); 131 netdev_for_each_mc_addr(mc_list_entry, bridge ? bridge : dev) { 132 new = kmalloc(sizeof(*new), GFP_ATOMIC); 133 if (!new) { 134 ret = -ENOMEM; 135 break; 136 } 137 138 ether_addr_copy(new->addr, mc_list_entry->addr); 139 hlist_add_head(&new->list, mcast_list); 140 ret++; 141 } 142 netif_addr_unlock_bh(bridge ? bridge : dev); 143 144 if (bridge) 145 dev_put(bridge); 146 147 return ret; 148 } 149 150 /** 151 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list 152 * @mcast_addr: the multicast address to check 153 * @mcast_list: the list with multicast addresses to search in 154 * 155 * Return: true if the given address is already in the given list. 156 * Otherwise returns false. 157 */ 158 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr, 159 struct hlist_head *mcast_list) 160 { 161 struct batadv_hw_addr *mcast_entry; 162 163 hlist_for_each_entry(mcast_entry, mcast_list, list) 164 if (batadv_compare_eth(mcast_entry->addr, mcast_addr)) 165 return true; 166 167 return false; 168 } 169 170 /** 171 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address 172 * @dst: destination to write to - a multicast MAC address 173 * @src: source to read from - a multicast IP address 174 * 175 * Converts a given multicast IPv4/IPv6 address from a bridge 176 * to its matching multicast MAC address and copies it into the given 177 * destination buffer. 178 * 179 * Caller needs to make sure the destination buffer can hold 180 * at least ETH_ALEN bytes. 
181 */ 182 static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src) 183 { 184 if (src->proto == htons(ETH_P_IP)) 185 ip_eth_mc_map(src->u.ip4, dst); 186 #if IS_ENABLED(CONFIG_IPV6) 187 else if (src->proto == htons(ETH_P_IPV6)) 188 ipv6_eth_mc_map(&src->u.ip6, dst); 189 #endif 190 else 191 eth_zero_addr(dst); 192 } 193 194 /** 195 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners 196 * @dev: a bridge slave whose bridge to collect multicast addresses from 197 * @mcast_list: a list to put found addresses into 198 * 199 * Collects multicast addresses of multicast listeners residing 200 * on foreign, non-mesh devices which we gave access to our mesh via 201 * a bridge on top of the given soft interface, dev, in the given 202 * mcast_list. 203 * 204 * Return: -ENOMEM on memory allocation error or the number of 205 * items added to the mcast_list otherwise. 206 */ 207 static int batadv_mcast_mla_bridge_get(struct net_device *dev, 208 struct hlist_head *mcast_list) 209 { 210 struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list); 211 struct br_ip_list *br_ip_entry, *tmp; 212 struct batadv_hw_addr *new; 213 u8 mcast_addr[ETH_ALEN]; 214 int ret; 215 216 /* we don't need to detect these devices/listeners, the IGMP/MLD 217 * snooping code of the Linux bridge already does that for us 218 */ 219 ret = br_multicast_list_adjacent(dev, &bridge_mcast_list); 220 if (ret < 0) 221 goto out; 222 223 list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) { 224 batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr); 225 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list)) 226 continue; 227 228 new = kmalloc(sizeof(*new), GFP_ATOMIC); 229 if (!new) { 230 ret = -ENOMEM; 231 break; 232 } 233 234 ether_addr_copy(new->addr, mcast_addr); 235 hlist_add_head(&new->list, mcast_list); 236 } 237 238 out: 239 list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) { 240 list_del(&br_ip_entry->list); 241 
kfree(br_ip_entry); 242 } 243 244 return ret; 245 } 246 247 /** 248 * batadv_mcast_mla_list_free() - free a list of multicast addresses 249 * @mcast_list: the list to free 250 * 251 * Removes and frees all items in the given mcast_list. 252 */ 253 static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list) 254 { 255 struct batadv_hw_addr *mcast_entry; 256 struct hlist_node *tmp; 257 258 hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) { 259 hlist_del(&mcast_entry->list); 260 kfree(mcast_entry); 261 } 262 } 263 264 /** 265 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements 266 * @bat_priv: the bat priv with all the soft interface information 267 * @mcast_list: a list of addresses which should _not_ be removed 268 * 269 * Retracts the announcement of any multicast listener from the 270 * translation table except the ones listed in the given mcast_list. 271 * 272 * If mcast_list is NULL then all are retracted. 273 * 274 * Do not call outside of the mcast worker! 
 * (or cancel mcast worker first)
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	/* mla_list is only safe to touch while the worker is not scheduled
	 * (see the non-parallel execution note on __batadv_mcast_mla_update)
	 */
	WARN_ON(delayed_work_pending(&bat_priv->mcast.work));

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		/* keep entries which are still in the new mcast_list */
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 *
 * Do not call outside of the mcast worker! (or cancel mcast worker first)
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	/* see batadv_mcast_mla_tt_retract(): mla_list access relies on the
	 * worker not running in parallel
	 */
	WARN_ON(delayed_work_pending(&bat_priv->mcast.work));

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		/* successfully announced: move the entry from the candidate
		 * list over to our own, tracked mla_list
		 */
		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}

/**
 * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Checks whether there is a bridge on top of our soft interface.
 *
 * Return: true if there is a bridge, false otherwise.
 */
static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
{
	struct net_device *upper = bat_priv->soft_iface;

	/* same upper-device walk as batadv_mcast_get_bridge(), but without
	 * taking a reference - only the existence check is needed here
	 */
	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_querier_log() - debug output regarding the querier status on
 *  link
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
 * @old_state: the previous querier state on our link
 * @new_state: the new querier state on our link
 *
 * Outputs debug messages to the logging facility with log level 'mcast'
 * regarding changes to the querier status on the link which are relevant
 * to our multicast optimizations.
 *
 * Usually this is about whether a querier appeared or vanished in
 * our mesh or whether the querier is in the suboptimal position of being
 * behind our local bridge segment: Snooping switches will directly
 * forward listener reports to the querier, therefore batman-adv and
 * the bridge will potentially not see these listeners - the querier is
 * potentially shadowing listeners from us then.
 *
 * This is only interesting for nodes with a bridge on top of their
 * soft interface.
 */
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
			 struct batadv_mcast_querier_state *old_state,
			 struct batadv_mcast_querier_state *new_state)
{
	if (!old_state->exists && new_state->exists)
		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
			    str_proto);
	else if (old_state->exists && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "%s Querier disappeared - multicast optimizations disabled\n",
			    str_proto);
	else if (!bat_priv->mcast.bridged && !new_state->exists)
		/* first call after a bridge was added and still no querier */
		batadv_info(bat_priv->soft_iface,
			    "No %s Querier present - multicast optimizations disabled\n",
			    str_proto);

	if (new_state->exists) {
		if ((!old_state->shadowing && new_state->shadowing) ||
		    (!old_state->exists && new_state->shadowing))
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
				   str_proto);
		else if (old_state->shadowing && !new_state->shadowing)
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is not behind our bridged segment\n",
				   str_proto);
	}
}

/**
 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
 *  setups
 * @bat_priv: the bat priv with all the soft interface information
 * @bridged: a flag about whether the soft interface is currently bridged or not
 * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
 * @querier_ipv6: (maybe) new status of a potential, selected MLD querier
 *
 * If no bridges are ever used on this node, then this function does nothing.
 *
 * Otherwise this function outputs debug information to the 'mcast' log level
 * which might be relevant to our multicast optimizations.
 *
 * More precisely, it outputs information when a bridge interface is added or
 * removed from a soft interface. And when a bridge is present, it further
 * outputs information about the querier state which is relevant for the
 * multicast flags this node is going to set.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
			struct batadv_mcast_querier_state *querier_ipv4,
			struct batadv_mcast_querier_state *querier_ipv6)
{
	if (!bat_priv->mcast.bridged && bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (bat_priv->mcast.bridged && !bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &bat_priv->mcast.querier_ipv4,
					 querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &bat_priv->mcast.querier_ipv6,
					 querier_ipv6);
	}
}

/**
 * batadv_mcast_flags_log() - output debug information about mcast flag changes
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: flags indicating the new multicast state
 *
 * Whenever the multicast flags this node announces change (@flags vs.
 * bat_priv->mcast.flags), this notifies userspace via the 'mcast' log level.
 */
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
	u8 old_flags = bat_priv->mcast.flags;
	/* "[...]" is exactly as long as the "[%c%c%c]" output below, so the
	 * sprintf() always fits (5 characters plus NUL terminator)
	 */
	char str_old_flags[] = "[...]";

	sprintf(str_old_flags, "[%c%c%c]",
		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');

	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
		   "Changing multicast flags from '%s' to '[%c%c%c]'\n",
		   bat_priv->mcast.enabled ? str_old_flags : "<undefined>",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
}

/**
 * batadv_mcast_mla_tvlv_update() - update multicast tvlv
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 *
 * Return: false if we want all IPv4 && IPv6 multicast traffic and true
 * otherwise.
 */
static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
{
	struct batadv_tvlv_mcast_data mcast_data;
	struct batadv_mcast_querier_state querier4 = {false, false};
	struct batadv_mcast_querier_state querier6 = {false, false};
	struct net_device *dev = bat_priv->soft_iface;
	bool bridged;

	mcast_data.flags = BATADV_NO_FLAGS;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	/* without a bridge on top, all querier checks are skipped and no
	 * WANT_ALL_* flag is set
	 */
	bridged = batadv_mcast_has_bridge(bat_priv);
	if (!bridged)
		goto update;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	querier4.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	querier4.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	querier6.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	querier6.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	mcast_data.flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signalize other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!querier4.exists || querier4.shadowing)
		mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV4;

	if (!querier6.exists || querier6.shadowing)
		mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV6;

update:
	/* log before overwriting the previous state below, the log helpers
	 * compare old (bat_priv->mcast.*) against new state
	 */
	batadv_mcast_bridge_log(bat_priv, bridged, &querier4, &querier6);

	bat_priv->mcast.querier_ipv4.exists = querier4.exists;
	bat_priv->mcast.querier_ipv4.shadowing = querier4.shadowing;

	bat_priv->mcast.querier_ipv6.exists = querier6.exists;
	bat_priv->mcast.querier_ipv6.shadowing = querier6.shadowing;

	bat_priv->mcast.bridged = bridged;

	/* (re)register the mcast TVLV (version 2) only on the first run or
	 * when the announced flags actually changed
	 */
	if (!bat_priv->mcast.enabled ||
	    mcast_data.flags != bat_priv->mcast.flags) {
		batadv_mcast_flags_log(bat_priv, mcast_data.flags);
		batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
					       &mcast_data, sizeof(mcast_data));
		bat_priv->mcast.flags = mcast_data.flags;
		bat_priv->mcast.enabled = true;
	}

	return !(mcast_data.flags &
		 (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
}

/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	int ret;

	/* if flags are set which indicate we want all IPv4+IPv6 multicast
	 * traffic anyway, then individual MLAs are not needed: retract all
	 * announcements with the still empty mcast_list
	 */
	if (!batadv_mcast_mla_tvlv_update(bat_priv))
		goto update;

	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list);
	if (ret < 0)
		goto out;

update:
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);

out:
	/* frees whatever tt_add did not move over to mcast.mla_list */
	batadv_mcast_mla_list_free(&mcast_list);
}

/**
 * batadv_mcast_mla_update() - update the own MLAs
 * @work: kernel work struct
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * In the end, reschedules the work timer.
 */
static void batadv_mcast_mla_update(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_mcast *priv_mcast;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

	__batadv_mcast_mla_update(bat_priv);
	batadv_mcast_start_timer(bat_priv);
}

/**
 * batadv_mcast_is_report_ipv4() - check for IGMP reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid IGMP report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
	if (ip_mc_check_igmp(skb, NULL) < 0)
		return false;

	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	/* IGMP reports are consumed by queriers/snooping switches, not
	 * forwardable via a single optimized route
	 */
	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	iphdr = ip_hdr(skb);

	/* TODO: Implement Multicast Router Discovery (RFC4286),
	 * then allow scope > link local, too
	 */
	if (!ipv4_is_local_multicast(iphdr->daddr))
		return -EINVAL;

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	*is_unsnoopable = true;

	return 0;
}

/**
 * batadv_mcast_is_report_ipv6() - check for MLD reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid MLD report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
	if (ipv6_mc_check_mld(skb, NULL) < 0)
		return false;

	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MLD2_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	ip6hdr = ipv6_hdr(skb);

	/* TODO: Implement Multicast Router Discovery (RFC4286),
	 * then allow scope > link local, too
	 */
	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) != IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;

	return 0;
}

/**
 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame to check
 * @is_unsnoopable: stores whether the destination is snoopable
 *
 * Checks whether the given multicast ethernet frame has
 * the potential to be
 * forwarded with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory
 */
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
					struct sk_buff *skb,
					bool *is_unsnoopable)
{
	struct ethhdr *ethhdr = eth_hdr(skb);

	if (!atomic_read(&bat_priv->multicast_mode))
		return -EINVAL;

	/* some originator does not support the multicast optimizations:
	 * fall back to classic flooding
	 */
	if (atomic_read(&bat_priv->mcast.num_disabled))
		return -EINVAL;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
							 is_unsnoopable);
	case ETH_P_IPV6:
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;

		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
							 is_unsnoopable);
	default:
		return -EINVAL;
	}
}

/**
 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
 *  interest
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: ethernet header of a packet
 *
 * Return: the number of nodes which want all IPv4 multicast traffic if the
 * given ethhdr is from an IPv4 packet or the number of nodes which want all
 * IPv6 traffic if it matches an IPv6 packet.
 */
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
					       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
	default:
		/* we shouldn't be here... */
		return 0;
	}
}

/**
 * batadv_mcast_forw_tt_node_get() - get a multicast tt node
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the ether header containing the multicast destination
 *
 * Return: an orig_node matching the multicast address provided by ethhdr
 * via a translation table lookup. This increases the returned nodes refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	return batadv_transtable_search(bat_priv, ethhdr->h_source,
					ethhdr->h_dest, BATADV_NO_FLAGS);
}

/**
 * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		/* skip entries whose refcount already dropped to zero - they
		 * are about to be freed
		 */
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
 * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_ipv4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_ipv6_node_get(bat_priv);
	default:
		/* we shouldn't be here... */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
 * set and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_unsnoopables_list,
				 mcast_want_all_unsnoopables_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
 * should be forwarded to.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       struct batadv_orig_node **orig)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	struct ethhdr *ethhdr;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_ALL;

	/* re-fetch the header: the mode check above might have reallocated
	 * skb data (see batadv_mcast_is_report_ipv4/6)
	 */
	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);

	total_count = tt_count + ip_count + unsnoop_count;

	switch (total_count) {
	case 1:
		/* exactly one interested node: try to resolve it and forward
		 * via a single unicast transmission
		 */
		if (tt_count)
			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
		else if (ip_count)
			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
		else if (unsnoop_count)
			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);

		if (*orig)
			return BATADV_FORW_SINGLE;

		/* fall through */
	case 0:
		return BATADV_FORW_NONE;
	default:
		return BATADV_FORW_ALL;
	}
}

/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
 * orig, has toggled then this method updates counter and list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
990 */ 991 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, 992 struct batadv_orig_node *orig, 993 u8 mcast_flags) 994 { 995 struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node; 996 struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list; 997 998 lockdep_assert_held(&orig->mcast_handler_lock); 999 1000 /* switched from flag unset to set */ 1001 if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES && 1002 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) { 1003 atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables); 1004 1005 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1006 /* flag checks above + mcast_handler_lock prevents this */ 1007 WARN_ON(!hlist_unhashed(node)); 1008 1009 hlist_add_head_rcu(node, head); 1010 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1011 /* switched from flag set to unset */ 1012 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) && 1013 orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) { 1014 atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables); 1015 1016 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1017 /* flag checks above + mcast_handler_lock prevents this */ 1018 WARN_ON(hlist_unhashed(node)); 1019 1020 hlist_del_init_rcu(node); 1021 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1022 } 1023 } 1024 1025 /** 1026 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list 1027 * @bat_priv: the bat priv with all the soft interface information 1028 * @orig: the orig_node which multicast state might have changed of 1029 * @mcast_flags: flags indicating the new multicast state 1030 * 1031 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has 1032 * toggled then this method updates counter and list accordingly. 1033 * 1034 * Caller needs to hold orig->mcast_handler_lock. 
1035 */ 1036 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, 1037 struct batadv_orig_node *orig, 1038 u8 mcast_flags) 1039 { 1040 struct hlist_node *node = &orig->mcast_want_all_ipv4_node; 1041 struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list; 1042 1043 lockdep_assert_held(&orig->mcast_handler_lock); 1044 1045 /* switched from flag unset to set */ 1046 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 && 1047 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) { 1048 atomic_inc(&bat_priv->mcast.num_want_all_ipv4); 1049 1050 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1051 /* flag checks above + mcast_handler_lock prevents this */ 1052 WARN_ON(!hlist_unhashed(node)); 1053 1054 hlist_add_head_rcu(node, head); 1055 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1056 /* switched from flag set to unset */ 1057 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) && 1058 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) { 1059 atomic_dec(&bat_priv->mcast.num_want_all_ipv4); 1060 1061 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1062 /* flag checks above + mcast_handler_lock prevents this */ 1063 WARN_ON(hlist_unhashed(node)); 1064 1065 hlist_del_init_rcu(node); 1066 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1067 } 1068 } 1069 1070 /** 1071 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list 1072 * @bat_priv: the bat priv with all the soft interface information 1073 * @orig: the orig_node which multicast state might have changed of 1074 * @mcast_flags: flags indicating the new multicast state 1075 * 1076 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has 1077 * toggled then this method updates counter and list accordingly. 1078 * 1079 * Caller needs to hold orig->mcast_handler_lock. 
 */
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node of the ogm
 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
 * @tvlv_value: tvlv buffer containing the multicast data
 * @tvlv_value_len: tvlv buffer length
 *
 * Updates the originator's multicast capability bit, the per-flag
 * want-all counters/lists and its cached mcast_flags, all under
 * orig->mcast_handler_lock.
 */
static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 flags,
					  void *tvlv_value,
					  u16 tvlv_value_len)
{
	/* CIFNOTFND set means the OGM carried no mcast tvlv container,
	 * i.e. the originator does not support multicast optimizations
	 */
	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
	u8 mcast_flags = BATADV_NO_FLAGS;
	bool orig_initialized;

	/* only the first tvlv byte is used as flags; shorter buffers keep
	 * the BATADV_NO_FLAGS default
	 */
	if (orig_mcast_enabled && tvlv_value &&
	    tvlv_value_len >= sizeof(mcast_flags))
		mcast_flags = *(u8 *)tvlv_value;

	spin_lock_bh(&orig->mcast_handler_lock);
	orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
				    &orig->capa_initialized);

	/* If mcast support is turned on decrease the disabled mcast node
	 * counter only if we had increased it for this node before. If this
	 * is a completely new orig_node no need to decrease the counter.
	 */
	if (orig_mcast_enabled &&
	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		if (orig_initialized)
			atomic_dec(&bat_priv->mcast.num_disabled);
		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	/* If mcast support is being switched off or if this is an initial
	 * OGM without mcast support then increase the disabled mcast
	 * node counter.
	 */
	} else if (!orig_mcast_enabled &&
		   (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
		    !orig_initialized)) {
		atomic_inc(&bat_priv->mcast.num_disabled);
		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	}

	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);

	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);

	orig->mcast_flags = mcast_flags;
	spin_unlock_bh(&orig->mcast_handler_lock);
}

/**
 * batadv_mcast_init() - initialize the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Registers the multicast tvlv handler (version 2) and kicks off the
 * periodic multicast listener announcement worker.
 */
void batadv_mcast_init(struct batadv_priv *bat_priv)
{
	batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
				     NULL, BATADV_TVLV_MCAST, 2,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
	batadv_mcast_start_timer(bat_priv);
}

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
 * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
 * @bat_priv: the bat priv with all the soft interface information
 * @seq: debugfs table seq_file struct
 *
 * Prints our own multicast flags including a more specific reason why
 * they are set, that is prints the bridge and querier state too, to
 * the debugfs table specified via @seq.
 */
static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
					    struct seq_file *seq)
{
	u8 flags = bat_priv->mcast.flags;
	char querier4, querier6, shadowing4, shadowing6;
	bool bridged = bat_priv->mcast.bridged;

	if (bridged) {
		/* '4'/'6' marks a missing IGMP/MLD querier, '.' means one
		 * exists; shadowing chars are set the other way around
		 */
		querier4 = bat_priv->mcast.querier_ipv4.exists ? '.' : '4';
		querier6 = bat_priv->mcast.querier_ipv6.exists ? '.' : '6';
		shadowing4 = bat_priv->mcast.querier_ipv4.shadowing ? '4' : '.';
		shadowing6 = bat_priv->mcast.querier_ipv6.shadowing ? '6' : '.';
	} else {
		/* querier state is unknown/irrelevant without a bridge */
		querier4 = '?';
		querier6 = '?';
		shadowing4 = '?';
		shadowing6 = '?';
	}

	seq_printf(seq, "Multicast flags (own flags: [%c%c%c])\n",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
	seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.');
	seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n",
		   querier4, querier6);
	seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n",
		   shadowing4, shadowing6);
	seq_puts(seq, "-------------------------------------------\n");
	seq_printf(seq, " %-10s %s\n", "Originator", "Flags");
}

/**
 * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
 * @seq: seq file to print on
 * @offset: not used
 *
 * This prints a table of (primary) originators and their according
 * multicast flags, including (in the header) our own.
 *
 * Return: always 0
 */
int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct batadv_orig_node *orig_node;
	struct hlist_head *head;
	u8 flags;
	u32 i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	batadv_mcast_flags_print_header(bat_priv, seq);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* no mcast tvlv seen from this node yet */
			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
				      &orig_node->capa_initialized))
				continue;

			/* node announced no multicast support */
			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
				      &orig_node->capabilities)) {
				seq_printf(seq, "%pM -\n", orig_node->orig);
				continue;
			}

			flags = orig_node->mcast_flags;

			seq_printf(seq, "%pM [%c%c%c]\n", orig_node->orig,
				   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)
				   ? 'U' : '.',
				   (flags & BATADV_MCAST_WANT_ALL_IPV4)
				   ? '4' : '.',
				   (flags & BATADV_MCAST_WANT_ALL_IPV6)
				   ? '6' : '.');
		}
		rcu_read_unlock();
	}

	batadv_hardif_put(primary_if);

	return 0;
}
#endif

/**
 * batadv_mcast_free() - free the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * NOTE(review): batadv_mcast_init() above only registers the tvlv
 * *handler*; the tvlv *container* unregistered here is presumably
 * registered by the mla update worker — verify against the mla code.
 */
void batadv_mcast_free(struct batadv_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->mcast.work);

	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);

	/* safely calling outside of worker, as worker was canceled above */
	batadv_mcast_mla_tt_retract(bat_priv, NULL);
}

/**
 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
 * @orig: the originator which is going to get purged
 *
 * Reverts any contribution of @orig to the disabled-node counter and
 * removes it from all want-all lists, mirroring the accounting done in
 * batadv_mcast_tvlv_ogm_handler().
 */
void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
	struct batadv_priv *bat_priv = orig->bat_priv;

	spin_lock_bh(&orig->mcast_handler_lock);

	/* only decrement if the handler had counted this node as disabled */
	if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
	    test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
		atomic_dec(&bat_priv->mcast.num_disabled);

	/* passing BATADV_NO_FLAGS drops orig from all want-all lists */
	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);

	spin_unlock_bh(&orig->mcast_handler_lock);
}