// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2014-2018 B.A.T.M.A.N. contributors:
 *
 * Linus Lüssing
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "multicast.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_mcast_mla_update(struct work_struct *work);

/**
 * batadv_mcast_start_timer() - schedule the multicast periodic worker
 * @bat_priv: the bat priv with all the soft interface information
 */
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}

/**
 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
 * @soft_iface: netdev struct of the mesh interface
 *
 * If the given soft interface has a bridge on top then the refcount
 * of the according net device is increased.
 *
 * Return: NULL if no such bridge exists. Otherwise the net device of the
 * bridge.
 */
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
	struct net_device *upper = soft_iface;

	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));

	if (upper)
		dev_hold(upper);
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_addr_is_ipv4() - check if multicast MAC is IPv4
 * @addr: the MAC address to check
 *
 * Return: True, if MAC address is one reserved for IPv4 multicast, false
 * otherwise.
 */
static bool batadv_mcast_addr_is_ipv4(const u8 *addr)
{
	static const u8 prefix[] = {0x01, 0x00, 0x5E};

	return memcmp(prefix, addr, sizeof(prefix)) == 0;
}

/**
 * batadv_mcast_addr_is_ipv6() - check if multicast MAC is IPv6
 * @addr: the MAC address to check
 *
 * Return: True, if MAC address is one reserved for IPv6 multicast, false
 * otherwise.
 */
static bool batadv_mcast_addr_is_ipv6(const u8 *addr)
{
	static const u8 prefix[] = {0x33, 0x33};

	return memcmp(prefix, addr, sizeof(prefix)) == 0;
}

/**
 * batadv_mcast_mla_softif_get() - get softif multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 *
 * Collects multicast addresses of multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * If there is a bridge interface on top of dev, collects from that one
 * instead. Just like with IP addresses and routes, multicast listeners
 * will(/should) register to the bridge interface instead of an
 * enslaved bat0.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_softif_get(struct batadv_priv *bat_priv,
				       struct net_device *dev,
				       struct hlist_head *mcast_list)
{
	bool all_ipv4 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV4;
	bool all_ipv6 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV6;
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	struct netdev_hw_addr *mc_list_entry;
	struct batadv_hw_addr *new;
	int ret = 0;

	netif_addr_lock_bh(bridge ? bridge : dev);
	netdev_for_each_mc_addr(mc_list_entry, bridge ? bridge : dev) {
		if (all_ipv4 && batadv_mcast_addr_is_ipv4(mc_list_entry->addr))
			continue;

		if (all_ipv6 && batadv_mcast_addr_is_ipv6(mc_list_entry->addr))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mc_list_entry->addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	netif_addr_unlock_bh(bridge ? bridge : dev);

	if (bridge)
		dev_put(bridge);

	return ret;
}
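
/* A rough illustration of the prefix checks above (hypothetical addresses,
 * following the standard multicast MAC mappings):
 *
 *	// IPv4 group 224.1.2.3 maps to 01:00:5e:01:02:03 (ip_eth_mc_map())
 *	// IPv6 group ff02::fb  maps to 33:33:00:00:00:fb (ipv6_eth_mc_map())
 *	u8 mac4[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
 *	u8 mac6[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0xfb };
 *
 *	WARN_ON(!batadv_mcast_addr_is_ipv4(mac4));
 *	WARN_ON(!batadv_mcast_addr_is_ipv6(mac6));
 *
 * With BATADV_MCAST_WANT_ALL_IPV4/IPV6 set, such addresses are skipped in
 * batadv_mcast_mla_softif_get() since we ask for all of that traffic anyway.
 */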

/**
 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
 * @mcast_addr: the multicast address to check
 * @mcast_list: the list with multicast addresses to search in
 *
 * Return: true if the given address is already in the given list.
 * Otherwise returns false.
 */
static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
					  struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;

	hlist_for_each_entry(mcast_entry, mcast_list, list)
		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
			return true;

	return false;
}

/**
 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
 * @dst: destination to write to - a multicast MAC address
 * @src: source to read from - a multicast IP address
 *
 * Converts a given multicast IPv4/IPv6 address from a bridge
 * to its matching multicast MAC address and copies it into the given
 * destination buffer.
 *
 * Caller needs to make sure the destination buffer can hold
 * at least ETH_ALEN bytes.
 */
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
	if (src->proto == htons(ETH_P_IP))
		ip_eth_mc_map(src->u.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
	else if (src->proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&src->u.ip6, dst);
#endif
	else
		eth_zero_addr(dst);
}

/**
 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @dev: a bridge slave whose bridge to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 *
 * Collects multicast addresses of multicast listeners residing
 * on foreign, non-mesh devices which we gave access to our mesh via
 * a bridge on top of the given soft interface, dev, in the given
 * mcast_list.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_bridge_get(struct batadv_priv *bat_priv,
				       struct net_device *dev,
				       struct hlist_head *mcast_list)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	bool all_ipv4 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV4;
	bool all_ipv6 = bat_priv->mcast.flags & BATADV_MCAST_WANT_ALL_IPV6;
	struct br_ip_list *br_ip_entry, *tmp;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* we don't need to detect these devices/listeners, the IGMP/MLD
	 * snooping code of the Linux bridge already does that for us
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		if (all_ipv4 && br_ip_entry->addr.proto == htons(ETH_P_IP))
			continue;

		if (all_ipv6 && br_ip_entry->addr.proto == htons(ETH_P_IPV6))
			continue;

		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}
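
/* The two collectors above are combined by __batadv_mcast_mla_update()
 * further below, roughly along these lines (simplified sketch, error
 * handling omitted):
 *
 *	HLIST_HEAD(mcast_list);
 *
 *	batadv_mcast_mla_softif_get(bat_priv, soft_iface, &mcast_list);
 *	batadv_mcast_mla_bridge_get(bat_priv, soft_iface, &mcast_list);
 *	// announce the result via TT, then drop what is left over
 *	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
 *	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
 *	batadv_mcast_mla_list_free(&mcast_list);
 */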

/**
 * batadv_mcast_mla_list_free() - free a list of multicast addresses
 * @mcast_list: the list to free
 *
 * Removes and frees all items in the given mcast_list.
 */
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which should _not_ be removed
 *
 * Retracts the announcement of any multicast listener from the
 * translation table except the ones listed in the given mcast_list.
 *
 * If mcast_list is NULL then all are retracted.
 *
 * Do not call outside of the mcast worker! (or cancel mcast worker first)
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	WARN_ON(delayed_work_pending(&bat_priv->mcast.work));

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 *
 * Do not call outside of the mcast worker! (or cancel mcast worker first)
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	WARN_ON(delayed_work_pending(&bat_priv->mcast.work));

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}

/**
 * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Checks whether there is a bridge on top of our soft interface.
 *
 * Return: true if there is a bridge, false otherwise.
 */
static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
{
	struct net_device *upper = bat_priv->soft_iface;

	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_querier_log() - debug output regarding the querier status on
 *  link
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
"IGMP" or "MLD") 417 * @old_state: the previous querier state on our link 418 * @new_state: the new querier state on our link 419 * 420 * Outputs debug messages to the logging facility with log level 'mcast' 421 * regarding changes to the querier status on the link which are relevant 422 * to our multicast optimizations. 423 * 424 * Usually this is about whether a querier appeared or vanished in 425 * our mesh or whether the querier is in the suboptimal position of being 426 * behind our local bridge segment: Snooping switches will directly 427 * forward listener reports to the querier, therefore batman-adv and 428 * the bridge will potentially not see these listeners - the querier is 429 * potentially shadowing listeners from us then. 430 * 431 * This is only interesting for nodes with a bridge on top of their 432 * soft interface. 433 */ 434 static void 435 batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto, 436 struct batadv_mcast_querier_state *old_state, 437 struct batadv_mcast_querier_state *new_state) 438 { 439 if (!old_state->exists && new_state->exists) 440 batadv_info(bat_priv->soft_iface, "%s Querier appeared\n", 441 str_proto); 442 else if (old_state->exists && !new_state->exists) 443 batadv_info(bat_priv->soft_iface, 444 "%s Querier disappeared - multicast optimizations disabled\n", 445 str_proto); 446 else if (!bat_priv->mcast.bridged && !new_state->exists) 447 batadv_info(bat_priv->soft_iface, 448 "No %s Querier present - multicast optimizations disabled\n", 449 str_proto); 450 451 if (new_state->exists) { 452 if ((!old_state->shadowing && new_state->shadowing) || 453 (!old_state->exists && new_state->shadowing)) 454 batadv_dbg(BATADV_DBG_MCAST, bat_priv, 455 "%s Querier is behind our bridged segment: Might shadow listeners\n", 456 str_proto); 457 else if (old_state->shadowing && !new_state->shadowing) 458 batadv_dbg(BATADV_DBG_MCAST, bat_priv, 459 "%s Querier is not behind our bridged segment\n", 460 str_proto); 461 } 462 } 463 464 /** 465 * batadv_mcast_bridge_log() - debug output for topology changes in bridged 466 * setups 467 * @bat_priv: the bat priv with all the soft interface information 468 * @bridged: a flag about whether the soft interface is currently bridged or not 469 * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier 470 * @querier_ipv6: (maybe) new status of a potential, selected MLD querier 471 * 472 * If no bridges are ever used on this node, then this function does nothing. 473 * 474 * Otherwise this function outputs debug information to the 'mcast' log level 475 * which might be relevant to our multicast optimizations. 476 * 477 * More precisely, it outputs information when a bridge interface is added or 478 * removed from a soft interface. And when a bridge is present, it further 479 * outputs information about the querier state which is relevant for the 480 * multicast flags this node is going to set. 

/**
 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
 *  setups
 * @bat_priv: the bat priv with all the soft interface information
 * @bridged: a flag about whether the soft interface is currently bridged or not
 * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
 * @querier_ipv6: (maybe) new status of a potential, selected MLD querier
 *
 * If no bridges are ever used on this node, then this function does nothing.
 *
 * Otherwise this function outputs debug information to the 'mcast' log level
 * which might be relevant to our multicast optimizations.
 *
 * More precisely, it outputs information when a bridge interface is added or
 * removed from a soft interface. And when a bridge is present, it further
 * outputs information about the querier state which is relevant for the
 * multicast flags this node is going to set.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
			struct batadv_mcast_querier_state *querier_ipv4,
			struct batadv_mcast_querier_state *querier_ipv6)
{
	if (!bat_priv->mcast.bridged && bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (bat_priv->mcast.bridged && !bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &bat_priv->mcast.querier_ipv4,
					 querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &bat_priv->mcast.querier_ipv6,
					 querier_ipv6);
	}
}

/**
 * batadv_mcast_flags_log() - output debug information about mcast flag changes
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: flags indicating the new multicast state
 *
 * Whenever the multicast flags which this node announces change (@flags vs.
 * bat_priv->mcast.flags), this notifies userspace via the 'mcast' log level.
 */
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
	u8 old_flags = bat_priv->mcast.flags;
	char str_old_flags[] = "[...]";

	sprintf(str_old_flags, "[%c%c%c]",
		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');

	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
		   "Changing multicast flags from '%s' to '[%c%c%c]'\n",
		   bat_priv->mcast.enabled ? str_old_flags : "<undefined>",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
}

/**
 * batadv_mcast_mla_tvlv_update() - update multicast tvlv
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 *
 * Return: false if we want all IPv4 && IPv6 multicast traffic and true
 * otherwise.
 */
static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
{
	struct batadv_tvlv_mcast_data mcast_data;
	struct batadv_mcast_querier_state querier4 = {false, false};
	struct batadv_mcast_querier_state querier6 = {false, false};
	struct net_device *dev = bat_priv->soft_iface;
	bool bridged;

	mcast_data.flags = BATADV_NO_FLAGS;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	bridged = batadv_mcast_has_bridge(bat_priv);
	if (!bridged)
		goto update;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	querier4.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	querier4.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	querier6.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	querier6.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	mcast_data.flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signalize other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!querier4.exists || querier4.shadowing)
		mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV4;

	if (!querier6.exists || querier6.shadowing)
		mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV6;

update:
	batadv_mcast_bridge_log(bat_priv, bridged, &querier4, &querier6);

	bat_priv->mcast.querier_ipv4.exists = querier4.exists;
	bat_priv->mcast.querier_ipv4.shadowing = querier4.shadowing;

	bat_priv->mcast.querier_ipv6.exists = querier6.exists;
	bat_priv->mcast.querier_ipv6.shadowing = querier6.shadowing;

	bat_priv->mcast.bridged = bridged;

	if (!bat_priv->mcast.enabled ||
	    mcast_data.flags != bat_priv->mcast.flags) {
		batadv_mcast_flags_log(bat_priv, mcast_data.flags);
		batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
					       &mcast_data, sizeof(mcast_data));
		bat_priv->mcast.flags = mcast_data.flags;
		bat_priv->mcast.enabled = true;
	}

	return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
		 mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
}

/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	int ret;

	if (!batadv_mcast_mla_tvlv_update(bat_priv))
		goto update;

	ret = batadv_mcast_mla_softif_get(bat_priv, soft_iface, &mcast_list);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(bat_priv, soft_iface, &mcast_list);
	if (ret < 0)
		goto out;

update:
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);

out:
	batadv_mcast_mla_list_free(&mcast_list);
}

/**
 * batadv_mcast_mla_update() - update the own MLAs
 * @work: kernel work struct
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * In the end, reschedules the work timer.
 */
static void batadv_mcast_mla_update(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_mcast *priv_mcast;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast,
				  work);
	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

	__batadv_mcast_mla_update(bat_priv);
	batadv_mcast_start_timer(bat_priv);
}
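
/* The resulting worker loop can be pictured roughly as follows (sketch only,
 * see batadv_mcast_init() and batadv_mcast_free() further below for the real
 * entry and exit points):
 *
 *	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
 *	batadv_mcast_start_timer(bat_priv);
 *	// ... every BATADV_MCAST_WORK_PERIOD the worker runs
 *	//     __batadv_mcast_mla_update() and then re-arms itself ...
 *	cancel_delayed_work_sync(&bat_priv->mcast.work);
 */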

/**
 * batadv_mcast_is_report_ipv4() - check for IGMP reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid IGMP report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
	if (ip_mc_check_igmp(skb, NULL) < 0)
		return false;

	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	iphdr = ip_hdr(skb);

	/* TODO: Implement Multicast Router Discovery (RFC4286),
	 * then allow scope > link local, too
	 */
	if (!ipv4_is_local_multicast(iphdr->daddr))
		return -EINVAL;

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	*is_unsnoopable = true;

	return 0;
}

/**
 * batadv_mcast_is_report_ipv6() - check for MLD reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid MLD report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
	if (ipv6_mc_check_mld(skb, NULL) < 0)
		return false;

	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MLD2_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory.
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	ip6hdr = ipv6_hdr(skb);

	/* TODO: Implement Multicast Router Discovery (RFC4286),
	 * then allow scope > link local, too
	 */
	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) != IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;

	return 0;
}

/**
 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame to check
 * @is_unsnoopable: stores whether the destination is snoopable
 *
 * Checks whether the given multicast ethernet frame has the potential to be
 * forwarded with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory.
 */
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
					struct sk_buff *skb,
					bool *is_unsnoopable)
{
	struct ethhdr *ethhdr = eth_hdr(skb);

	if (!atomic_read(&bat_priv->multicast_mode))
		return -EINVAL;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
							 is_unsnoopable);
	case ETH_P_IPV6:
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;

		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
							 is_unsnoopable);
	default:
		return -EINVAL;
	}
}

/**
 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
 *  interest
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: ethernet header of a packet
 *
 * Return: the number of nodes which want all IPv4 multicast traffic if the
 * given ethhdr is from an IPv4 packet or the number of nodes which want all
 * IPv6 traffic if it matches an IPv6 packet.
 */
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
					       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
	default:
		/* we shouldn't be here... */
		return 0;
	}
}

/**
 * batadv_mcast_forw_tt_node_get() - get a multicast tt node
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the ether header containing the multicast destination
 *
 * Return: an orig_node matching the multicast address provided by ethhdr
 * via a translation table lookup. This increases the returned nodes refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
					BATADV_NO_FLAGS);
}

/**
 * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
 * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_ipv4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_ipv6_node_get(bat_priv);
	default:
		/* we shouldn't be here... */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
 * set and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_unsnoopables_list,
				 mcast_want_all_unsnoopables_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
 * should be forwarded to.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       struct batadv_orig_node **orig)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	struct ethhdr *ethhdr;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_ALL;

	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);

	total_count = tt_count + ip_count + unsnoop_count;

	switch (total_count) {
	case 1:
		if (tt_count)
			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
		else if (ip_count)
			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
		else if (unsnoop_count)
			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);

		if (*orig)
			return BATADV_FORW_SINGLE;

		/* fall through */
	case 0:
		return BATADV_FORW_NONE;
	default:
		return BATADV_FORW_ALL;
	}
}
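
/* A rough sketch of how a transmit path might consume the result above (the
 * variable names here are made up, in practice this is used on the
 * soft-interface transmit path):
 *
 *	struct batadv_orig_node *mcast_single_orig = NULL;
 *	enum batadv_forw_mode forw_mode;
 *
 *	forw_mode = batadv_mcast_forw_mode(bat_priv, skb, &mcast_single_orig);
 *	if (forw_mode == BATADV_FORW_NONE)
 *		goto dropped;	// no node is interested in this frame
 *	// BATADV_FORW_SINGLE: unicast the frame to mcast_single_orig,
 *	// BATADV_FORW_ALL:    fall back to classic flooding
 */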

/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
 * orig, has toggled then this method updates counter and list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
					     struct batadv_orig_node *orig,
					     u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
 * toggled then this method updates counter and list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
 * toggled then this method updates counter and list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node of the ogm
 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
 * @tvlv_value: tvlv buffer containing the multicast data
 * @tvlv_value_len: tvlv buffer length
 */
static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 flags,
					  void *tvlv_value,
					  u16 tvlv_value_len)
{
	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
	u8 mcast_flags = BATADV_NO_FLAGS;

	if (orig_mcast_enabled && tvlv_value &&
	    tvlv_value_len >= sizeof(mcast_flags))
		mcast_flags = *(u8 *)tvlv_value;

	if (!orig_mcast_enabled) {
		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
	}

	spin_lock_bh(&orig->mcast_handler_lock);

	if (orig_mcast_enabled &&
	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	} else if (!orig_mcast_enabled &&
		   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
	}

	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);

	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);

	orig->mcast_flags = mcast_flags;
	spin_unlock_bh(&orig->mcast_handler_lock);
}

/**
 * batadv_mcast_init() - initialize the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_init(struct batadv_priv *bat_priv)
{
	batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
				     NULL, BATADV_TVLV_MCAST, 2,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
	batadv_mcast_start_timer(bat_priv);
}
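
/* For orientation (sketch, not a definitive contract): the version number 2
 * used for BATADV_TVLV_MCAST above pairs up with the container registered in
 * batadv_mcast_mla_tvlv_update() and the unregister calls in
 * batadv_mcast_free() below, e.g.:
 *
 *	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
 *				       &mcast_data, sizeof(mcast_data));
 *	...
 *	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
 *	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
 */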

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
 * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
 * @bat_priv: the bat priv with all the soft interface information
 * @seq: debugfs table seq_file struct
 *
 * Prints our own multicast flags including a more specific reason why
 * they are set, that is prints the bridge and querier state too, to
 * the debugfs table specified via @seq.
 */
static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
					    struct seq_file *seq)
{
	u8 flags = bat_priv->mcast.flags;
	char querier4, querier6, shadowing4, shadowing6;
	bool bridged = bat_priv->mcast.bridged;

	if (bridged) {
		querier4 = bat_priv->mcast.querier_ipv4.exists ? '.' : '4';
		querier6 = bat_priv->mcast.querier_ipv6.exists ? '.' : '6';
		shadowing4 = bat_priv->mcast.querier_ipv4.shadowing ? '4' : '.';
		shadowing6 = bat_priv->mcast.querier_ipv6.shadowing ? '6' : '.';
	} else {
		querier4 = '?';
		querier6 = '?';
		shadowing4 = '?';
		shadowing6 = '?';
	}

	seq_printf(seq, "Multicast flags (own flags: [%c%c%c])\n",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
	seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.');
	seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n",
		   querier4, querier6);
	seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n",
		   shadowing4, shadowing6);
	seq_puts(seq, "-------------------------------------------\n");
	seq_printf(seq, " %-10s %s\n", "Originator", "Flags");
}

/**
 * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
 * @seq: seq file to print on
 * @offset: not used
 *
 * This prints a table of (primary) originators and their according
 * multicast flags, including (in the header) our own.
 *
 * Return: always 0
 */
int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct batadv_orig_node *orig_node;
	struct hlist_head *head;
	u8 flags;
	u32 i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	batadv_mcast_flags_print_header(bat_priv, seq);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
				      &orig_node->capa_initialized))
				continue;

			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
				      &orig_node->capabilities)) {
				seq_printf(seq, "%pM -\n", orig_node->orig);
				continue;
			}

			flags = orig_node->mcast_flags;

			seq_printf(seq, "%pM [%c%c%c]\n", orig_node->orig,
				   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)
				   ? 'U' : '.',
				   (flags & BATADV_MCAST_WANT_ALL_IPV4)
				   ? '4' : '.',
				   (flags & BATADV_MCAST_WANT_ALL_IPV6)
				   ? '6' : '.');
		}
		rcu_read_unlock();
	}

	batadv_hardif_put(primary_if);

	return 0;
}
#endif

/**
 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
 * @msg: buffer for the message
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 or error code.
 */
int batadv_mcast_mesh_info_put(struct sk_buff *msg,
			       struct batadv_priv *bat_priv)
{
	u32 flags = bat_priv->mcast.flags;
	u32 flags_priv = BATADV_NO_FLAGS;

	if (bat_priv->mcast.bridged) {
		flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;

		if (bat_priv->mcast.querier_ipv4.exists)
			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
		if (bat_priv->mcast.querier_ipv6.exists)
			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
		if (bat_priv->mcast.querier_ipv4.shadowing)
			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
		if (bat_priv->mcast.querier_ipv6.shadowing)
			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
	}

	if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
	    nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
		return -EMSGSIZE;

	return 0;
}

/**
 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
 *  to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @orig_node: originator to dump the multicast flags of
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			      struct batadv_orig_node *orig_node)
{
	void *hdr;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_MCAST_FLAGS);
	if (!hdr)
		return -ENOBUFS;

	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
		    orig_node->orig)) {
		genlmsg_cancel(msg, hdr);
		return -EMSGSIZE;
	}

	if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
		     &orig_node->capabilities)) {
		if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
				orig_node->mcast_flags)) {
			genlmsg_cancel(msg, hdr);
			return -EMSGSIZE;
		}
	}

	genlmsg_end(msg, hdr);
	return 0;
}

/**
 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
 *  table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
			       struct hlist_head *head, long *idx_skip)
{
	struct batadv_orig_node *orig_node;
	long idx = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
		if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
			      &orig_node->capa_initialized))
			continue;

		if (idx < *idx_skip)
			goto skip;

		if (batadv_mcast_flags_dump_entry(msg, portid, seq,
						  orig_node)) {
			rcu_read_unlock();
			*idx_skip = idx;

			return -EMSGSIZE;
		}

skip:
		idx++;
	}
	rcu_read_unlock();

	return 0;
}

/**
 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @bat_priv: the bat priv with all the soft interface information
 * @bucket: current bucket to dump
 * @idx: index in current bucket to the next entry to dump
 *
 * Return: 0 or error code.
 */
static int
__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, u32 seq,
			  struct batadv_priv *bat_priv, long *bucket, long *idx)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	long bucket_tmp = *bucket;
	struct hlist_head *head;
	long idx_tmp = *idx;

	while (bucket_tmp < hash->size) {
		head = &hash->table[bucket_tmp];

		if (batadv_mcast_flags_dump_bucket(msg, portid, seq, head,
						   &idx_tmp))
			break;

		bucket_tmp++;
		idx_tmp = 0;
	}

	*bucket = bucket_tmp;
	*idx = idx_tmp;

	return msg->len;
}

/**
 * batadv_mcast_netlink_get_primary() - get primary interface from netlink
 *  callback
 * @cb: netlink callback structure
 * @primary_if: the primary interface pointer to return the result in
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
				 struct batadv_hard_iface **primary_if)
{
	struct batadv_hard_iface *hard_iface = NULL;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);

	hard_iface = batadv_primary_if_get_selected(bat_priv);
	if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

out:
	if (soft_iface)
		dev_put(soft_iface);

	if (!ret && primary_if)
		*primary_if = hard_iface;
	else if (hard_iface)
		batadv_hardif_put(hard_iface);

	return ret;
}
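
/* How the dump below resumes across multiple netlink messages (sketch, the
 * genetlink plumbing itself lives in netlink.c): cb->args[0] and cb->args[1]
 * persist between calls, so a partially filled message simply records where
 * to continue:
 *
 *	long *bucket = &cb->args[0];	// hash bucket to resume at
 *	long *idx = &cb->args[1];	// entry within that bucket
 *
 *	__batadv_mcast_flags_dump(msg, portid, cb->nlh->nlmsg_seq,
 *				  bat_priv, bucket, idx);
 */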

/**
 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct batadv_priv *bat_priv;
	long *bucket = &cb->args[0];
	long *idx = &cb->args[1];
	int ret;

	ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
	if (ret)
		return ret;

	bat_priv = netdev_priv(primary_if->soft_iface);
	ret = __batadv_mcast_flags_dump(msg, portid, cb->nlh->nlmsg_seq,
					bat_priv, bucket, idx);

	batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_mcast_free() - free the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_free(struct batadv_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->mcast.work);

	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);

	/* safely calling outside of worker, as worker was canceled above */
	batadv_mcast_mla_tt_retract(bat_priv, NULL);
}

/**
 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
 * @orig: the originator which is going to get purged
 */
void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
	struct batadv_priv *bat_priv = orig->bat_priv;

	spin_lock_bh(&orig->mcast_handler_lock);

	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);

	spin_unlock_bh(&orig->mcast_handler_lock);
}