// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2014-2020 B.A.T.M.A.N. contributors:
 *
 * Linus Lüssing
 */

#include "multicast.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_mcast_mla_update(struct work_struct *work);

/**
 * batadv_mcast_start_timer() - schedule the multicast periodic worker
 * @bat_priv: the bat priv with all the soft interface information
 */
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}

/**
 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
 * @soft_iface: netdev struct of the mesh interface
 *
 * If the given soft interface has a bridge on top then the refcount
 * of the according net device is increased.
 *
 * Return: NULL if no such bridge exists. Otherwise the net device of the
 * bridge.
 */
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
	struct net_device *upper = soft_iface;

	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));

	if (upper)
		dev_hold(upper);
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
 *  node for IPv4
 * @dev: the interface to check
 *
 * Checks the presence of an IPv4 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	if (in_dev && IN_DEV_MFORWARD(in_dev))
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR4;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
 *  node for IPv6
 * @dev: the interface to check
 *
 * Checks the presence of an IPv6 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6_MROUTE)
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	struct inet6_dev *in6_dev = __in6_dev_get(dev);

	if (in6_dev && in6_dev->cnf.mc_forwarding)
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR6;
}
#else
static inline u8
batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	return BATADV_MCAST_WANT_NO_RTR6;
}
#endif

/**
 * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	rcu_read_lock();

	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);

	rcu_read_unlock();

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
#if IS_ENABLED(CONFIG_IPV6)
static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	struct net_device *dev = bat_priv->soft_iface;
	struct br_ip_list *br_ip_entry, *tmp;
	u8 flags = BATADV_MCAST_WANT_NO_RTR6;
	int ret;

	if (!bridge)
		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	/* TODO: ask the bridge if a multicast router is present (the bridge
	 * is capable of performing proper RFC4286 multicast router
	 * discovery) instead of searching for a ff02::2 listener here
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		return BATADV_NO_FLAGS;

	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		/* the bridge snooping does not maintain IPv4 link-local
		 * addresses - therefore we won't find any IPv4 multicast router
		 * address here, only IPv6 ones
		 */
		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6) &&
		    ipv6_addr_is_ll_all_routers(&br_ip_entry->addr.dst.ip6))
			flags &= ~BATADV_MCAST_WANT_NO_RTR6;

		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return flags;
}
#else
static inline u8
batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
				      struct net_device *bridge)
{
	if (bridge)
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
}
#endif

/**
 * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node or behind its bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
					 struct net_device *bridge)
{
	u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
	flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);

	return flags;
}
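
/* For illustration: the combination above starts with both
 * BATADV_MCAST_WANT_NO_RTR4 and BATADV_MCAST_WANT_NO_RTR6 set and each
 * detection path may only clear bits. For example, on a node without a
 * bridge that itself runs an IPv4 multicast router,
 * batadv_mcast_mla_rtr_flags_softif_get() returns only
 * BATADV_MCAST_WANT_NO_RTR6 while batadv_mcast_mla_rtr_flags_bridge_get()
 * returns both flags - so the AND keeps only BATADV_MCAST_WANT_NO_RTR6,
 * announcing "no IPv6 multicast router here" while still requesting
 * routable IPv4 multicast traffic.
 */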

/**
 * batadv_mcast_mla_flags_get() - get the new multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: A set of flags for the current/next TVLV, querier and
 * bridge state.
 */
static struct batadv_mcast_mla_flags
batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
{
	struct net_device *dev = bat_priv->soft_iface;
	struct batadv_mcast_querier_state *qr4, *qr6;
	struct batadv_mcast_mla_flags mla_flags;
	struct net_device *bridge;

	bridge = batadv_mcast_get_bridge(dev);

	memset(&mla_flags, 0, sizeof(mla_flags));
	mla_flags.enabled = 1;
	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
							       bridge);

	if (!bridge)
		return mla_flags;

	dev_put(bridge);

	mla_flags.bridged = 1;
	qr4 = &mla_flags.querier_ipv4;
	qr6 = &mla_flags.querier_ipv6;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signal other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!qr4->exists || qr4->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
	}

	if (!qr6->exists || qr6->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
	}

	return mla_flags;
}

/**
 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
 * @mcast_addr: the multicast address to check
 * @mcast_list: the list with multicast addresses to search in
 *
 * Return: true if the given address is already in the given list.
 * Otherwise returns false.
 */
static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
					  struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;

	hlist_for_each_entry(mcast_entry, mcast_list, list)
		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
			return true;

	return false;
}

/**
 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv4 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct in_device *in_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ip_mc_list *pmc;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
		return 0;

	rcu_read_lock();

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
	     pmc = rcu_dereference(pmc->next_rcu)) {
		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
		    !ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		ip_eth_mc_map(pmc->multiaddr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
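
/* For illustration: the two filters in the loop above skip addresses which
 * are already covered by a coarser TVLV flag. With
 * BATADV_MCAST_WANT_ALL_UNSNOOPABLES set, link-local groups (224.0.0.0/24)
 * are flooded to this node anyway and need no TT announcement; and as long
 * as BATADV_MCAST_WANT_NO_RTR4 is unset (an IPv4 multicast router is
 * around), routable groups are covered by the router flag, so only
 * link-local listeners are announced individually.
 */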

/**
 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv6 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6)
static int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct inet6_dev *in6_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ifmcaddr6 *pmc6;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
		return 0;

	rcu_read_lock();

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev) {
		rcu_read_unlock();
		return 0;
	}

	read_lock_bh(&in6_dev->lock);
	for (pmc6 = in6_dev->mc_list; pmc6; pmc6 = pmc6->next) {
		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	read_unlock_bh(&in6_dev->lock);
	rcu_read_unlock();

	return ret;
}
#else
static inline int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	return 0;
}
#endif

/**
 * batadv_mcast_mla_softif_get() - get softif multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * If there is a bridge interface on top of dev, collect from that one
 * instead. Just like with IP addresses and routes, multicast listeners
 * will(/should) register to the bridge interface instead of an
 * enslaved bat0.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get(struct net_device *dev,
			    struct hlist_head *mcast_list,
			    struct batadv_mcast_mla_flags *flags)
{
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	int ret4, ret6 = 0;

	if (bridge)
		dev = bridge;

	ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
	if (ret4 < 0)
		goto out;

	ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
	if (ret6 < 0) {
		ret4 = 0;
		goto out;
	}

out:
	if (bridge)
		dev_put(bridge);

	return ret4 + ret6;
}
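
/* Note: if the IPv6 collection above fails, ret4 is reset to 0 so that the
 * negative error code held in ret6 is what ret4 + ret6 propagates to the
 * caller.
 */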

/**
 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
 * @dst: destination to write to - a multicast MAC address
 * @src: source to read from - a multicast IP address
 *
 * Converts a given multicast IPv4/IPv6 address from a bridge
 * to its matching multicast MAC address and copies it into the given
 * destination buffer.
 *
 * Caller needs to make sure the destination buffer can hold
 * at least ETH_ALEN bytes.
 */
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
	if (src->proto == htons(ETH_P_IP))
		ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
	else if (src->proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
	else
		eth_zero_addr(dst);
}

/**
 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
 * @dev: a bridge slave whose bridge to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on foreign, non-mesh devices which we gave access to our mesh via
 * a bridge on top of the given soft interface, dev, in the given
 * mcast_list.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_bridge_get(struct net_device *dev,
				       struct hlist_head *mcast_list,
				       struct batadv_mcast_mla_flags *flags)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	struct br_ip_list *br_ip_entry, *tmp;
	u8 tvlv_flags = flags->tvlv_flags;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* we don't need to detect these devices/listeners, the IGMP/MLD
	 * snooping code of the Linux bridge already does that for us
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
			    IPV6_ADDR_SCOPE_LINKLOCAL)
				continue;
		}
#endif

		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}

/**
 * batadv_mcast_mla_list_free() - free a list of multicast addresses
 * @mcast_list: the list to free
 *
 * Removes and frees all items in the given mcast_list.
 */
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which should _not_ be removed
 *
 * Retracts the announcement of any multicast listener from the
 * translation table except the ones listed in the given mcast_list.
 *
 * If mcast_list is NULL then all are retracted.
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}

/**
 * batadv_mcast_querier_log() - debug output regarding the querier status on
 *  link
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
 * @old_state: the previous querier state on our link
 * @new_state: the new querier state on our link
 *
 * Outputs debug messages to the logging facility with log level 'mcast'
 * regarding changes to the querier status on the link which are relevant
 * to our multicast optimizations.
 *
 * Usually this is about whether a querier appeared or vanished in
 * our mesh or whether the querier is in the suboptimal position of being
 * behind our local bridge segment: Snooping switches will directly
 * forward listener reports to the querier, therefore batman-adv and
 * the bridge will potentially not see these listeners - the querier is
 * potentially shadowing listeners from us then.
 *
 * This is only interesting for nodes with a bridge on top of their
 * soft interface.
 */
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
			 struct batadv_mcast_querier_state *old_state,
			 struct batadv_mcast_querier_state *new_state)
{
	if (!old_state->exists && new_state->exists)
		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
			    str_proto);
	else if (old_state->exists && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "%s Querier disappeared - multicast optimizations disabled\n",
			    str_proto);
	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "No %s Querier present - multicast optimizations disabled\n",
			    str_proto);

	if (new_state->exists) {
		if ((!old_state->shadowing && new_state->shadowing) ||
		    (!old_state->exists && new_state->shadowing))
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
				   str_proto);
		else if (old_state->shadowing && !new_state->shadowing)
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is not behind our bridged segment\n",
				   str_proto);
	}
}

/**
 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
 *  setups
 * @bat_priv: the bat priv with all the soft interface information
 * @new_flags: flags indicating the new multicast state
 *
 * If no bridges are ever used on this node, then this function does nothing.
 *
 * Otherwise this function outputs debug information to the 'mcast' log level
 * which might be relevant to our multicast optimizations.
 *
 * More precisely, it outputs information when a bridge interface is added or
 * removed from a soft interface. And when a bridge is present, it further
 * outputs information about the querier state which is relevant for the
 * multicast flags this node is going to set.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
			struct batadv_mcast_mla_flags *new_flags)
{
	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;

	if (!old_flags->bridged && new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (old_flags->bridged && !new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (new_flags->bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &old_flags->querier_ipv4,
					 &new_flags->querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &old_flags->querier_ipv6,
					 &new_flags->querier_ipv6);
	}
}

/**
 * batadv_mcast_flags_log() - output debug information about mcast flag changes
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: TVLV flags indicating the new multicast state
 *
 * Whenever the multicast TVLV flags this node announces change, this function
 * should be used to notify userspace about the change.
 */
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
	bool old_enabled = bat_priv->mcast.mla_flags.enabled;
	u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
	char str_old_flags[] = "[.... . ]";

	sprintf(str_old_flags, "[%c%c%c%s%s]",
		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");

	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
		   "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
		   old_enabled ? str_old_flags : "<undefined>",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
}
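
/* For illustration of the string format above: a bridged node announcing
 * only the unsnoopables flag (queriers present, no shadowing, multicast
 * routers reachable) is logged as '[U..R4R6]', while additionally setting
 * BATADV_MCAST_WANT_ALL_IPV4 and BATADV_MCAST_WANT_ALL_IPV6 would yield
 * '[U46R4R6]'.
 */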

/**
 * batadv_mcast_mla_flags_update() - update multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: flags indicating the new multicast state
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 */
static void
batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
			      struct batadv_mcast_mla_flags *flags)
{
	struct batadv_tvlv_mcast_data mcast_data;

	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
		return;

	batadv_mcast_bridge_log(bat_priv, flags);
	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);

	mcast_data.flags = flags->tvlv_flags;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
				       &mcast_data, sizeof(mcast_data));

	bat_priv->mcast.mla_flags = *flags;
}

/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	struct batadv_mcast_mla_flags flags;
	int ret;

	flags = batadv_mcast_mla_flags_get(bat_priv);

	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	spin_lock(&bat_priv->mcast.mla_lock);
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
	batadv_mcast_mla_flags_update(bat_priv, &flags);
	spin_unlock(&bat_priv->mcast.mla_lock);

out:
	batadv_mcast_mla_list_free(&mcast_list);
}

/**
 * batadv_mcast_mla_update() - update the own MLAs
 * @work: kernel work struct
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * In the end, reschedules the work timer.
 */
static void batadv_mcast_mla_update(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_mcast *priv_mcast;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast,
				  work);
	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

	__batadv_mcast_mla_update(bat_priv);
	batadv_mcast_start_timer(bat_priv);
}

/**
 * batadv_mcast_is_report_ipv4() - check for IGMP reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid IGMP report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
	if (ip_mc_check_igmp(skb) < 0)
		return false;

	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded
 * with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	iphdr = ip_hdr(skb);

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	if (ipv4_is_local_multicast(iphdr->daddr))
		*is_unsnoopable = true;
	else
		*is_routable = ETH_P_IP;

	return 0;
}

/**
 * batadv_mcast_is_report_ipv6() - check for MLD reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid MLD report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
	if (ipv6_mc_check_mld(skb) < 0)
		return false;

	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MLD2_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded
 * with a mode more optimal than classic flooding.
 *
 * Return: If so then 0.
 * Otherwise -EINVAL is returned or -ENOMEM if we are out of memory.
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	ip6hdr = ipv6_hdr(skb);

	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;
	else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
		*is_routable = ETH_P_IPV6;

	return 0;
}

/**
 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame to check
 * @is_unsnoopable: stores whether the destination is unsnoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given multicast ethernet frame has the potential to be
 * forwarded with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL is returned or -ENOMEM if we are
 * out of memory.
 */
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
					struct sk_buff *skb,
					bool *is_unsnoopable,
					int *is_routable)
{
	struct ethhdr *ethhdr = eth_hdr(skb);

	if (!atomic_read(&bat_priv->multicast_mode))
		return -EINVAL;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	case ETH_P_IPV6:
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;

		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	default:
		return -EINVAL;
	}
}

/**
 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
 *  interest
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: ethernet header of a packet
 *
 * Return: the number of nodes which want all IPv4 multicast traffic if the
 * given ethhdr is from an IPv4 packet or the number of nodes which want all
 * IPv6 traffic if it matches an IPv6 packet.
 */
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
					       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
	default:
		/* we shouldn't be here... */
		return 0;
	}
}

/**
 * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
 * @bat_priv: the bat priv with all the soft interface information
 * @protocol: the ethernet protocol type to count multicast routers for
 *
 * Return: the number of nodes which want all routable IPv4 multicast traffic
 * if the protocol is ETH_P_IP or the number of nodes which want all routable
 * IPv6 traffic if the protocol is ETH_P_IPV6.
 * Otherwise returns 0.
 */
static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
				       int protocol)
{
	switch (protocol) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
	default:
		return 0;
	}
}

/**
 * batadv_mcast_forw_tt_node_get() - get a multicast tt node
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the ether header containing the multicast destination
 *
 * Return: an orig_node matching the multicast address provided by ethhdr
 * via a translation table lookup. This increases the returned node's refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
					BATADV_NO_FLAGS);
}

/**
 * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
 * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_ipv4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_ipv6_node_get(bat_priv);
	default:
		/* we shouldn't be here... */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
 * set and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_unsnoopables_list,
				 mcast_want_all_unsnoopables_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr4_node_get() - get a node with an ipv4 mcast router flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR4 flag unset and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr6_node_get() - get a node with an ipv6 mcast router flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR6 flag unset
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr_node_get() - get a node with an ipv4/ipv6 router flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has no BATADV_MCAST_WANT_NO_RTR4 or
 * BATADV_MCAST_WANT_NO_RTR6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
			       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_rtr4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_rtr6_node_get(bat_priv);
	default:
		/* we shouldn't be here... */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
 * should be forwarded to.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       struct batadv_orig_node **orig)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	unsigned int mcast_fanout;
	struct ethhdr *ethhdr;
	int is_routable = 0;
	int rtr_count = 0;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
					   &is_routable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_ALL;

	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);

	total_count = tt_count + ip_count + unsnoop_count + rtr_count;

	switch (total_count) {
	case 1:
		if (tt_count)
			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
		else if (ip_count)
			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
		else if (unsnoop_count)
			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
		else if (rtr_count)
			*orig = batadv_mcast_forw_rtr_node_get(bat_priv,
							       ethhdr);

		if (*orig)
			return BATADV_FORW_SINGLE;

		fallthrough;
	case 0:
		return BATADV_FORW_NONE;
	default:
		mcast_fanout = atomic_read(&bat_priv->multicast_fanout);

		if (!unsnoop_count && total_count <= mcast_fanout)
			return BATADV_FORW_SOME;
	}

	return BATADV_FORW_ALL;
}
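
/* For illustration of the decision above: exactly one interested node leads
 * to BATADV_FORW_SINGLE with *orig set, zero interested nodes to
 * BATADV_FORW_NONE, and several nodes to BATADV_FORW_SOME as long as no
 * unsnoopable listeners are involved and the count does not exceed the
 * configured multicast_fanout; otherwise the frame falls back to classic
 * flooding (BATADV_FORW_ALL).
 */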

/**
 * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to send
 * @vid: the vlan identifier
 * @orig_node: the originator to send the packet to
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
				struct sk_buff *skb,
				unsigned short vid,
				struct batadv_orig_node *orig_node)
{
	/* Avoid sending multicast-in-unicast packets to other BLA
	 * gateways - they already got the frame from the LAN side
	 * we share with them.
	 * TODO: Refactor to take BLA into account earlier, to avoid
	 * reducing the mcast_fanout count.
	 */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

/**
 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any multicast
 * listener registered in the translation table. A transmission is performed
 * via a batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
		     unsigned short vid)
{
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;
	struct batadv_tt_orig_list_entry *orig_entry;
	struct batadv_tt_global_entry *tt_global;
	const u8 *addr = eth_hdr(skb)->h_dest;

	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
	if (!tt_global)
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
					    orig_entry->orig_node);
	}
	rcu_read_unlock();

	batadv_tt_global_entry_put(tt_global);

out:
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_send() - send packet to any detected multicast recipient
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node that signaled
 * interest in it, that is either via the translation table or the according
 * want-all flags. A transmission is performed via a batman-adv unicast packet
 * for each such destination node.
 *
 * The given skb is consumed/freed.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1736 */ 1737 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, 1738 unsigned short vid) 1739 { 1740 int ret; 1741 1742 ret = batadv_mcast_forw_tt(bat_priv, skb, vid); 1743 if (ret != NET_XMIT_SUCCESS) { 1744 kfree_skb(skb); 1745 return ret; 1746 } 1747 1748 ret = batadv_mcast_forw_want_all(bat_priv, skb, vid); 1749 if (ret != NET_XMIT_SUCCESS) { 1750 kfree_skb(skb); 1751 return ret; 1752 } 1753 1754 ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid); 1755 if (ret != NET_XMIT_SUCCESS) { 1756 kfree_skb(skb); 1757 return ret; 1758 } 1759 1760 consume_skb(skb); 1761 return ret; 1762 } 1763 1764 /** 1765 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list 1766 * @bat_priv: the bat priv with all the soft interface information 1767 * @orig: the orig_node which multicast state might have changed of 1768 * @mcast_flags: flags indicating the new multicast state 1769 * 1770 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator, 1771 * orig, has toggled then this method updates the counter and the list 1772 * accordingly. 1773 * 1774 * Caller needs to hold orig->mcast_handler_lock. 1775 */ 1776 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, 1777 struct batadv_orig_node *orig, 1778 u8 mcast_flags) 1779 { 1780 struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node; 1781 struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list; 1782 1783 lockdep_assert_held(&orig->mcast_handler_lock); 1784 1785 /* switched from flag unset to set */ 1786 if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES && 1787 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) { 1788 atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables); 1789 1790 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1791 /* flag checks above + mcast_handler_lock prevents this */ 1792 WARN_ON(!hlist_unhashed(node)); 1793 1794 hlist_add_head_rcu(node, head); 1795 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1796 /* switched from flag set to unset */ 1797 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) && 1798 orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) { 1799 atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables); 1800 1801 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1802 /* flag checks above + mcast_handler_lock prevents this */ 1803 WARN_ON(hlist_unhashed(node)); 1804 1805 hlist_del_init_rcu(node); 1806 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1807 } 1808 } 1809 1810 /** 1811 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list 1812 * @bat_priv: the bat priv with all the soft interface information 1813 * @orig: the orig_node which multicast state might have changed of 1814 * @mcast_flags: flags indicating the new multicast state 1815 * 1816 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has 1817 * toggled then this method updates the counter and the list accordingly. 1818 * 1819 * Caller needs to hold orig->mcast_handler_lock. 
1820 */ 1821 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, 1822 struct batadv_orig_node *orig, 1823 u8 mcast_flags) 1824 { 1825 struct hlist_node *node = &orig->mcast_want_all_ipv4_node; 1826 struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list; 1827 1828 lockdep_assert_held(&orig->mcast_handler_lock); 1829 1830 /* switched from flag unset to set */ 1831 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 && 1832 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) { 1833 atomic_inc(&bat_priv->mcast.num_want_all_ipv4); 1834 1835 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1836 /* flag checks above + mcast_handler_lock prevents this */ 1837 WARN_ON(!hlist_unhashed(node)); 1838 1839 hlist_add_head_rcu(node, head); 1840 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1841 /* switched from flag set to unset */ 1842 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) && 1843 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) { 1844 atomic_dec(&bat_priv->mcast.num_want_all_ipv4); 1845 1846 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1847 /* flag checks above + mcast_handler_lock prevents this */ 1848 WARN_ON(hlist_unhashed(node)); 1849 1850 hlist_del_init_rcu(node); 1851 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1852 } 1853 } 1854 1855 /** 1856 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list 1857 * @bat_priv: the bat priv with all the soft interface information 1858 * @orig: the orig_node which multicast state might have changed of 1859 * @mcast_flags: flags indicating the new multicast state 1860 * 1861 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has 1862 * toggled then this method updates the counter and the list accordingly. 1863 * 1864 * Caller needs to hold orig->mcast_handler_lock. 1865 */ 1866 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, 1867 struct batadv_orig_node *orig, 1868 u8 mcast_flags) 1869 { 1870 struct hlist_node *node = &orig->mcast_want_all_ipv6_node; 1871 struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list; 1872 1873 lockdep_assert_held(&orig->mcast_handler_lock); 1874 1875 /* switched from flag unset to set */ 1876 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 && 1877 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) { 1878 atomic_inc(&bat_priv->mcast.num_want_all_ipv6); 1879 1880 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1881 /* flag checks above + mcast_handler_lock prevents this */ 1882 WARN_ON(!hlist_unhashed(node)); 1883 1884 hlist_add_head_rcu(node, head); 1885 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1886 /* switched from flag set to unset */ 1887 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) && 1888 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) { 1889 atomic_dec(&bat_priv->mcast.num_want_all_ipv6); 1890 1891 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1892 /* flag checks above + mcast_handler_lock prevents this */ 1893 WARN_ON(hlist_unhashed(node)); 1894 1895 hlist_del_init_rcu(node); 1896 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1897 } 1898 } 1899 1900 /** 1901 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list 1902 * @bat_priv: the bat priv with all the soft interface information 1903 * @orig: the orig_node which multicast state might have changed of 1904 * @mcast_flags: flags indicating the new multicast state 1905 * 1906 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has 1907 * toggled then this method updates the counter and the list accordingly. 
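 *
 * Note: membership in the want-all-rtr4 list tracks originators with the
 * BATADV_MCAST_WANT_NO_RTR4 flag unset, so the counter and list
 * transitions below are inverted compared to the BATADV_MCAST_WANT_ALL_*
 * helpers above.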
1908 * 1909 * Caller needs to hold orig->mcast_handler_lock. 1910 */ 1911 static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv, 1912 struct batadv_orig_node *orig, 1913 u8 mcast_flags) 1914 { 1915 struct hlist_node *node = &orig->mcast_want_all_rtr4_node; 1916 struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list; 1917 1918 lockdep_assert_held(&orig->mcast_handler_lock); 1919 1920 /* switched from flag set to unset */ 1921 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) && 1922 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) { 1923 atomic_inc(&bat_priv->mcast.num_want_all_rtr4); 1924 1925 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1926 /* flag checks above + mcast_handler_lock prevents this */ 1927 WARN_ON(!hlist_unhashed(node)); 1928 1929 hlist_add_head_rcu(node, head); 1930 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1931 /* switched from flag unset to set */ 1932 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 && 1933 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) { 1934 atomic_dec(&bat_priv->mcast.num_want_all_rtr4); 1935 1936 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1937 /* flag checks above + mcast_handler_lock prevents this */ 1938 WARN_ON(hlist_unhashed(node)); 1939 1940 hlist_del_init_rcu(node); 1941 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1942 } 1943 } 1944 1945 /** 1946 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list 1947 * @bat_priv: the bat priv with all the soft interface information 1948 * @orig: the orig_node which multicast state might have changed of 1949 * @mcast_flags: flags indicating the new multicast state 1950 * 1951 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has 1952 * toggled then this method updates the counter and the list accordingly. 1953 * 1954 * Caller needs to hold orig->mcast_handler_lock. 
1955 */ 1956 static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv, 1957 struct batadv_orig_node *orig, 1958 u8 mcast_flags) 1959 { 1960 struct hlist_node *node = &orig->mcast_want_all_rtr6_node; 1961 struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list; 1962 1963 lockdep_assert_held(&orig->mcast_handler_lock); 1964 1965 /* switched from flag set to unset */ 1966 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) && 1967 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) { 1968 atomic_inc(&bat_priv->mcast.num_want_all_rtr6); 1969 1970 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1971 /* flag checks above + mcast_handler_lock prevents this */ 1972 WARN_ON(!hlist_unhashed(node)); 1973 1974 hlist_add_head_rcu(node, head); 1975 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1976 /* switched from flag unset to set */ 1977 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 && 1978 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) { 1979 atomic_dec(&bat_priv->mcast.num_want_all_rtr6); 1980 1981 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1982 /* flag checks above + mcast_handler_lock prevents this */ 1983 WARN_ON(hlist_unhashed(node)); 1984 1985 hlist_del_init_rcu(node); 1986 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1987 } 1988 } 1989 1990 /** 1991 * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV 1992 * @enabled: whether the originator has multicast TVLV support enabled 1993 * @tvlv_value: tvlv buffer containing the multicast flags 1994 * @tvlv_value_len: tvlv buffer length 1995 * 1996 * Return: multicast flags for the given tvlv buffer 1997 */ 1998 static u8 1999 batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len) 2000 { 2001 u8 mcast_flags = BATADV_NO_FLAGS; 2002 2003 if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags)) 2004 mcast_flags = *(u8 *)tvlv_value; 2005 2006 if (!enabled) { 2007 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4; 2008 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6; 2009 } 2010 2011 /* remove redundant flags to avoid sending duplicate packets later */ 2012 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) 2013 mcast_flags |= BATADV_MCAST_WANT_NO_RTR4; 2014 2015 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) 2016 mcast_flags |= BATADV_MCAST_WANT_NO_RTR6; 2017 2018 return mcast_flags; 2019 } 2020 2021 /** 2022 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container 2023 * @bat_priv: the bat priv with all the soft interface information 2024 * @orig: the orig_node of the ogm 2025 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) 2026 * @tvlv_value: tvlv buffer containing the multicast data 2027 * @tvlv_value_len: tvlv buffer length 2028 */ 2029 static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv, 2030 struct batadv_orig_node *orig, 2031 u8 flags, 2032 void *tvlv_value, 2033 u16 tvlv_value_len) 2034 { 2035 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 2036 u8 mcast_flags; 2037 2038 mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled, 2039 tvlv_value, tvlv_value_len); 2040 2041 spin_lock_bh(&orig->mcast_handler_lock); 2042 2043 if (orig_mcast_enabled && 2044 !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { 2045 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); 2046 } else if (!orig_mcast_enabled && 2047 test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { 2048 clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); 2049 } 2050 2051 
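	/* remember that this originator's multicast TVLV state was evaluated
	 * at least once; the flags table dumps below only consider
	 * originators with this bit set in capa_initialized
	 */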
set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized); 2052 2053 batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags); 2054 batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags); 2055 batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags); 2056 batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags); 2057 batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags); 2058 2059 orig->mcast_flags = mcast_flags; 2060 spin_unlock_bh(&orig->mcast_handler_lock); 2061 } 2062 2063 /** 2064 * batadv_mcast_init() - initialize the multicast optimizations structures 2065 * @bat_priv: the bat priv with all the soft interface information 2066 */ 2067 void batadv_mcast_init(struct batadv_priv *bat_priv) 2068 { 2069 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler, 2070 NULL, BATADV_TVLV_MCAST, 2, 2071 BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 2072 2073 INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update); 2074 batadv_mcast_start_timer(bat_priv); 2075 } 2076 2077 #ifdef CONFIG_BATMAN_ADV_DEBUGFS 2078 /** 2079 * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table 2080 * @bat_priv: the bat priv with all the soft interface information 2081 * @seq: debugfs table seq_file struct 2082 * 2083 * Prints our own multicast flags including a more specific reason why 2084 * they are set, that is prints the bridge and querier state too, to 2085 * the debugfs table specified via @seq. 2086 */ 2087 static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv, 2088 struct seq_file *seq) 2089 { 2090 struct batadv_mcast_mla_flags *mla_flags = &bat_priv->mcast.mla_flags; 2091 char querier4, querier6, shadowing4, shadowing6; 2092 bool bridged = mla_flags->bridged; 2093 u8 flags = mla_flags->tvlv_flags; 2094 2095 if (bridged) { 2096 querier4 = mla_flags->querier_ipv4.exists ? '.' : '4'; 2097 querier6 = mla_flags->querier_ipv6.exists ? '.' : '6'; 2098 shadowing4 = mla_flags->querier_ipv4.shadowing ? '4' : '.'; 2099 shadowing6 = mla_flags->querier_ipv6.shadowing ? '6' : '.'; 2100 } else { 2101 querier4 = '?'; 2102 querier6 = '?'; 2103 shadowing4 = '?'; 2104 shadowing6 = '?'; 2105 } 2106 2107 seq_printf(seq, "Multicast flags (own flags: [%c%c%c%s%s])\n", 2108 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.', 2109 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.', 2110 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.', 2111 !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ", 2112 !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". "); 2113 seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.'); 2114 seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n", 2115 querier4, querier6); 2116 seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n", 2117 shadowing4, shadowing6); 2118 seq_puts(seq, "-------------------------------------------\n"); 2119 seq_printf(seq, " %-10s %s\n", "Originator", "Flags"); 2120 } 2121 2122 /** 2123 * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes 2124 * @seq: seq file to print on 2125 * @offset: not used 2126 * 2127 * This prints a table of (primary) originators and their according 2128 * multicast flags, including (in the header) our own. 
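 *
 * The flag characters use the same scheme as the header: 'U' for
 * BATADV_MCAST_WANT_ALL_UNSNOOPABLES, '4'/'6' for
 * BATADV_MCAST_WANT_ALL_IPV4/IPV6 and "R4"/"R6" if the according
 * BATADV_MCAST_WANT_NO_RTR4/RTR6 flag is unset.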
2129 * 2130 * Return: always 0 2131 */ 2132 int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset) 2133 { 2134 struct net_device *net_dev = (struct net_device *)seq->private; 2135 struct batadv_priv *bat_priv = netdev_priv(net_dev); 2136 struct batadv_hard_iface *primary_if; 2137 struct batadv_hashtable *hash = bat_priv->orig_hash; 2138 struct batadv_orig_node *orig_node; 2139 struct hlist_head *head; 2140 u8 flags; 2141 u32 i; 2142 2143 primary_if = batadv_seq_print_text_primary_if_get(seq); 2144 if (!primary_if) 2145 return 0; 2146 2147 batadv_mcast_flags_print_header(bat_priv, seq); 2148 2149 for (i = 0; i < hash->size; i++) { 2150 head = &hash->table[i]; 2151 2152 rcu_read_lock(); 2153 hlist_for_each_entry_rcu(orig_node, head, hash_entry) { 2154 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2155 &orig_node->capa_initialized)) 2156 continue; 2157 2158 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2159 &orig_node->capabilities)) { 2160 seq_printf(seq, "%pM -\n", orig_node->orig); 2161 continue; 2162 } 2163 2164 flags = orig_node->mcast_flags; 2165 2166 seq_printf(seq, "%pM [%c%c%c%s%s]\n", orig_node->orig, 2167 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) 2168 ? 'U' : '.', 2169 (flags & BATADV_MCAST_WANT_ALL_IPV4) 2170 ? '4' : '.', 2171 (flags & BATADV_MCAST_WANT_ALL_IPV6) 2172 ? '6' : '.', 2173 !(flags & BATADV_MCAST_WANT_NO_RTR4) 2174 ? "R4" : ". ", 2175 !(flags & BATADV_MCAST_WANT_NO_RTR6) 2176 ? "R6" : ". "); 2177 } 2178 rcu_read_unlock(); 2179 } 2180 2181 batadv_hardif_put(primary_if); 2182 2183 return 0; 2184 } 2185 #endif 2186 2187 /** 2188 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message 2189 * @msg: buffer for the message 2190 * @bat_priv: the bat priv with all the soft interface information 2191 * 2192 * Return: 0 or error code. 2193 */ 2194 int batadv_mcast_mesh_info_put(struct sk_buff *msg, 2195 struct batadv_priv *bat_priv) 2196 { 2197 u32 flags = bat_priv->mcast.mla_flags.tvlv_flags; 2198 u32 flags_priv = BATADV_NO_FLAGS; 2199 2200 if (bat_priv->mcast.mla_flags.bridged) { 2201 flags_priv |= BATADV_MCAST_FLAGS_BRIDGED; 2202 2203 if (bat_priv->mcast.mla_flags.querier_ipv4.exists) 2204 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS; 2205 if (bat_priv->mcast.mla_flags.querier_ipv6.exists) 2206 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS; 2207 if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing) 2208 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING; 2209 if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing) 2210 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING; 2211 } 2212 2213 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) || 2214 nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv)) 2215 return -EMSGSIZE; 2216 2217 return 0; 2218 } 2219 2220 /** 2221 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table 2222 * to a netlink socket 2223 * @msg: buffer for the message 2224 * @portid: netlink port 2225 * @cb: Control block containing additional options 2226 * @orig_node: originator to dump the multicast flags of 2227 * 2228 * Return: 0 or error code. 
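 *
 * Note: if the originator has multicast TVLV support disabled, only its
 * address is dumped and no BATADV_ATTR_MCAST_FLAGS attribute is added.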
2229 */ 2230 static int 2231 batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, 2232 struct netlink_callback *cb, 2233 struct batadv_orig_node *orig_node) 2234 { 2235 void *hdr; 2236 2237 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, 2238 &batadv_netlink_family, NLM_F_MULTI, 2239 BATADV_CMD_GET_MCAST_FLAGS); 2240 if (!hdr) 2241 return -ENOBUFS; 2242 2243 genl_dump_check_consistent(cb, hdr); 2244 2245 if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, 2246 orig_node->orig)) { 2247 genlmsg_cancel(msg, hdr); 2248 return -EMSGSIZE; 2249 } 2250 2251 if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2252 &orig_node->capabilities)) { 2253 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, 2254 orig_node->mcast_flags)) { 2255 genlmsg_cancel(msg, hdr); 2256 return -EMSGSIZE; 2257 } 2258 } 2259 2260 genlmsg_end(msg, hdr); 2261 return 0; 2262 } 2263 2264 /** 2265 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags 2266 * table to a netlink socket 2267 * @msg: buffer for the message 2268 * @portid: netlink port 2269 * @cb: Control block containing additional options 2270 * @hash: hash to dump 2271 * @bucket: bucket index to dump 2272 * @idx_skip: How many entries to skip 2273 * 2274 * Return: 0 or error code. 2275 */ 2276 static int 2277 batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, 2278 struct netlink_callback *cb, 2279 struct batadv_hashtable *hash, 2280 unsigned int bucket, long *idx_skip) 2281 { 2282 struct batadv_orig_node *orig_node; 2283 long idx = 0; 2284 2285 spin_lock_bh(&hash->list_locks[bucket]); 2286 cb->seq = atomic_read(&hash->generation) << 1 | 1; 2287 2288 hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) { 2289 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2290 &orig_node->capa_initialized)) 2291 continue; 2292 2293 if (idx < *idx_skip) 2294 goto skip; 2295 2296 if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) { 2297 spin_unlock_bh(&hash->list_locks[bucket]); 2298 *idx_skip = idx; 2299 2300 return -EMSGSIZE; 2301 } 2302 2303 skip: 2304 idx++; 2305 } 2306 spin_unlock_bh(&hash->list_locks[bucket]); 2307 2308 return 0; 2309 } 2310 2311 /** 2312 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket 2313 * @msg: buffer for the message 2314 * @portid: netlink port 2315 * @cb: Control block containing additional options 2316 * @bat_priv: the bat priv with all the soft interface information 2317 * @bucket: current bucket to dump 2318 * @idx: index in current bucket to the next entry to dump 2319 * 2320 * Return: 0 or error code. 2321 */ 2322 static int 2323 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, 2324 struct netlink_callback *cb, 2325 struct batadv_priv *bat_priv, long *bucket, long *idx) 2326 { 2327 struct batadv_hashtable *hash = bat_priv->orig_hash; 2328 long bucket_tmp = *bucket; 2329 long idx_tmp = *idx; 2330 2331 while (bucket_tmp < hash->size) { 2332 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash, 2333 bucket_tmp, &idx_tmp)) 2334 break; 2335 2336 bucket_tmp++; 2337 idx_tmp = 0; 2338 } 2339 2340 *bucket = bucket_tmp; 2341 *idx = idx_tmp; 2342 2343 return msg->len; 2344 } 2345 2346 /** 2347 * batadv_mcast_netlink_get_primary() - get primary interface from netlink 2348 * callback 2349 * @cb: netlink callback structure 2350 * @primary_if: the primary interface pointer to return the result in 2351 * 2352 * Return: 0 or error code. 
2353 */ 2354 static int 2355 batadv_mcast_netlink_get_primary(struct netlink_callback *cb, 2356 struct batadv_hard_iface **primary_if) 2357 { 2358 struct batadv_hard_iface *hard_iface = NULL; 2359 struct net *net = sock_net(cb->skb->sk); 2360 struct net_device *soft_iface; 2361 struct batadv_priv *bat_priv; 2362 int ifindex; 2363 int ret = 0; 2364 2365 ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); 2366 if (!ifindex) 2367 return -EINVAL; 2368 2369 soft_iface = dev_get_by_index(net, ifindex); 2370 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { 2371 ret = -ENODEV; 2372 goto out; 2373 } 2374 2375 bat_priv = netdev_priv(soft_iface); 2376 2377 hard_iface = batadv_primary_if_get_selected(bat_priv); 2378 if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) { 2379 ret = -ENOENT; 2380 goto out; 2381 } 2382 2383 out: 2384 if (soft_iface) 2385 dev_put(soft_iface); 2386 2387 if (!ret && primary_if) 2388 *primary_if = hard_iface; 2389 else if (hard_iface) 2390 batadv_hardif_put(hard_iface); 2391 2392 return ret; 2393 } 2394 2395 /** 2396 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket 2397 * @msg: buffer for the message 2398 * @cb: callback structure containing arguments 2399 * 2400 * Return: message length. 2401 */ 2402 int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb) 2403 { 2404 struct batadv_hard_iface *primary_if = NULL; 2405 int portid = NETLINK_CB(cb->skb).portid; 2406 struct batadv_priv *bat_priv; 2407 long *bucket = &cb->args[0]; 2408 long *idx = &cb->args[1]; 2409 int ret; 2410 2411 ret = batadv_mcast_netlink_get_primary(cb, &primary_if); 2412 if (ret) 2413 return ret; 2414 2415 bat_priv = netdev_priv(primary_if->soft_iface); 2416 ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx); 2417 2418 batadv_hardif_put(primary_if); 2419 return ret; 2420 } 2421 2422 /** 2423 * batadv_mcast_free() - free the multicast optimizations structures 2424 * @bat_priv: the bat priv with all the soft interface information 2425 */ 2426 void batadv_mcast_free(struct batadv_priv *bat_priv) 2427 { 2428 cancel_delayed_work_sync(&bat_priv->mcast.work); 2429 2430 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2); 2431 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2); 2432 2433 /* safely calling outside of worker, as worker was canceled above */ 2434 batadv_mcast_mla_tt_retract(bat_priv, NULL); 2435 } 2436 2437 /** 2438 * batadv_mcast_purge_orig() - reset originator global mcast state modifications 2439 * @orig: the originator which is going to get purged 2440 */ 2441 void batadv_mcast_purge_orig(struct batadv_orig_node *orig) 2442 { 2443 struct batadv_priv *bat_priv = orig->bat_priv; 2444 2445 spin_lock_bh(&orig->mcast_handler_lock); 2446 2447 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); 2448 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); 2449 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); 2450 batadv_mcast_want_rtr4_update(bat_priv, orig, 2451 BATADV_MCAST_WANT_NO_RTR4); 2452 batadv_mcast_want_rtr6_update(bat_priv, orig, 2453 BATADV_MCAST_WANT_NO_RTR6); 2454 2455 spin_unlock_bh(&orig->mcast_handler_lock); 2456 } 2457
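
/* Illustrative sketch only, kept out of the build via #if 0: a helper of
 * this shape could render a multicast TVLV flags byte in the same
 * "[U46R4R6]"-style notation used by batadv_mcast_flags_print_header()
 * and batadv_mcast_flags_seq_print_text() above. The function name and
 * the caller-provided buffer are hypothetical and not part of batman-adv.
 * A flags byte with only BATADV_MCAST_WANT_ALL_UNSNOOPABLES set would be
 * rendered as "[U..R4R6]".
 */
#if 0
static void batadv_mcast_flags_to_str(u8 flags, char *buf, size_t buf_len)
{
	snprintf(buf, buf_len, "[%c%c%c%s%s]",
		 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		 !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		 !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
}
#endif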