1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) B.A.T.M.A.N. contributors: 3 * 4 * Linus Lüssing 5 */ 6 7 #include "multicast.h" 8 #include "main.h" 9 10 #include <linux/atomic.h> 11 #include <linux/bitops.h> 12 #include <linux/bug.h> 13 #include <linux/byteorder/generic.h> 14 #include <linux/container_of.h> 15 #include <linux/errno.h> 16 #include <linux/etherdevice.h> 17 #include <linux/gfp.h> 18 #include <linux/icmpv6.h> 19 #include <linux/if_bridge.h> 20 #include <linux/if_ether.h> 21 #include <linux/igmp.h> 22 #include <linux/in.h> 23 #include <linux/in6.h> 24 #include <linux/inetdevice.h> 25 #include <linux/ip.h> 26 #include <linux/ipv6.h> 27 #include <linux/jiffies.h> 28 #include <linux/kernel.h> 29 #include <linux/kref.h> 30 #include <linux/list.h> 31 #include <linux/lockdep.h> 32 #include <linux/netdevice.h> 33 #include <linux/netlink.h> 34 #include <linux/printk.h> 35 #include <linux/rculist.h> 36 #include <linux/rcupdate.h> 37 #include <linux/skbuff.h> 38 #include <linux/slab.h> 39 #include <linux/spinlock.h> 40 #include <linux/stddef.h> 41 #include <linux/string.h> 42 #include <linux/types.h> 43 #include <linux/workqueue.h> 44 #include <net/addrconf.h> 45 #include <net/genetlink.h> 46 #include <net/if_inet6.h> 47 #include <net/ip.h> 48 #include <net/ipv6.h> 49 #include <net/netlink.h> 50 #include <net/sock.h> 51 #include <uapi/linux/batadv_packet.h> 52 #include <uapi/linux/batman_adv.h> 53 54 #include "bridge_loop_avoidance.h" 55 #include "hard-interface.h" 56 #include "hash.h" 57 #include "log.h" 58 #include "netlink.h" 59 #include "send.h" 60 #include "soft-interface.h" 61 #include "translation-table.h" 62 #include "tvlv.h" 63 64 static void batadv_mcast_mla_update(struct work_struct *work); 65 66 /** 67 * batadv_mcast_start_timer() - schedule the multicast periodic worker 68 * @bat_priv: the bat priv with all the soft interface information 69 */ 70 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv) 71 { 72 queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work, 73 msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD)); 74 } 75 76 /** 77 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists 78 * @soft_iface: netdev struct of the mesh interface 79 * 80 * If the given soft interface has a bridge on top then the refcount 81 * of the according net device is increased. 82 * 83 * Return: NULL if no such bridge exists. Otherwise the net device of the 84 * bridge. 85 */ 86 static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface) 87 { 88 struct net_device *upper = soft_iface; 89 90 rcu_read_lock(); 91 do { 92 upper = netdev_master_upper_dev_get_rcu(upper); 93 } while (upper && !netif_is_bridge_master(upper)); 94 95 dev_hold(upper); 96 rcu_read_unlock(); 97 98 return upper; 99 } 100 101 /** 102 * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from 103 * node for IPv4 104 * @dev: the interface to check 105 * 106 * Checks the presence of an IPv4 multicast router on this node. 107 * 108 * Caller needs to hold rcu read lock. 109 * 110 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise. 
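* In other words, the flag announces the absence of a local IPv4 multicast router: a node with IPv4 multicast forwarding enabled (IN_DEV_MFORWARD()) keeps BATADV_MCAST_WANT_NO_RTR4 cleared, so other nodes keep forwarding routable IPv4 multicast traffic to it.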
111 */ 112 static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev) 113 { 114 struct in_device *in_dev = __in_dev_get_rcu(dev); 115 116 if (in_dev && IN_DEV_MFORWARD(in_dev)) 117 return BATADV_NO_FLAGS; 118 else 119 return BATADV_MCAST_WANT_NO_RTR4; 120 } 121 122 /** 123 * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from 124 * node for IPv6 125 * @dev: the interface to check 126 * 127 * Checks the presence of an IPv6 multicast router on this node. 128 * 129 * Caller needs to hold rcu read lock. 130 * 131 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise. 132 */ 133 #if IS_ENABLED(CONFIG_IPV6_MROUTE) 134 static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) 135 { 136 struct inet6_dev *in6_dev = __in6_dev_get(dev); 137 138 if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding)) 139 return BATADV_NO_FLAGS; 140 else 141 return BATADV_MCAST_WANT_NO_RTR6; 142 } 143 #else 144 static inline u8 145 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) 146 { 147 return BATADV_MCAST_WANT_NO_RTR6; 148 } 149 #endif 150 151 /** 152 * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node 153 * @bat_priv: the bat priv with all the soft interface information 154 * @bridge: bridge interface on top of the soft_iface if present, 155 * otherwise pass NULL 156 * 157 * Checks the presence of IPv4 and IPv6 multicast routers on this 158 * node. 159 * 160 * Return: 161 * BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present 162 * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present 163 * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present 164 * The former two OR'd: no multicast router is present 165 */ 166 static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv, 167 struct net_device *bridge) 168 { 169 struct net_device *dev = bridge ? bridge : bat_priv->soft_iface; 170 u8 flags = BATADV_NO_FLAGS; 171 172 rcu_read_lock(); 173 174 flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev); 175 flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev); 176 177 rcu_read_unlock(); 178 179 return flags; 180 } 181 182 /** 183 * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge 184 * @bat_priv: the bat priv with all the soft interface information 185 * @bridge: bridge interface on top of the soft_iface if present, 186 * otherwise pass NULL 187 * 188 * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge. 
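* Without a bridge there is nothing to query, so both no-router flags are reported as set. With a bridge, the detection is delegated to the bridge's own multicast router discovery via br_multicast_has_router_adjacent().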
189 * 190 * Return: 191 * BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present 192 * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present 193 * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present 194 * The former two OR'd: no multicast router is present 195 */ 196 static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv, 197 struct net_device *bridge) 198 { 199 struct net_device *dev = bat_priv->soft_iface; 200 u8 flags = BATADV_NO_FLAGS; 201 202 if (!bridge) 203 return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6; 204 205 if (!br_multicast_has_router_adjacent(dev, ETH_P_IP)) 206 flags |= BATADV_MCAST_WANT_NO_RTR4; 207 if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6)) 208 flags |= BATADV_MCAST_WANT_NO_RTR6; 209 210 return flags; 211 } 212 213 /** 214 * batadv_mcast_mla_rtr_flags_get() - get multicast router flags 215 * @bat_priv: the bat priv with all the soft interface information 216 * @bridge: bridge interface on top of the soft_iface if present, 217 * otherwise pass NULL 218 * 219 * Checks the presence of IPv4 and IPv6 multicast routers on this 220 * node or behind its bridge. 221 * 222 * Return: 223 * BATADV_NO_FLAGS: Both an IPv4 and IPv6 multicast router is present 224 * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present 225 * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present 226 * The former two OR'd: no multicast router is present 227 */ 228 static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv, 229 struct net_device *bridge) 230 { 231 u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6; 232 233 flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge); 234 flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge); 235 236 return flags; 237 } 238 239 /** 240 * batadv_mcast_mla_flags_get() - get the new multicast flags 241 * @bat_priv: the bat priv with all the soft interface information 242 * 243 * Return: A set of flags for the current/next TVLV, querier and 244 * bridge state. 245 */ 246 static struct batadv_mcast_mla_flags 247 batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv) 248 { 249 struct net_device *dev = bat_priv->soft_iface; 250 struct batadv_mcast_querier_state *qr4, *qr6; 251 struct batadv_mcast_mla_flags mla_flags; 252 struct net_device *bridge; 253 254 bridge = batadv_mcast_get_bridge(dev); 255 256 memset(&mla_flags, 0, sizeof(mla_flags)); 257 mla_flags.enabled = 1; 258 mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv, 259 bridge); 260 261 if (!bridge) 262 return mla_flags; 263 264 dev_put(bridge); 265 266 mla_flags.bridged = 1; 267 qr4 = &mla_flags.querier_ipv4; 268 qr6 = &mla_flags.querier_ipv6; 269 270 if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)) 271 pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n"); 272 273 qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP); 274 qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP); 275 276 qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6); 277 qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6); 278 279 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES; 280 281 /* 1) If no querier exists at all, then multicast listeners on 282 * our local TT clients behind the bridge will keep silent. 
283 * 2) If the selected querier is on one of our local TT clients, 284 * behind the bridge, then this querier might shadow multicast 285 * listeners on our local TT clients, behind this bridge. 286 * 287 * In both cases, we will signal to other batman nodes that 288 * we need all multicast traffic of the corresponding protocol. 289 */ 290 if (!qr4->exists || qr4->shadowing) { 291 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4; 292 mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4; 293 } 294 295 if (!qr6->exists || qr6->shadowing) { 296 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6; 297 mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6; 298 } 299 300 return mla_flags; 301 } 302 303 /** 304 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list 305 * @mcast_addr: the multicast address to check 306 * @mcast_list: the list with multicast addresses to search in 307 * 308 * Return: true if the given address is already in the given list. 309 * Otherwise returns false. 310 */ 311 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr, 312 struct hlist_head *mcast_list) 313 { 314 struct batadv_hw_addr *mcast_entry; 315 316 hlist_for_each_entry(mcast_entry, mcast_list, list) 317 if (batadv_compare_eth(mcast_entry->addr, mcast_addr)) 318 return true; 319 320 return false; 321 } 322 323 /** 324 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners 325 * @dev: the device to collect multicast addresses from 326 * @mcast_list: a list to put found addresses into 327 * @flags: flags indicating the new multicast state 328 * 329 * Collects multicast addresses of IPv4 multicast listeners residing 330 * on this kernel on the given soft interface, dev, in 331 * the given mcast_list. In general, multicast listeners provided by 332 * your multicast receiving applications run directly on this node. 333 * 334 * Return: -ENOMEM on memory allocation error or the number of 335 * items added to the mcast_list otherwise.
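* Groups already covered by a flag are skipped: with BATADV_MCAST_WANT_ALL_UNSNOOPABLES set, the link-local groups (224.0.0.0/24) are left out, and with BATADV_MCAST_WANT_NO_RTR4 unset, the remaining routable groups are left out, since the respective flag announces the interest in them instead.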
336 */ 337 static int 338 batadv_mcast_mla_softif_get_ipv4(struct net_device *dev, 339 struct hlist_head *mcast_list, 340 struct batadv_mcast_mla_flags *flags) 341 { 342 struct batadv_hw_addr *new; 343 struct in_device *in_dev; 344 u8 mcast_addr[ETH_ALEN]; 345 struct ip_mc_list *pmc; 346 int ret = 0; 347 348 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4) 349 return 0; 350 351 rcu_read_lock(); 352 353 in_dev = __in_dev_get_rcu(dev); 354 if (!in_dev) { 355 rcu_read_unlock(); 356 return 0; 357 } 358 359 for (pmc = rcu_dereference(in_dev->mc_list); pmc; 360 pmc = rcu_dereference(pmc->next_rcu)) { 361 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES && 362 ipv4_is_local_multicast(pmc->multiaddr)) 363 continue; 364 365 if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) && 366 !ipv4_is_local_multicast(pmc->multiaddr)) 367 continue; 368 369 ip_eth_mc_map(pmc->multiaddr, mcast_addr); 370 371 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list)) 372 continue; 373 374 new = kmalloc(sizeof(*new), GFP_ATOMIC); 375 if (!new) { 376 ret = -ENOMEM; 377 break; 378 } 379 380 ether_addr_copy(new->addr, mcast_addr); 381 hlist_add_head(&new->list, mcast_list); 382 ret++; 383 } 384 rcu_read_unlock(); 385 386 return ret; 387 } 388 389 /** 390 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners 391 * @dev: the device to collect multicast addresses from 392 * @mcast_list: a list to put found addresses into 393 * @flags: flags indicating the new multicast state 394 * 395 * Collects multicast addresses of IPv6 multicast listeners residing 396 * on this kernel on the given soft interface, dev, in 397 * the given mcast_list. In general, multicast listeners provided by 398 * your multicast receiving applications run directly on this node. 399 * 400 * Return: -ENOMEM on memory allocation error or the number of 401 * items added to the mcast_list otherwise. 
402 */ 403 #if IS_ENABLED(CONFIG_IPV6) 404 static int 405 batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, 406 struct hlist_head *mcast_list, 407 struct batadv_mcast_mla_flags *flags) 408 { 409 struct batadv_hw_addr *new; 410 struct inet6_dev *in6_dev; 411 u8 mcast_addr[ETH_ALEN]; 412 struct ifmcaddr6 *pmc6; 413 int ret = 0; 414 415 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6) 416 return 0; 417 418 rcu_read_lock(); 419 420 in6_dev = __in6_dev_get(dev); 421 if (!in6_dev) { 422 rcu_read_unlock(); 423 return 0; 424 } 425 426 for (pmc6 = rcu_dereference(in6_dev->mc_list); 427 pmc6; 428 pmc6 = rcu_dereference(pmc6->next)) { 429 if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) < 430 IPV6_ADDR_SCOPE_LINKLOCAL) 431 continue; 432 433 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES && 434 ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr)) 435 continue; 436 437 if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) && 438 IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) > 439 IPV6_ADDR_SCOPE_LINKLOCAL) 440 continue; 441 442 ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr); 443 444 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list)) 445 continue; 446 447 new = kmalloc(sizeof(*new), GFP_ATOMIC); 448 if (!new) { 449 ret = -ENOMEM; 450 break; 451 } 452 453 ether_addr_copy(new->addr, mcast_addr); 454 hlist_add_head(&new->list, mcast_list); 455 ret++; 456 } 457 rcu_read_unlock(); 458 459 return ret; 460 } 461 #else 462 static inline int 463 batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, 464 struct hlist_head *mcast_list, 465 struct batadv_mcast_mla_flags *flags) 466 { 467 return 0; 468 } 469 #endif 470 471 /** 472 * batadv_mcast_mla_softif_get() - get softif multicast listeners 473 * @dev: the device to collect multicast addresses from 474 * @mcast_list: a list to put found addresses into 475 * @flags: flags indicating the new multicast state 476 * 477 * Collects multicast addresses of multicast listeners residing 478 * on this kernel on the given soft interface, dev, in 479 * the given mcast_list. In general, multicast listeners provided by 480 * your multicast receiving applications run directly on this node. 481 * 482 * If there is a bridge interface on top of dev, collect from that one 483 * instead. Just like with IP addresses and routes, multicast listeners 484 * will(/should) register to the bridge interface instead of an 485 * enslaved bat0. 486 * 487 * Return: -ENOMEM on memory allocation error or the number of 488 * items added to the mcast_list otherwise. 489 */ 490 static int 491 batadv_mcast_mla_softif_get(struct net_device *dev, 492 struct hlist_head *mcast_list, 493 struct batadv_mcast_mla_flags *flags) 494 { 495 struct net_device *bridge = batadv_mcast_get_bridge(dev); 496 int ret4, ret6 = 0; 497 498 if (bridge) 499 dev = bridge; 500 501 ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags); 502 if (ret4 < 0) 503 goto out; 504 505 ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags); 506 if (ret6 < 0) { 507 ret4 = 0; 508 goto out; 509 } 510 511 out: 512 dev_put(bridge); 513 514 return ret4 + ret6; 515 } 516 517 /** 518 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address 519 * @dst: destination to write to - a multicast MAC address 520 * @src: source to read from - a multicast IP address 521 * 522 * Converts a given multicast IPv4/IPv6 address from a bridge 523 * to its matching multicast MAC address and copies it into the given 524 * destination buffer. 
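* For example, with the standard RFC 1112 / RFC 2464 mappings, 239.1.2.3 becomes 01:00:5e:01:02:03 and ff02::1 becomes 33:33:00:00:00:01.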
525 * 526 * Caller needs to make sure the destination buffer can hold 527 * at least ETH_ALEN bytes. 528 */ 529 static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src) 530 { 531 if (src->proto == htons(ETH_P_IP)) 532 ip_eth_mc_map(src->dst.ip4, dst); 533 #if IS_ENABLED(CONFIG_IPV6) 534 else if (src->proto == htons(ETH_P_IPV6)) 535 ipv6_eth_mc_map(&src->dst.ip6, dst); 536 #endif 537 else 538 eth_zero_addr(dst); 539 } 540 541 /** 542 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners 543 * @dev: a bridge slave whose bridge to collect multicast addresses from 544 * @mcast_list: a list to put found addresses into 545 * @flags: flags indicating the new multicast state 546 * 547 * Collects multicast addresses of multicast listeners residing 548 * on foreign, non-mesh devices which we gave access to our mesh via 549 * a bridge on top of the given soft interface, dev, in the given 550 * mcast_list. 551 * 552 * Return: -ENOMEM on memory allocation error or the number of 553 * items added to the mcast_list otherwise. 554 */ 555 static int batadv_mcast_mla_bridge_get(struct net_device *dev, 556 struct hlist_head *mcast_list, 557 struct batadv_mcast_mla_flags *flags) 558 { 559 struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list); 560 struct br_ip_list *br_ip_entry, *tmp; 561 u8 tvlv_flags = flags->tvlv_flags; 562 struct batadv_hw_addr *new; 563 u8 mcast_addr[ETH_ALEN]; 564 int ret; 565 566 /* we don't need to detect these devices/listeners, the IGMP/MLD 567 * snooping code of the Linux bridge already does that for us 568 */ 569 ret = br_multicast_list_adjacent(dev, &bridge_mcast_list); 570 if (ret < 0) 571 goto out; 572 573 list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) { 574 if (br_ip_entry->addr.proto == htons(ETH_P_IP)) { 575 if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4) 576 continue; 577 578 if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES && 579 ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4)) 580 continue; 581 582 if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) && 583 !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4)) 584 continue; 585 } 586 587 #if IS_ENABLED(CONFIG_IPV6) 588 if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) { 589 if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6) 590 continue; 591 592 if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES && 593 ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6)) 594 continue; 595 596 if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) && 597 IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) > 598 IPV6_ADDR_SCOPE_LINKLOCAL) 599 continue; 600 } 601 #endif 602 603 batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr); 604 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list)) 605 continue; 606 607 new = kmalloc(sizeof(*new), GFP_ATOMIC); 608 if (!new) { 609 ret = -ENOMEM; 610 break; 611 } 612 613 ether_addr_copy(new->addr, mcast_addr); 614 hlist_add_head(&new->list, mcast_list); 615 } 616 617 out: 618 list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) { 619 list_del(&br_ip_entry->list); 620 kfree(br_ip_entry); 621 } 622 623 return ret; 624 } 625 626 /** 627 * batadv_mcast_mla_list_free() - free a list of multicast addresses 628 * @mcast_list: the list to free 629 * 630 * Removes and frees all items in the given mcast_list. 
631 */ 632 static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list) 633 { 634 struct batadv_hw_addr *mcast_entry; 635 struct hlist_node *tmp; 636 637 hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) { 638 hlist_del(&mcast_entry->list); 639 kfree(mcast_entry); 640 } 641 } 642 643 /** 644 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements 645 * @bat_priv: the bat priv with all the soft interface information 646 * @mcast_list: a list of addresses which should _not_ be removed 647 * 648 * Retracts the announcement of any multicast listener from the 649 * translation table except the ones listed in the given mcast_list. 650 * 651 * If mcast_list is NULL then all are retracted. 652 */ 653 static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv, 654 struct hlist_head *mcast_list) 655 { 656 struct batadv_hw_addr *mcast_entry; 657 struct hlist_node *tmp; 658 659 hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list, 660 list) { 661 if (mcast_list && 662 batadv_mcast_mla_is_duplicate(mcast_entry->addr, 663 mcast_list)) 664 continue; 665 666 batadv_tt_local_remove(bat_priv, mcast_entry->addr, 667 BATADV_NO_FLAGS, 668 "mcast TT outdated", false); 669 670 hlist_del(&mcast_entry->list); 671 kfree(mcast_entry); 672 } 673 } 674 675 /** 676 * batadv_mcast_mla_tt_add() - add multicast listener announcements 677 * @bat_priv: the bat priv with all the soft interface information 678 * @mcast_list: a list of addresses which are going to get added 679 * 680 * Adds multicast listener announcements from the given mcast_list to the 681 * translation table if they have not been added yet. 682 */ 683 static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv, 684 struct hlist_head *mcast_list) 685 { 686 struct batadv_hw_addr *mcast_entry; 687 struct hlist_node *tmp; 688 689 if (!mcast_list) 690 return; 691 692 hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) { 693 if (batadv_mcast_mla_is_duplicate(mcast_entry->addr, 694 &bat_priv->mcast.mla_list)) 695 continue; 696 697 if (!batadv_tt_local_add(bat_priv->soft_iface, 698 mcast_entry->addr, BATADV_NO_FLAGS, 699 BATADV_NULL_IFINDEX, BATADV_NO_MARK)) 700 continue; 701 702 hlist_del(&mcast_entry->list); 703 hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list); 704 } 705 } 706 707 /** 708 * batadv_mcast_querier_log() - debug output regarding the querier status on 709 * link 710 * @bat_priv: the bat priv with all the soft interface information 711 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD") 712 * @old_state: the previous querier state on our link 713 * @new_state: the new querier state on our link 714 * 715 * Outputs debug messages to the logging facility with log level 'mcast' 716 * regarding changes to the querier status on the link which are relevant 717 * to our multicast optimizations. 718 * 719 * Usually this is about whether a querier appeared or vanished in 720 * our mesh or whether the querier is in the suboptimal position of being 721 * behind our local bridge segment: Snooping switches will directly 722 * forward listener reports to the querier, therefore batman-adv and 723 * the bridge will potentially not see these listeners - the querier is 724 * potentially shadowing listeners from us then. 725 * 726 * This is only interesting for nodes with a bridge on top of their 727 * soft interface. 
728 */ 729 static void 730 batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto, 731 struct batadv_mcast_querier_state *old_state, 732 struct batadv_mcast_querier_state *new_state) 733 { 734 if (!old_state->exists && new_state->exists) 735 batadv_info(bat_priv->soft_iface, "%s Querier appeared\n", 736 str_proto); 737 else if (old_state->exists && !new_state->exists) 738 batadv_info(bat_priv->soft_iface, 739 "%s Querier disappeared - multicast optimizations disabled\n", 740 str_proto); 741 else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists) 742 batadv_info(bat_priv->soft_iface, 743 "No %s Querier present - multicast optimizations disabled\n", 744 str_proto); 745 746 if (new_state->exists) { 747 if ((!old_state->shadowing && new_state->shadowing) || 748 (!old_state->exists && new_state->shadowing)) 749 batadv_dbg(BATADV_DBG_MCAST, bat_priv, 750 "%s Querier is behind our bridged segment: Might shadow listeners\n", 751 str_proto); 752 else if (old_state->shadowing && !new_state->shadowing) 753 batadv_dbg(BATADV_DBG_MCAST, bat_priv, 754 "%s Querier is not behind our bridged segment\n", 755 str_proto); 756 } 757 } 758 759 /** 760 * batadv_mcast_bridge_log() - debug output for topology changes in bridged 761 * setups 762 * @bat_priv: the bat priv with all the soft interface information 763 * @new_flags: flags indicating the new multicast state 764 * 765 * If no bridges are ever used on this node, then this function does nothing. 766 * 767 * Otherwise this function outputs debug information to the 'mcast' log level 768 * which might be relevant to our multicast optimizations. 769 * 770 * More precisely, it outputs information when a bridge interface is added or 771 * removed from a soft interface. And when a bridge is present, it further 772 * outputs information about the querier state which is relevant for the 773 * multicast flags this node is going to set. 774 */ 775 static void 776 batadv_mcast_bridge_log(struct batadv_priv *bat_priv, 777 struct batadv_mcast_mla_flags *new_flags) 778 { 779 struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags; 780 781 if (!old_flags->bridged && new_flags->bridged) 782 batadv_dbg(BATADV_DBG_MCAST, bat_priv, 783 "Bridge added: Setting Unsnoopables(U)-flag\n"); 784 else if (old_flags->bridged && !new_flags->bridged) 785 batadv_dbg(BATADV_DBG_MCAST, bat_priv, 786 "Bridge removed: Unsetting Unsnoopables(U)-flag\n"); 787 788 if (new_flags->bridged) { 789 batadv_mcast_querier_log(bat_priv, "IGMP", 790 &old_flags->querier_ipv4, 791 &new_flags->querier_ipv4); 792 batadv_mcast_querier_log(bat_priv, "MLD", 793 &old_flags->querier_ipv6, 794 &new_flags->querier_ipv6); 795 } 796 } 797 798 /** 799 * batadv_mcast_flags_log() - output debug information about mcast flag changes 800 * @bat_priv: the bat priv with all the soft interface information 801 * @flags: TVLV flags indicating the new multicast state 802 * 803 * Whenever the multicast TVLV flags this node announces change, this function 804 * should be used to notify userspace about the change. 805 */ 806 static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags) 807 { 808 bool old_enabled = bat_priv->mcast.mla_flags.enabled; 809 u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags; 810 char str_old_flags[] = "[.... . ]"; 811 812 sprintf(str_old_flags, "[%c%c%c%s%s]", 813 (old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.', 814 (old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.', 815 (old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? 
'6' : '.', 816 !(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ", 817 !(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". "); 818 819 batadv_dbg(BATADV_DBG_MCAST, bat_priv, 820 "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n", 821 old_enabled ? str_old_flags : "<undefined>", 822 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.', 823 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.', 824 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.', 825 !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ", 826 !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". "); 827 } 828 829 /** 830 * batadv_mcast_mla_flags_update() - update multicast flags 831 * @bat_priv: the bat priv with all the soft interface information 832 * @flags: flags indicating the new multicast state 833 * 834 * Updates the own multicast tvlv with our current multicast related settings, 835 * capabilities and inabilities. 836 */ 837 static void 838 batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv, 839 struct batadv_mcast_mla_flags *flags) 840 { 841 struct batadv_tvlv_mcast_data mcast_data; 842 843 if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags))) 844 return; 845 846 batadv_mcast_bridge_log(bat_priv, flags); 847 batadv_mcast_flags_log(bat_priv, flags->tvlv_flags); 848 849 mcast_data.flags = flags->tvlv_flags; 850 memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved)); 851 852 batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2, 853 &mcast_data, sizeof(mcast_data)); 854 855 bat_priv->mcast.mla_flags = *flags; 856 } 857 858 /** 859 * __batadv_mcast_mla_update() - update the own MLAs 860 * @bat_priv: the bat priv with all the soft interface information 861 * 862 * Updates the own multicast listener announcements in the translation 863 * table as well as the own, announced multicast tvlv container. 864 * 865 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list 866 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are 867 * ensured by the non-parallel execution of the worker this function 868 * belongs to. 869 */ 870 static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv) 871 { 872 struct net_device *soft_iface = bat_priv->soft_iface; 873 struct hlist_head mcast_list = HLIST_HEAD_INIT; 874 struct batadv_mcast_mla_flags flags; 875 int ret; 876 877 flags = batadv_mcast_mla_flags_get(bat_priv); 878 879 ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags); 880 if (ret < 0) 881 goto out; 882 883 ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags); 884 if (ret < 0) 885 goto out; 886 887 spin_lock(&bat_priv->mcast.mla_lock); 888 batadv_mcast_mla_tt_retract(bat_priv, &mcast_list); 889 batadv_mcast_mla_tt_add(bat_priv, &mcast_list); 890 batadv_mcast_mla_flags_update(bat_priv, &flags); 891 spin_unlock(&bat_priv->mcast.mla_lock); 892 893 out: 894 batadv_mcast_mla_list_free(&mcast_list); 895 } 896 897 /** 898 * batadv_mcast_mla_update() - update the own MLAs 899 * @work: kernel work struct 900 * 901 * Updates the own multicast listener announcements in the translation 902 * table as well as the own, announced multicast tvlv container. 903 * 904 * In the end, reschedules the work timer. 
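* The update therefore runs periodically, once every BATADV_MCAST_WORK_PERIOD milliseconds (see batadv_mcast_start_timer()).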
905 */ 906 static void batadv_mcast_mla_update(struct work_struct *work) 907 { 908 struct delayed_work *delayed_work; 909 struct batadv_priv_mcast *priv_mcast; 910 struct batadv_priv *bat_priv; 911 912 delayed_work = to_delayed_work(work); 913 priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work); 914 bat_priv = container_of(priv_mcast, struct batadv_priv, mcast); 915 916 __batadv_mcast_mla_update(bat_priv); 917 batadv_mcast_start_timer(bat_priv); 918 } 919 920 /** 921 * batadv_mcast_is_report_ipv4() - check for IGMP reports 922 * @skb: the ethernet frame destined for the mesh 923 * 924 * This call might reallocate skb data. 925 * 926 * Checks whether the given frame is a valid IGMP report. 927 * 928 * Return: If so then true, otherwise false. 929 */ 930 static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb) 931 { 932 if (ip_mc_check_igmp(skb) < 0) 933 return false; 934 935 switch (igmp_hdr(skb)->type) { 936 case IGMP_HOST_MEMBERSHIP_REPORT: 937 case IGMPV2_HOST_MEMBERSHIP_REPORT: 938 case IGMPV3_HOST_MEMBERSHIP_REPORT: 939 return true; 940 } 941 942 return false; 943 } 944 945 /** 946 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding 947 * potential 948 * @bat_priv: the bat priv with all the soft interface information 949 * @skb: the IPv4 packet to check 950 * @is_unsnoopable: stores whether the destination is snoopable 951 * @is_routable: stores whether the destination is routable 952 * 953 * Checks whether the given IPv4 packet has the potential to be forwarded with a 954 * mode more optimal than classic flooding. 955 * 956 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory 957 * allocation failure. 958 */ 959 static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv, 960 struct sk_buff *skb, 961 bool *is_unsnoopable, 962 int *is_routable) 963 { 964 struct iphdr *iphdr; 965 966 /* We might fail due to out-of-memory -> drop it */ 967 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr))) 968 return -ENOMEM; 969 970 if (batadv_mcast_is_report_ipv4(skb)) 971 return -EINVAL; 972 973 iphdr = ip_hdr(skb); 974 975 /* link-local multicast listeners behind a bridge are 976 * not snoopable (see RFC4541, section 2.1.2.2) 977 */ 978 if (ipv4_is_local_multicast(iphdr->daddr)) 979 *is_unsnoopable = true; 980 else 981 *is_routable = ETH_P_IP; 982 983 return 0; 984 } 985 986 /** 987 * batadv_mcast_is_report_ipv6() - check for MLD reports 988 * @skb: the ethernet frame destined for the mesh 989 * 990 * This call might reallocate skb data. 991 * 992 * Checks whether the given frame is a valid MLD report. 993 * 994 * Return: If so then true, otherwise false. 995 */ 996 static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb) 997 { 998 if (ipv6_mc_check_mld(skb) < 0) 999 return false; 1000 1001 switch (icmp6_hdr(skb)->icmp6_type) { 1002 case ICMPV6_MGM_REPORT: 1003 case ICMPV6_MLD2_REPORT: 1004 return true; 1005 } 1006 1007 return false; 1008 } 1009 1010 /** 1011 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding 1012 * potential 1013 * @bat_priv: the bat priv with all the soft interface information 1014 * @skb: the IPv6 packet to check 1015 * @is_unsnoopable: stores whether the destination is snoopable 1016 * @is_routable: stores whether the destination is routable 1017 * 1018 * Checks whether the given IPv6 packet has the potential to be forwarded with a 1019 * mode more optimal than classic flooding. 1020 * 1021 * Return: If so then 0. 
Otherwise -EINVAL, or -ENOMEM if we are out of memory 1022 */ 1023 static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv, 1024 struct sk_buff *skb, 1025 bool *is_unsnoopable, 1026 int *is_routable) 1027 { 1028 struct ipv6hdr *ip6hdr; 1029 1030 /* We might fail due to out-of-memory -> drop it */ 1031 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr))) 1032 return -ENOMEM; 1033 1034 if (batadv_mcast_is_report_ipv6(skb)) 1035 return -EINVAL; 1036 1037 ip6hdr = ipv6_hdr(skb); 1038 1039 if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL) 1040 return -EINVAL; 1041 1042 /* link-local-all-nodes multicast listeners behind a bridge are 1043 * not snoopable (see RFC4541, section 3, paragraph 3) 1044 */ 1045 if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr)) 1046 *is_unsnoopable = true; 1047 else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL) 1048 *is_routable = ETH_P_IPV6; 1049 1050 return 0; 1051 } 1052 1053 /** 1054 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential 1055 * @bat_priv: the bat priv with all the soft interface information 1056 * @skb: the multicast frame to check 1057 * @is_unsnoopable: stores whether the destination is snoopable 1058 * @is_routable: stores whether the destination is routable 1059 * 1060 * Checks whether the given multicast ethernet frame has the potential to be 1061 * forwarded with a mode more optimal than classic flooding. 1062 * 1063 * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory 1064 */ 1065 static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv, 1066 struct sk_buff *skb, 1067 bool *is_unsnoopable, 1068 int *is_routable) 1069 { 1070 struct ethhdr *ethhdr = eth_hdr(skb); 1071 1072 if (!atomic_read(&bat_priv->multicast_mode)) 1073 return -EINVAL; 1074 1075 switch (ntohs(ethhdr->h_proto)) { 1076 case ETH_P_IP: 1077 return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb, 1078 is_unsnoopable, 1079 is_routable); 1080 case ETH_P_IPV6: 1081 if (!IS_ENABLED(CONFIG_IPV6)) 1082 return -EINVAL; 1083 1084 return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb, 1085 is_unsnoopable, 1086 is_routable); 1087 default: 1088 return -EINVAL; 1089 } 1090 } 1091 1092 /** 1093 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast 1094 * interest 1095 * @bat_priv: the bat priv with all the soft interface information 1096 * @ethhdr: ethernet header of a packet 1097 * 1098 * Return: the number of nodes which want all IPv4 multicast traffic if the 1099 * given ethhdr is from an IPv4 packet or the number of nodes which want all 1100 * IPv6 traffic if it matches an IPv6 packet. 1101 */ 1102 static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv, 1103 struct ethhdr *ethhdr) 1104 { 1105 switch (ntohs(ethhdr->h_proto)) { 1106 case ETH_P_IP: 1107 return atomic_read(&bat_priv->mcast.num_want_all_ipv4); 1108 case ETH_P_IPV6: 1109 return atomic_read(&bat_priv->mcast.num_want_all_ipv6); 1110 default: 1111 /* we shouldn't be here... */ 1112 return 0; 1113 } 1114 } 1115 1116 /** 1117 * batadv_mcast_forw_rtr_count() - count nodes with a multicast router 1118 * @bat_priv: the bat priv with all the soft interface information 1119 * @protocol: the ethernet protocol type to count multicast routers for 1120 * 1121 * Return: the number of nodes which want all routable IPv4 multicast traffic 1122 * if the protocol is ETH_P_IP or the number of nodes which want all routable 1123 * IPv6 traffic if the protocol is ETH_P_IPV6.
Otherwise returns 0. 1124 */ 1125 1126 static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv, 1127 int protocol) 1128 { 1129 switch (protocol) { 1130 case ETH_P_IP: 1131 return atomic_read(&bat_priv->mcast.num_want_all_rtr4); 1132 case ETH_P_IPV6: 1133 return atomic_read(&bat_priv->mcast.num_want_all_rtr6); 1134 default: 1135 return 0; 1136 } 1137 } 1138 1139 /** 1140 * batadv_mcast_forw_tt_node_get() - get a multicast tt node 1141 * @bat_priv: the bat priv with all the soft interface information 1142 * @ethhdr: the ether header containing the multicast destination 1143 * 1144 * Return: an orig_node matching the multicast address provided by ethhdr 1145 * via a translation table lookup. This increases the returned nodes refcount. 1146 */ 1147 static struct batadv_orig_node * 1148 batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv, 1149 struct ethhdr *ethhdr) 1150 { 1151 return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest, 1152 BATADV_NO_FLAGS); 1153 } 1154 1155 /** 1156 * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag 1157 * @bat_priv: the bat priv with all the soft interface information 1158 * 1159 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and 1160 * increases its refcount. 1161 */ 1162 static struct batadv_orig_node * 1163 batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv) 1164 { 1165 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL; 1166 1167 rcu_read_lock(); 1168 hlist_for_each_entry_rcu(tmp_orig_node, 1169 &bat_priv->mcast.want_all_ipv4_list, 1170 mcast_want_all_ipv4_node) { 1171 if (!kref_get_unless_zero(&tmp_orig_node->refcount)) 1172 continue; 1173 1174 orig_node = tmp_orig_node; 1175 break; 1176 } 1177 rcu_read_unlock(); 1178 1179 return orig_node; 1180 } 1181 1182 /** 1183 * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag 1184 * @bat_priv: the bat priv with all the soft interface information 1185 * 1186 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set 1187 * and increases its refcount. 1188 */ 1189 static struct batadv_orig_node * 1190 batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv) 1191 { 1192 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL; 1193 1194 rcu_read_lock(); 1195 hlist_for_each_entry_rcu(tmp_orig_node, 1196 &bat_priv->mcast.want_all_ipv6_list, 1197 mcast_want_all_ipv6_node) { 1198 if (!kref_get_unless_zero(&tmp_orig_node->refcount)) 1199 continue; 1200 1201 orig_node = tmp_orig_node; 1202 break; 1203 } 1204 rcu_read_unlock(); 1205 1206 return orig_node; 1207 } 1208 1209 /** 1210 * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag 1211 * @bat_priv: the bat priv with all the soft interface information 1212 * @ethhdr: an ethernet header to determine the protocol family from 1213 * 1214 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or 1215 * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, sets and 1216 * increases its refcount. 1217 */ 1218 static struct batadv_orig_node * 1219 batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv, 1220 struct ethhdr *ethhdr) 1221 { 1222 switch (ntohs(ethhdr->h_proto)) { 1223 case ETH_P_IP: 1224 return batadv_mcast_forw_ipv4_node_get(bat_priv); 1225 case ETH_P_IPV6: 1226 return batadv_mcast_forw_ipv6_node_get(bat_priv); 1227 default: 1228 /* we shouldn't be here... 
*/ 1229 return NULL; 1230 } 1231 } 1232 1233 /** 1234 * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag 1235 * @bat_priv: the bat priv with all the soft interface information 1236 * 1237 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag 1238 * set and increases its refcount. 1239 */ 1240 static struct batadv_orig_node * 1241 batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv) 1242 { 1243 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL; 1244 1245 rcu_read_lock(); 1246 hlist_for_each_entry_rcu(tmp_orig_node, 1247 &bat_priv->mcast.want_all_unsnoopables_list, 1248 mcast_want_all_unsnoopables_node) { 1249 if (!kref_get_unless_zero(&tmp_orig_node->refcount)) 1250 continue; 1251 1252 orig_node = tmp_orig_node; 1253 break; 1254 } 1255 rcu_read_unlock(); 1256 1257 return orig_node; 1258 } 1259 1260 /** 1261 * batadv_mcast_forw_rtr4_node_get() - get a node with an ipv4 mcast router flag 1262 * @bat_priv: the bat priv with all the soft interface information 1263 * 1264 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR4 flag unset and 1265 * increases its refcount. 1266 */ 1267 static struct batadv_orig_node * 1268 batadv_mcast_forw_rtr4_node_get(struct batadv_priv *bat_priv) 1269 { 1270 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL; 1271 1272 rcu_read_lock(); 1273 hlist_for_each_entry_rcu(tmp_orig_node, 1274 &bat_priv->mcast.want_all_rtr4_list, 1275 mcast_want_all_rtr4_node) { 1276 if (!kref_get_unless_zero(&tmp_orig_node->refcount)) 1277 continue; 1278 1279 orig_node = tmp_orig_node; 1280 break; 1281 } 1282 rcu_read_unlock(); 1283 1284 return orig_node; 1285 } 1286 1287 /** 1288 * batadv_mcast_forw_rtr6_node_get() - get a node with an ipv6 mcast router flag 1289 * @bat_priv: the bat priv with all the soft interface information 1290 * 1291 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR6 flag unset 1292 * and increases its refcount. 1293 */ 1294 static struct batadv_orig_node * 1295 batadv_mcast_forw_rtr6_node_get(struct batadv_priv *bat_priv) 1296 { 1297 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL; 1298 1299 rcu_read_lock(); 1300 hlist_for_each_entry_rcu(tmp_orig_node, 1301 &bat_priv->mcast.want_all_rtr6_list, 1302 mcast_want_all_rtr6_node) { 1303 if (!kref_get_unless_zero(&tmp_orig_node->refcount)) 1304 continue; 1305 1306 orig_node = tmp_orig_node; 1307 break; 1308 } 1309 rcu_read_unlock(); 1310 1311 return orig_node; 1312 } 1313 1314 /** 1315 * batadv_mcast_forw_rtr_node_get() - get a node with an ipv4/ipv6 router flag 1316 * @bat_priv: the bat priv with all the soft interface information 1317 * @ethhdr: an ethernet header to determine the protocol family from 1318 * 1319 * Return: an orig_node which has no BATADV_MCAST_WANT_NO_RTR4 or 1320 * BATADV_MCAST_WANT_NO_RTR6 flag, depending on the provided ethhdr, set and 1321 * increases its refcount. 1322 */ 1323 static struct batadv_orig_node * 1324 batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv, 1325 struct ethhdr *ethhdr) 1326 { 1327 switch (ntohs(ethhdr->h_proto)) { 1328 case ETH_P_IP: 1329 return batadv_mcast_forw_rtr4_node_get(bat_priv); 1330 case ETH_P_IPV6: 1331 return batadv_mcast_forw_rtr6_node_get(bat_priv); 1332 default: 1333 /* we shouldn't be here... 
*/ 1334 return NULL; 1335 } 1336 } 1337 1338 /** 1339 * batadv_mcast_forw_mode() - check on how to forward a multicast packet 1340 * @bat_priv: the bat priv with all the soft interface information 1341 * @skb: The multicast packet to check 1342 * @orig: an originator to be set to forward the skb to 1343 * @is_routable: stores whether the destination is routable 1344 * 1345 * Return: the forwarding mode as enum batadv_forw_mode and in case of 1346 * BATADV_FORW_SINGLE set the orig to the single originator the skb 1347 * should be forwarded to. 1348 */ 1349 enum batadv_forw_mode 1350 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, 1351 struct batadv_orig_node **orig, int *is_routable) 1352 { 1353 int ret, tt_count, ip_count, unsnoop_count, total_count; 1354 bool is_unsnoopable = false; 1355 unsigned int mcast_fanout; 1356 struct ethhdr *ethhdr; 1357 int rtr_count = 0; 1358 1359 ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable, 1360 is_routable); 1361 if (ret == -ENOMEM) 1362 return BATADV_FORW_NONE; 1363 else if (ret < 0) 1364 return BATADV_FORW_ALL; 1365 1366 ethhdr = eth_hdr(skb); 1367 1368 tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest, 1369 BATADV_NO_FLAGS); 1370 ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr); 1371 unsnoop_count = !is_unsnoopable ? 0 : 1372 atomic_read(&bat_priv->mcast.num_want_all_unsnoopables); 1373 rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable); 1374 1375 total_count = tt_count + ip_count + unsnoop_count + rtr_count; 1376 1377 switch (total_count) { 1378 case 1: 1379 if (tt_count) 1380 *orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr); 1381 else if (ip_count) 1382 *orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr); 1383 else if (unsnoop_count) 1384 *orig = batadv_mcast_forw_unsnoop_node_get(bat_priv); 1385 else if (rtr_count) 1386 *orig = batadv_mcast_forw_rtr_node_get(bat_priv, 1387 ethhdr); 1388 1389 if (*orig) 1390 return BATADV_FORW_SINGLE; 1391 1392 fallthrough; 1393 case 0: 1394 return BATADV_FORW_NONE; 1395 default: 1396 mcast_fanout = atomic_read(&bat_priv->multicast_fanout); 1397 1398 if (!unsnoop_count && total_count <= mcast_fanout) 1399 return BATADV_FORW_SOME; 1400 } 1401 1402 return BATADV_FORW_ALL; 1403 } 1404 1405 /** 1406 * batadv_mcast_forw_send_orig() - send a multicast packet to an originator 1407 * @bat_priv: the bat priv with all the soft interface information 1408 * @skb: the multicast packet to send 1409 * @vid: the vlan identifier 1410 * @orig_node: the originator to send the packet to 1411 * 1412 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. 1413 */ 1414 int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, 1415 struct sk_buff *skb, 1416 unsigned short vid, 1417 struct batadv_orig_node *orig_node) 1418 { 1419 /* Avoid sending multicast-in-unicast packets to other BLA 1420 * gateways - they already got the frame from the LAN side 1421 * we share with them. 1422 * TODO: Refactor to take BLA into account earlier, to avoid 1423 * reducing the mcast_fanout count. 
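* (BLA, bridge loop avoidance: backbone gateways are nodes sharing a common LAN segment with us, so they have already received this frame on that segment.)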
1424 */ 1425 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) { 1426 dev_kfree_skb(skb); 1427 return NET_XMIT_SUCCESS; 1428 } 1429 1430 return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, 1431 orig_node, vid); 1432 } 1433 1434 /** 1435 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners 1436 * @bat_priv: the bat priv with all the soft interface information 1437 * @skb: the multicast packet to transmit 1438 * @vid: the vlan identifier 1439 * 1440 * Sends copies of a frame with multicast destination to any multicast 1441 * listener registered in the translation table. A transmission is performed 1442 * via a batman-adv unicast packet for each such destination node. 1443 * 1444 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS 1445 * otherwise. 1446 */ 1447 static int 1448 batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb, 1449 unsigned short vid) 1450 { 1451 int ret = NET_XMIT_SUCCESS; 1452 struct sk_buff *newskb; 1453 1454 struct batadv_tt_orig_list_entry *orig_entry; 1455 1456 struct batadv_tt_global_entry *tt_global; 1457 const u8 *addr = eth_hdr(skb)->h_dest; 1458 1459 tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid); 1460 if (!tt_global) 1461 goto out; 1462 1463 rcu_read_lock(); 1464 hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) { 1465 newskb = skb_copy(skb, GFP_ATOMIC); 1466 if (!newskb) { 1467 ret = NET_XMIT_DROP; 1468 break; 1469 } 1470 1471 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, 1472 orig_entry->orig_node); 1473 } 1474 rcu_read_unlock(); 1475 1476 batadv_tt_global_entry_put(tt_global); 1477 1478 out: 1479 return ret; 1480 } 1481 1482 /** 1483 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4 1484 * @bat_priv: the bat priv with all the soft interface information 1485 * @skb: the multicast packet to transmit 1486 * @vid: the vlan identifier 1487 * 1488 * Sends copies of a frame with multicast destination to any node with a 1489 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a 1490 * batman-adv unicast packet for each such destination node. 1491 * 1492 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS 1493 * otherwise. 1494 */ 1495 static int 1496 batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv, 1497 struct sk_buff *skb, unsigned short vid) 1498 { 1499 struct batadv_orig_node *orig_node; 1500 int ret = NET_XMIT_SUCCESS; 1501 struct sk_buff *newskb; 1502 1503 rcu_read_lock(); 1504 hlist_for_each_entry_rcu(orig_node, 1505 &bat_priv->mcast.want_all_ipv4_list, 1506 mcast_want_all_ipv4_node) { 1507 newskb = skb_copy(skb, GFP_ATOMIC); 1508 if (!newskb) { 1509 ret = NET_XMIT_DROP; 1510 break; 1511 } 1512 1513 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); 1514 } 1515 rcu_read_unlock(); 1516 return ret; 1517 } 1518 1519 /** 1520 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6 1521 * @bat_priv: the bat priv with all the soft interface information 1522 * @skb: The multicast packet to transmit 1523 * @vid: the vlan identifier 1524 * 1525 * Sends copies of a frame with multicast destination to any node with a 1526 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a 1527 * batman-adv unicast packet for each such destination node. 1528 * 1529 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS 1530 * otherwise. 
1531 */ 1532 static int 1533 batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv, 1534 struct sk_buff *skb, unsigned short vid) 1535 { 1536 struct batadv_orig_node *orig_node; 1537 int ret = NET_XMIT_SUCCESS; 1538 struct sk_buff *newskb; 1539 1540 rcu_read_lock(); 1541 hlist_for_each_entry_rcu(orig_node, 1542 &bat_priv->mcast.want_all_ipv6_list, 1543 mcast_want_all_ipv6_node) { 1544 newskb = skb_copy(skb, GFP_ATOMIC); 1545 if (!newskb) { 1546 ret = NET_XMIT_DROP; 1547 break; 1548 } 1549 1550 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); 1551 } 1552 rcu_read_unlock(); 1553 return ret; 1554 } 1555 1556 /** 1557 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list 1558 * @bat_priv: the bat priv with all the soft interface information 1559 * @skb: the multicast packet to transmit 1560 * @vid: the vlan identifier 1561 * 1562 * Sends copies of a frame with multicast destination to any node with a 1563 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A 1564 * transmission is performed via a batman-adv unicast packet for each such 1565 * destination node. 1566 * 1567 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family 1568 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise. 1569 */ 1570 static int 1571 batadv_mcast_forw_want_all(struct batadv_priv *bat_priv, 1572 struct sk_buff *skb, unsigned short vid) 1573 { 1574 switch (ntohs(eth_hdr(skb)->h_proto)) { 1575 case ETH_P_IP: 1576 return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid); 1577 case ETH_P_IPV6: 1578 return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid); 1579 default: 1580 /* we shouldn't be here... */ 1581 return NET_XMIT_DROP; 1582 } 1583 } 1584 1585 /** 1586 * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4 1587 * @bat_priv: the bat priv with all the soft interface information 1588 * @skb: the multicast packet to transmit 1589 * @vid: the vlan identifier 1590 * 1591 * Sends copies of a frame with multicast destination to any node with a 1592 * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a 1593 * batman-adv unicast packet for each such destination node. 1594 * 1595 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS 1596 * otherwise. 1597 */ 1598 static int 1599 batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv, 1600 struct sk_buff *skb, unsigned short vid) 1601 { 1602 struct batadv_orig_node *orig_node; 1603 int ret = NET_XMIT_SUCCESS; 1604 struct sk_buff *newskb; 1605 1606 rcu_read_lock(); 1607 hlist_for_each_entry_rcu(orig_node, 1608 &bat_priv->mcast.want_all_rtr4_list, 1609 mcast_want_all_rtr4_node) { 1610 newskb = skb_copy(skb, GFP_ATOMIC); 1611 if (!newskb) { 1612 ret = NET_XMIT_DROP; 1613 break; 1614 } 1615 1616 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); 1617 } 1618 rcu_read_unlock(); 1619 return ret; 1620 } 1621 1622 /** 1623 * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6 1624 * @bat_priv: the bat priv with all the soft interface information 1625 * @skb: The multicast packet to transmit 1626 * @vid: the vlan identifier 1627 * 1628 * Sends copies of a frame with multicast destination to any node with a 1629 * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a 1630 * batman-adv unicast packet for each such destination node. 1631 * 1632 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS 1633 * otherwise. 
1634 */ 1635 static int 1636 batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv, 1637 struct sk_buff *skb, unsigned short vid) 1638 { 1639 struct batadv_orig_node *orig_node; 1640 int ret = NET_XMIT_SUCCESS; 1641 struct sk_buff *newskb; 1642 1643 rcu_read_lock(); 1644 hlist_for_each_entry_rcu(orig_node, 1645 &bat_priv->mcast.want_all_rtr6_list, 1646 mcast_want_all_rtr6_node) { 1647 newskb = skb_copy(skb, GFP_ATOMIC); 1648 if (!newskb) { 1649 ret = NET_XMIT_DROP; 1650 break; 1651 } 1652 1653 batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); 1654 } 1655 rcu_read_unlock(); 1656 return ret; 1657 } 1658 1659 /** 1660 * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list 1661 * @bat_priv: the bat priv with all the soft interface information 1662 * @skb: the multicast packet to transmit 1663 * @vid: the vlan identifier 1664 * 1665 * Sends copies of a frame with multicast destination to any node with a 1666 * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A 1667 * transmission is performed via a batman-adv unicast packet for each such 1668 * destination node. 1669 * 1670 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family 1671 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise. 1672 */ 1673 static int 1674 batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv, 1675 struct sk_buff *skb, unsigned short vid) 1676 { 1677 switch (ntohs(eth_hdr(skb)->h_proto)) { 1678 case ETH_P_IP: 1679 return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid); 1680 case ETH_P_IPV6: 1681 return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid); 1682 default: 1683 /* we shouldn't be here... */ 1684 return NET_XMIT_DROP; 1685 } 1686 } 1687 1688 /** 1689 * batadv_mcast_forw_send() - send packet to any detected multicast recipient 1690 * @bat_priv: the bat priv with all the soft interface information 1691 * @skb: the multicast packet to transmit 1692 * @vid: the vlan identifier 1693 * @is_routable: stores whether the destination is routable 1694 * 1695 * Sends copies of a frame with multicast destination to any node that signaled 1696 * interest in it, that is either via the translation table or the according 1697 * want-all flags. A transmission is performed via a batman-adv unicast packet 1698 * for each such destination node. 1699 * 1700 * The given skb is consumed/freed. 1701 * 1702 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family 1703 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise. 
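* Note that the first failing helper aborts the remaining transmissions: the skb is freed and the error code is returned without trying the other destination lists.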
1704 */ 1705 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, 1706 unsigned short vid, int is_routable) 1707 { 1708 int ret; 1709 1710 ret = batadv_mcast_forw_tt(bat_priv, skb, vid); 1711 if (ret != NET_XMIT_SUCCESS) { 1712 kfree_skb(skb); 1713 return ret; 1714 } 1715 1716 ret = batadv_mcast_forw_want_all(bat_priv, skb, vid); 1717 if (ret != NET_XMIT_SUCCESS) { 1718 kfree_skb(skb); 1719 return ret; 1720 } 1721 1722 if (!is_routable) 1723 goto skip_mc_router; 1724 1725 ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid); 1726 if (ret != NET_XMIT_SUCCESS) { 1727 kfree_skb(skb); 1728 return ret; 1729 } 1730 1731 skip_mc_router: 1732 consume_skb(skb); 1733 return ret; 1734 } 1735 1736 /** 1737 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list 1738 * @bat_priv: the bat priv with all the soft interface information 1739 * @orig: the orig_node which multicast state might have changed of 1740 * @mcast_flags: flags indicating the new multicast state 1741 * 1742 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator, 1743 * orig, has toggled then this method updates the counter and the list 1744 * accordingly. 1745 * 1746 * Caller needs to hold orig->mcast_handler_lock. 1747 */ 1748 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, 1749 struct batadv_orig_node *orig, 1750 u8 mcast_flags) 1751 { 1752 struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node; 1753 struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list; 1754 1755 lockdep_assert_held(&orig->mcast_handler_lock); 1756 1757 /* switched from flag unset to set */ 1758 if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES && 1759 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) { 1760 atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables); 1761 1762 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1763 /* flag checks above + mcast_handler_lock prevents this */ 1764 WARN_ON(!hlist_unhashed(node)); 1765 1766 hlist_add_head_rcu(node, head); 1767 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1768 /* switched from flag set to unset */ 1769 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) && 1770 orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) { 1771 atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables); 1772 1773 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1774 /* flag checks above + mcast_handler_lock prevents this */ 1775 WARN_ON(hlist_unhashed(node)); 1776 1777 hlist_del_init_rcu(node); 1778 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1779 } 1780 } 1781 1782 /** 1783 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list 1784 * @bat_priv: the bat priv with all the soft interface information 1785 * @orig: the orig_node which multicast state might have changed of 1786 * @mcast_flags: flags indicating the new multicast state 1787 * 1788 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has 1789 * toggled then this method updates the counter and the list accordingly. 1790 * 1791 * Caller needs to hold orig->mcast_handler_lock. 
1792 */ 1793 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, 1794 struct batadv_orig_node *orig, 1795 u8 mcast_flags) 1796 { 1797 struct hlist_node *node = &orig->mcast_want_all_ipv4_node; 1798 struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list; 1799 1800 lockdep_assert_held(&orig->mcast_handler_lock); 1801 1802 /* switched from flag unset to set */ 1803 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 && 1804 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) { 1805 atomic_inc(&bat_priv->mcast.num_want_all_ipv4); 1806 1807 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1808 /* flag checks above + mcast_handler_lock prevents this */ 1809 WARN_ON(!hlist_unhashed(node)); 1810 1811 hlist_add_head_rcu(node, head); 1812 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1813 /* switched from flag set to unset */ 1814 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) && 1815 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) { 1816 atomic_dec(&bat_priv->mcast.num_want_all_ipv4); 1817 1818 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1819 /* flag checks above + mcast_handler_lock prevents this */ 1820 WARN_ON(hlist_unhashed(node)); 1821 1822 hlist_del_init_rcu(node); 1823 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1824 } 1825 } 1826 1827 /** 1828 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list 1829 * @bat_priv: the bat priv with all the soft interface information 1830 * @orig: the orig_node which multicast state might have changed of 1831 * @mcast_flags: flags indicating the new multicast state 1832 * 1833 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has 1834 * toggled then this method updates the counter and the list accordingly. 1835 * 1836 * Caller needs to hold orig->mcast_handler_lock. 1837 */ 1838 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, 1839 struct batadv_orig_node *orig, 1840 u8 mcast_flags) 1841 { 1842 struct hlist_node *node = &orig->mcast_want_all_ipv6_node; 1843 struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list; 1844 1845 lockdep_assert_held(&orig->mcast_handler_lock); 1846 1847 /* switched from flag unset to set */ 1848 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 && 1849 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) { 1850 atomic_inc(&bat_priv->mcast.num_want_all_ipv6); 1851 1852 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1853 /* flag checks above + mcast_handler_lock prevents this */ 1854 WARN_ON(!hlist_unhashed(node)); 1855 1856 hlist_add_head_rcu(node, head); 1857 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1858 /* switched from flag set to unset */ 1859 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) && 1860 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) { 1861 atomic_dec(&bat_priv->mcast.num_want_all_ipv6); 1862 1863 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1864 /* flag checks above + mcast_handler_lock prevents this */ 1865 WARN_ON(hlist_unhashed(node)); 1866 1867 hlist_del_init_rcu(node); 1868 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1869 } 1870 } 1871 1872 /** 1873 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list 1874 * @bat_priv: the bat priv with all the soft interface information 1875 * @orig: the orig_node which multicast state might have changed of 1876 * @mcast_flags: flags indicating the new multicast state 1877 * 1878 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has 1879 * toggled then this method updates the counter and the list accordingly. 
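 *
 * Note that the list semantics are inverted compared to the WANT_ALL_*
 * helpers above: an originator stays on the want_all_rtr4_list while its
 * BATADV_MCAST_WANT_NO_RTR4 flag is unset, i.e. while it still asks for all
 * routable IPv4 multicast traffic. This is why the set/unset branches below
 * are mirrored relative to the IPv4/IPv6 variants; the rtr6 helper further
 * down behaves the same way for BATADV_MCAST_WANT_NO_RTR6.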
1880 * 1881 * Caller needs to hold orig->mcast_handler_lock. 1882 */ 1883 static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv, 1884 struct batadv_orig_node *orig, 1885 u8 mcast_flags) 1886 { 1887 struct hlist_node *node = &orig->mcast_want_all_rtr4_node; 1888 struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list; 1889 1890 lockdep_assert_held(&orig->mcast_handler_lock); 1891 1892 /* switched from flag set to unset */ 1893 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) && 1894 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) { 1895 atomic_inc(&bat_priv->mcast.num_want_all_rtr4); 1896 1897 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1898 /* flag checks above + mcast_handler_lock prevents this */ 1899 WARN_ON(!hlist_unhashed(node)); 1900 1901 hlist_add_head_rcu(node, head); 1902 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1903 /* switched from flag unset to set */ 1904 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 && 1905 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) { 1906 atomic_dec(&bat_priv->mcast.num_want_all_rtr4); 1907 1908 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1909 /* flag checks above + mcast_handler_lock prevents this */ 1910 WARN_ON(hlist_unhashed(node)); 1911 1912 hlist_del_init_rcu(node); 1913 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1914 } 1915 } 1916 1917 /** 1918 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list 1919 * @bat_priv: the bat priv with all the soft interface information 1920 * @orig: the orig_node which multicast state might have changed of 1921 * @mcast_flags: flags indicating the new multicast state 1922 * 1923 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has 1924 * toggled then this method updates the counter and the list accordingly. 1925 * 1926 * Caller needs to hold orig->mcast_handler_lock. 
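 *
 * As a concrete example of the relevant transition: an originator that so
 * far announced BATADV_MCAST_WANT_NO_RTR6 and now stops doing so (for
 * instance because an IPv6 multicast router appeared behind it) is added to
 * the want_all_rtr6_list and num_want_all_rtr6 is increased; the reverse
 * transition removes it from the list again.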
1927 */ 1928 static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv, 1929 struct batadv_orig_node *orig, 1930 u8 mcast_flags) 1931 { 1932 struct hlist_node *node = &orig->mcast_want_all_rtr6_node; 1933 struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list; 1934 1935 lockdep_assert_held(&orig->mcast_handler_lock); 1936 1937 /* switched from flag set to unset */ 1938 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) && 1939 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) { 1940 atomic_inc(&bat_priv->mcast.num_want_all_rtr6); 1941 1942 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1943 /* flag checks above + mcast_handler_lock prevents this */ 1944 WARN_ON(!hlist_unhashed(node)); 1945 1946 hlist_add_head_rcu(node, head); 1947 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1948 /* switched from flag unset to set */ 1949 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 && 1950 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) { 1951 atomic_dec(&bat_priv->mcast.num_want_all_rtr6); 1952 1953 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1954 /* flag checks above + mcast_handler_lock prevents this */ 1955 WARN_ON(hlist_unhashed(node)); 1956 1957 hlist_del_init_rcu(node); 1958 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1959 } 1960 } 1961 1962 /** 1963 * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV 1964 * @enabled: whether the originator has multicast TVLV support enabled 1965 * @tvlv_value: tvlv buffer containing the multicast flags 1966 * @tvlv_value_len: tvlv buffer length 1967 * 1968 * Return: multicast flags for the given tvlv buffer 1969 */ 1970 static u8 1971 batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len) 1972 { 1973 u8 mcast_flags = BATADV_NO_FLAGS; 1974 1975 if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags)) 1976 mcast_flags = *(u8 *)tvlv_value; 1977 1978 if (!enabled) { 1979 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4; 1980 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6; 1981 } 1982 1983 /* remove redundant flags to avoid sending duplicate packets later */ 1984 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) 1985 mcast_flags |= BATADV_MCAST_WANT_NO_RTR4; 1986 1987 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) 1988 mcast_flags |= BATADV_MCAST_WANT_NO_RTR6; 1989 1990 return mcast_flags; 1991 } 1992 1993 /** 1994 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container 1995 * @bat_priv: the bat priv with all the soft interface information 1996 * @orig: the orig_node of the ogm 1997 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) 1998 * @tvlv_value: tvlv buffer containing the multicast data 1999 * @tvlv_value_len: tvlv buffer length 2000 */ 2001 static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv, 2002 struct batadv_orig_node *orig, 2003 u8 flags, 2004 void *tvlv_value, 2005 u16 tvlv_value_len) 2006 { 2007 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 2008 u8 mcast_flags; 2009 2010 mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled, 2011 tvlv_value, tvlv_value_len); 2012 2013 spin_lock_bh(&orig->mcast_handler_lock); 2014 2015 if (orig_mcast_enabled && 2016 !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { 2017 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); 2018 } else if (!orig_mcast_enabled && 2019 test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { 2020 clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); 2021 } 2022 2023 
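/* independent of the capability toggle above, mark the multicast state of
 * this originator as evaluated at least once; the netlink multicast flags
 * dump below only reports originators with this bit set in capa_initialized
 */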
set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized); 2024 2025 batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags); 2026 batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags); 2027 batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags); 2028 batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags); 2029 batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags); 2030 2031 orig->mcast_flags = mcast_flags; 2032 spin_unlock_bh(&orig->mcast_handler_lock); 2033 } 2034 2035 /** 2036 * batadv_mcast_init() - initialize the multicast optimizations structures 2037 * @bat_priv: the bat priv with all the soft interface information 2038 */ 2039 void batadv_mcast_init(struct batadv_priv *bat_priv) 2040 { 2041 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler, 2042 NULL, BATADV_TVLV_MCAST, 2, 2043 BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 2044 2045 INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update); 2046 batadv_mcast_start_timer(bat_priv); 2047 } 2048 2049 /** 2050 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message 2051 * @msg: buffer for the message 2052 * @bat_priv: the bat priv with all the soft interface information 2053 * 2054 * Return: 0 or error code. 2055 */ 2056 int batadv_mcast_mesh_info_put(struct sk_buff *msg, 2057 struct batadv_priv *bat_priv) 2058 { 2059 u32 flags = bat_priv->mcast.mla_flags.tvlv_flags; 2060 u32 flags_priv = BATADV_NO_FLAGS; 2061 2062 if (bat_priv->mcast.mla_flags.bridged) { 2063 flags_priv |= BATADV_MCAST_FLAGS_BRIDGED; 2064 2065 if (bat_priv->mcast.mla_flags.querier_ipv4.exists) 2066 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS; 2067 if (bat_priv->mcast.mla_flags.querier_ipv6.exists) 2068 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS; 2069 if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing) 2070 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING; 2071 if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing) 2072 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING; 2073 } 2074 2075 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) || 2076 nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv)) 2077 return -EMSGSIZE; 2078 2079 return 0; 2080 } 2081 2082 /** 2083 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table 2084 * to a netlink socket 2085 * @msg: buffer for the message 2086 * @portid: netlink port 2087 * @cb: Control block containing additional options 2088 * @orig_node: originator to dump the multicast flags of 2089 * 2090 * Return: 0 or error code. 
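 *
 * A userspace consumer of this dump would typically treat a missing
 * BATADV_ATTR_MCAST_FLAGS attribute as "originator without multicast TVLV
 * support", since the attribute is only added when the capability bit is
 * set. A rough sketch, with attrs[] as filled by the caller's attribute
 * parsing and variable names purely illustrative:
 *
 *   if (attrs[BATADV_ATTR_MCAST_FLAGS])
 *           mcast_flags = nla_get_u32(attrs[BATADV_ATTR_MCAST_FLAGS]);
 *   else
 *           mcast_flags_available = false;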
2091 */ 2092 static int 2093 batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, 2094 struct netlink_callback *cb, 2095 struct batadv_orig_node *orig_node) 2096 { 2097 void *hdr; 2098 2099 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, 2100 &batadv_netlink_family, NLM_F_MULTI, 2101 BATADV_CMD_GET_MCAST_FLAGS); 2102 if (!hdr) 2103 return -ENOBUFS; 2104 2105 genl_dump_check_consistent(cb, hdr); 2106 2107 if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, 2108 orig_node->orig)) { 2109 genlmsg_cancel(msg, hdr); 2110 return -EMSGSIZE; 2111 } 2112 2113 if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2114 &orig_node->capabilities)) { 2115 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, 2116 orig_node->mcast_flags)) { 2117 genlmsg_cancel(msg, hdr); 2118 return -EMSGSIZE; 2119 } 2120 } 2121 2122 genlmsg_end(msg, hdr); 2123 return 0; 2124 } 2125 2126 /** 2127 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags 2128 * table to a netlink socket 2129 * @msg: buffer for the message 2130 * @portid: netlink port 2131 * @cb: Control block containing additional options 2132 * @hash: hash to dump 2133 * @bucket: bucket index to dump 2134 * @idx_skip: How many entries to skip 2135 * 2136 * Return: 0 or error code. 2137 */ 2138 static int 2139 batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, 2140 struct netlink_callback *cb, 2141 struct batadv_hashtable *hash, 2142 unsigned int bucket, long *idx_skip) 2143 { 2144 struct batadv_orig_node *orig_node; 2145 long idx = 0; 2146 2147 spin_lock_bh(&hash->list_locks[bucket]); 2148 cb->seq = atomic_read(&hash->generation) << 1 | 1; 2149 2150 hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) { 2151 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2152 &orig_node->capa_initialized)) 2153 continue; 2154 2155 if (idx < *idx_skip) 2156 goto skip; 2157 2158 if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) { 2159 spin_unlock_bh(&hash->list_locks[bucket]); 2160 *idx_skip = idx; 2161 2162 return -EMSGSIZE; 2163 } 2164 2165 skip: 2166 idx++; 2167 } 2168 spin_unlock_bh(&hash->list_locks[bucket]); 2169 2170 return 0; 2171 } 2172 2173 /** 2174 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket 2175 * @msg: buffer for the message 2176 * @portid: netlink port 2177 * @cb: Control block containing additional options 2178 * @bat_priv: the bat priv with all the soft interface information 2179 * @bucket: current bucket to dump 2180 * @idx: index in current bucket to the next entry to dump 2181 * 2182 * Return: 0 or error code. 2183 */ 2184 static int 2185 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, 2186 struct netlink_callback *cb, 2187 struct batadv_priv *bat_priv, long *bucket, long *idx) 2188 { 2189 struct batadv_hashtable *hash = bat_priv->orig_hash; 2190 long bucket_tmp = *bucket; 2191 long idx_tmp = *idx; 2192 2193 while (bucket_tmp < hash->size) { 2194 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash, 2195 bucket_tmp, &idx_tmp)) 2196 break; 2197 2198 bucket_tmp++; 2199 idx_tmp = 0; 2200 } 2201 2202 *bucket = bucket_tmp; 2203 *idx = idx_tmp; 2204 2205 return msg->len; 2206 } 2207 2208 /** 2209 * batadv_mcast_netlink_get_primary() - get primary interface from netlink 2210 * callback 2211 * @cb: netlink callback structure 2212 * @primary_if: the primary interface pointer to return the result in 2213 * 2214 * Return: 0 or error code. 
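 *
 * On success, the reference acquired on the returned primary hard interface
 * is handed over to the caller via @primary_if and must be released again
 * with batadv_hardif_put(), as batadv_mcast_flags_dump() below does.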
2215 */ 2216 static int 2217 batadv_mcast_netlink_get_primary(struct netlink_callback *cb, 2218 struct batadv_hard_iface **primary_if) 2219 { 2220 struct batadv_hard_iface *hard_iface = NULL; 2221 struct net *net = sock_net(cb->skb->sk); 2222 struct net_device *soft_iface; 2223 struct batadv_priv *bat_priv; 2224 int ifindex; 2225 int ret = 0; 2226 2227 ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); 2228 if (!ifindex) 2229 return -EINVAL; 2230 2231 soft_iface = dev_get_by_index(net, ifindex); 2232 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { 2233 ret = -ENODEV; 2234 goto out; 2235 } 2236 2237 bat_priv = netdev_priv(soft_iface); 2238 2239 hard_iface = batadv_primary_if_get_selected(bat_priv); 2240 if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) { 2241 ret = -ENOENT; 2242 goto out; 2243 } 2244 2245 out: 2246 dev_put(soft_iface); 2247 2248 if (!ret && primary_if) 2249 *primary_if = hard_iface; 2250 else 2251 batadv_hardif_put(hard_iface); 2252 2253 return ret; 2254 } 2255 2256 /** 2257 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket 2258 * @msg: buffer for the message 2259 * @cb: callback structure containing arguments 2260 * 2261 * Return: message length. 2262 */ 2263 int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb) 2264 { 2265 struct batadv_hard_iface *primary_if = NULL; 2266 int portid = NETLINK_CB(cb->skb).portid; 2267 struct batadv_priv *bat_priv; 2268 long *bucket = &cb->args[0]; 2269 long *idx = &cb->args[1]; 2270 int ret; 2271 2272 ret = batadv_mcast_netlink_get_primary(cb, &primary_if); 2273 if (ret) 2274 return ret; 2275 2276 bat_priv = netdev_priv(primary_if->soft_iface); 2277 ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx); 2278 2279 batadv_hardif_put(primary_if); 2280 return ret; 2281 } 2282 2283 /** 2284 * batadv_mcast_free() - free the multicast optimizations structures 2285 * @bat_priv: the bat priv with all the soft interface information 2286 */ 2287 void batadv_mcast_free(struct batadv_priv *bat_priv) 2288 { 2289 cancel_delayed_work_sync(&bat_priv->mcast.work); 2290 2291 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2); 2292 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2); 2293 2294 /* safely calling outside of worker, as worker was canceled above */ 2295 batadv_mcast_mla_tt_retract(bat_priv, NULL); 2296 } 2297 2298 /** 2299 * batadv_mcast_purge_orig() - reset originator global mcast state modifications 2300 * @orig: the originator which is going to get purged 2301 */ 2302 void batadv_mcast_purge_orig(struct batadv_orig_node *orig) 2303 { 2304 struct batadv_priv *bat_priv = orig->bat_priv; 2305 2306 spin_lock_bh(&orig->mcast_handler_lock); 2307 2308 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); 2309 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); 2310 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); 2311 batadv_mcast_want_rtr4_update(bat_priv, orig, 2312 BATADV_MCAST_WANT_NO_RTR4); 2313 batadv_mcast_want_rtr6_update(bat_priv, orig, 2314 BATADV_MCAST_WANT_NO_RTR6); 2315 2316 spin_unlock_bh(&orig->mcast_handler_lock); 2317 } 2318
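/* A note on batadv_mcast_purge_orig() above: the rtr4/rtr6 helpers are called
 * with BATADV_MCAST_WANT_NO_RTR4 / BATADV_MCAST_WANT_NO_RTR6 rather than
 * BATADV_NO_FLAGS because the want-all-rtr lists use inverted semantics, an
 * originator is listed while the corresponding WANT_NO_RTR flag is unset.
 * Handing in the WANT_NO_RTR flags therefore removes the purged originator
 * from the rtr lists, just like BATADV_NO_FLAGS removes it from the
 * unsnoopables and want-all-IPv4/IPv6 lists.
 */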