// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Linus Lüssing
 */

#include "multicast.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_mcast_mla_update(struct work_struct *work);

/**
 * batadv_mcast_start_timer() - schedule the multicast periodic worker
 * @bat_priv: the bat priv with all the soft interface information
 */
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}

/**
 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
 * @soft_iface: netdev struct of the mesh interface
 *
 * If the given soft interface has a bridge on top then the refcount
 * of the according net device is increased.
 *
 * Return: NULL if no such bridge exists. Otherwise the net device of the
 * bridge.
 */
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
	struct net_device *upper = soft_iface;

	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));

	if (upper)
		dev_hold(upper);
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
 *  node for IPv4
 * @dev: the interface to check
 *
 * Checks the presence of an IPv4 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	if (in_dev && IN_DEV_MFORWARD(in_dev))
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR4;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
 *  node for IPv6
 * @dev: the interface to check
 *
 * Checks the presence of an IPv6 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6_MROUTE)
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	struct inet6_dev *in6_dev = __in6_dev_get(dev);

	if (in6_dev && in6_dev->cnf.mc_forwarding)
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR6;
}
#else
static inline u8
batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	return BATADV_MCAST_WANT_NO_RTR6;
}
#endif

/**
 * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	rcu_read_lock();

	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);

	rcu_read_unlock();

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
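 * The check is performed via the bridge's br_multicast_has_router_adjacent()
 * helper, with the soft interface acting as the bridge port to query from.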
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	if (!bridge)
		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
		flags |= BATADV_MCAST_WANT_NO_RTR4;
	if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
		flags |= BATADV_MCAST_WANT_NO_RTR6;

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node or behind its bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
					 struct net_device *bridge)
{
	u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
	flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);

	return flags;
}

/**
 * batadv_mcast_mla_flags_get() - get the new multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: A set of flags for the current/next TVLV, querier and
 * bridge state.
 */
static struct batadv_mcast_mla_flags
batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
{
	struct net_device *dev = bat_priv->soft_iface;
	struct batadv_mcast_querier_state *qr4, *qr6;
	struct batadv_mcast_mla_flags mla_flags;
	struct net_device *bridge;

	bridge = batadv_mcast_get_bridge(dev);

	memset(&mla_flags, 0, sizeof(mla_flags));
	mla_flags.enabled = 1;
	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
							       bridge);

	if (!bridge)
		return mla_flags;

	dev_put(bridge);

	mla_flags.bridged = 1;
	qr4 = &mla_flags.querier_ipv4;
	qr6 = &mla_flags.querier_ipv6;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signal to other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!qr4->exists || qr4->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
	}

	if (!qr6->exists || qr6->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
	}

	return mla_flags;
}

/**
 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
 * @mcast_addr: the multicast address to check
 * @mcast_list: the list with multicast addresses to search in
 *
 * Return: true if the given address is already in the given list.
 * Otherwise returns false.
 */
static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
					  struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;

	hlist_for_each_entry(mcast_entry, mcast_list, list)
		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
			return true;

	return false;
}

/**
 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv4 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct in_device *in_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ip_mc_list *pmc;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
		return 0;

	rcu_read_lock();

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
	     pmc = rcu_dereference(pmc->next_rcu)) {
		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
		    !ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		ip_eth_mc_map(pmc->multiaddr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}

/**
 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv6 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6)
static int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct inet6_dev *in6_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ifmcaddr6 *pmc6;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
		return 0;

	rcu_read_lock();

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc6 = rcu_dereference(in6_dev->mc_list);
	     pmc6;
	     pmc6 = rcu_dereference(pmc6->next)) {
		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
#else
static inline int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	return 0;
}
#endif

/**
 * batadv_mcast_mla_softif_get() - get softif multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * If there is a bridge interface on top of dev, collect from that one
 * instead. Just like with IP addresses and routes, multicast listeners
 * will(/should) register to the bridge interface instead of an
 * enslaved bat0.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get(struct net_device *dev,
			    struct hlist_head *mcast_list,
			    struct batadv_mcast_mla_flags *flags)
{
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	int ret4, ret6 = 0;

	if (bridge)
		dev = bridge;

	ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
	if (ret4 < 0)
		goto out;

	ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
	if (ret6 < 0) {
		ret4 = 0;
		goto out;
	}

out:
	if (bridge)
		dev_put(bridge);

	return ret4 + ret6;
}

/**
 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
 * @dst: destination to write to - a multicast MAC address
 * @src: source to read from - a multicast IP address
 *
 * Converts a given multicast IPv4/IPv6 address from a bridge
 * to its matching multicast MAC address and copies it into the given
 * destination buffer.
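 * The mapping uses ip_eth_mc_map() for IPv4 and ipv6_eth_mc_map() for IPv6
 * group addresses; for any other protocol the destination is zeroed.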
 *
 * Caller needs to make sure the destination buffer can hold
 * at least ETH_ALEN bytes.
 */
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
	if (src->proto == htons(ETH_P_IP))
		ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
	else if (src->proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
	else
		eth_zero_addr(dst);
}

/**
 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
 * @dev: a bridge slave whose bridge to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on foreign, non-mesh devices which we gave access to our mesh via
 * a bridge on top of the given soft interface, dev, in the given
 * mcast_list.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_bridge_get(struct net_device *dev,
				       struct hlist_head *mcast_list,
				       struct batadv_mcast_mla_flags *flags)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	struct br_ip_list *br_ip_entry, *tmp;
	u8 tvlv_flags = flags->tvlv_flags;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* we don't need to detect these devices/listeners, the IGMP/MLD
	 * snooping code of the Linux bridge already does that for us
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
			    IPV6_ADDR_SCOPE_LINKLOCAL)
				continue;
		}
#endif

		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}

/**
 * batadv_mcast_mla_list_free() - free a list of multicast addresses
 * @mcast_list: the list to free
 *
 * Removes and frees all items in the given mcast_list.
 */
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which should _not_ be removed
 *
 * Retracts the announcement of any multicast listener from the
 * translation table except the ones listed in the given mcast_list.
 *
 * If mcast_list is NULL then all are retracted.
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}

/**
 * batadv_mcast_querier_log() - debug output regarding the querier status on
 *  link
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
 * @old_state: the previous querier state on our link
 * @new_state: the new querier state on our link
 *
 * Outputs debug messages to the logging facility with log level 'mcast'
 * regarding changes to the querier status on the link which are relevant
 * to our multicast optimizations.
 *
 * Usually this is about whether a querier appeared or vanished in
 * our mesh or whether the querier is in the suboptimal position of being
 * behind our local bridge segment: Snooping switches will directly
 * forward listener reports to the querier, therefore batman-adv and
 * the bridge will potentially not see these listeners - the querier is
 * potentially shadowing listeners from us then.
 *
 * This is only interesting for nodes with a bridge on top of their
 * soft interface.
 */
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
			 struct batadv_mcast_querier_state *old_state,
			 struct batadv_mcast_querier_state *new_state)
{
	if (!old_state->exists && new_state->exists)
		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
			    str_proto);
	else if (old_state->exists && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "%s Querier disappeared - multicast optimizations disabled\n",
			    str_proto);
	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "No %s Querier present - multicast optimizations disabled\n",
			    str_proto);

	if (new_state->exists) {
		if ((!old_state->shadowing && new_state->shadowing) ||
		    (!old_state->exists && new_state->shadowing))
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
				   str_proto);
		else if (old_state->shadowing && !new_state->shadowing)
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is not behind our bridged segment\n",
				   str_proto);
	}
}

/**
 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
 *  setups
 * @bat_priv: the bat priv with all the soft interface information
 * @new_flags: flags indicating the new multicast state
 *
 * If no bridges are ever used on this node, then this function does nothing.
 *
 * Otherwise this function outputs debug information to the 'mcast' log level
 * which might be relevant to our multicast optimizations.
 *
 * More precisely, it outputs information when a bridge interface is added or
 * removed from a soft interface. And when a bridge is present, it further
 * outputs information about the querier state which is relevant for the
 * multicast flags this node is going to set.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
			struct batadv_mcast_mla_flags *new_flags)
{
	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;

	if (!old_flags->bridged && new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (old_flags->bridged && !new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (new_flags->bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &old_flags->querier_ipv4,
					 &new_flags->querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &old_flags->querier_ipv6,
					 &new_flags->querier_ipv6);
	}
}

/**
 * batadv_mcast_flags_log() - output debug information about mcast flag changes
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: TVLV flags indicating the new multicast state
 *
 * Whenever the multicast TVLV flags this node announces change, this function
 * should be used to notify userspace about the change.
 */
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
	bool old_enabled = bat_priv->mcast.mla_flags.enabled;
	u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
	char str_old_flags[] = "[.... . ]";

	sprintf(str_old_flags, "[%c%c%c%s%s]",
		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ?
			'6' : '.',
		!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");

	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
		   "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
		   old_enabled ? str_old_flags : "<undefined>",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
}

/**
 * batadv_mcast_mla_flags_update() - update multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: flags indicating the new multicast state
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 */
static void
batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
			      struct batadv_mcast_mla_flags *flags)
{
	struct batadv_tvlv_mcast_data mcast_data;

	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
		return;

	batadv_mcast_bridge_log(bat_priv, flags);
	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);

	mcast_data.flags = flags->tvlv_flags;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
				       &mcast_data, sizeof(mcast_data));

	bat_priv->mcast.mla_flags = *flags;
}

/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	struct batadv_mcast_mla_flags flags;
	int ret;

	flags = batadv_mcast_mla_flags_get(bat_priv);

	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	spin_lock(&bat_priv->mcast.mla_lock);
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
	batadv_mcast_mla_flags_update(bat_priv, &flags);
	spin_unlock(&bat_priv->mcast.mla_lock);

out:
	batadv_mcast_mla_list_free(&mcast_list);
}

/**
 * batadv_mcast_mla_update() - update the own MLAs
 * @work: kernel work struct
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * In the end, reschedules the work timer.
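 * The worker is rescheduled to run again after BATADV_MCAST_WORK_PERIOD
 * milliseconds (see batadv_mcast_start_timer()).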
 */
static void batadv_mcast_mla_update(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_mcast *priv_mcast;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

	__batadv_mcast_mla_update(bat_priv);
	batadv_mcast_start_timer(bat_priv);
}

/**
 * batadv_mcast_is_report_ipv4() - check for IGMP reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid IGMP report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
	if (ip_mc_check_igmp(skb) < 0)
		return false;

	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	iphdr = ip_hdr(skb);

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	if (ipv4_is_local_multicast(iphdr->daddr))
		*is_unsnoopable = true;
	else
		*is_routable = ETH_P_IP;

	return 0;
}

/**
 * batadv_mcast_is_report_ipv6() - check for MLD reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid MLD report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
	if (ipv6_mc_check_mld(skb) < 0)
		return false;

	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MLD2_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded with a
 * mode more optimal than classic flooding.
 *
 * Return: If so then 0.
 * Otherwise -EINVAL or -ENOMEM in case of memory allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	ip6hdr = ipv6_hdr(skb);

	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;
	else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
		*is_routable = ETH_P_IPV6;

	return 0;
}

/**
 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given multicast ethernet frame has the potential to be
 * forwarded with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
					struct sk_buff *skb,
					bool *is_unsnoopable,
					int *is_routable)
{
	struct ethhdr *ethhdr = eth_hdr(skb);

	if (!atomic_read(&bat_priv->multicast_mode))
		return -EINVAL;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	case ETH_P_IPV6:
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;

		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	default:
		return -EINVAL;
	}
}

/**
 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
 *  interest
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: ethernet header of a packet
 *
 * Return: the number of nodes which want all IPv4 multicast traffic if the
 * given ethhdr is from an IPv4 packet or the number of nodes which want all
 * IPv6 traffic if it matches an IPv6 packet.
 */
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
					       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
	default:
		/* we shouldn't be here... */
		return 0;
	}
}

/**
 * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
 * @bat_priv: the bat priv with all the soft interface information
 * @protocol: the ethernet protocol type to count multicast routers for
 *
 * Return: the number of nodes which want all routable IPv4 multicast traffic
 * if the protocol is ETH_P_IP or the number of nodes which want all routable
 * IPv6 traffic if the protocol is ETH_P_IPV6.
 * Otherwise returns 0.
 */
static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
				       int protocol)
{
	switch (protocol) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
	default:
		return 0;
	}
}

/**
 * batadv_mcast_forw_tt_node_get() - get a multicast tt node
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the ether header containing the multicast destination
 *
 * Return: an orig_node matching the multicast address provided by ethhdr
 * via a translation table lookup. This increases the returned node's refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
					BATADV_NO_FLAGS);
}

/**
 * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
 * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
			      struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_ipv4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_ipv6_node_get(bat_priv);
	default:
		/* we shouldn't be here...
		 */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
 * set and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_unsnoopables_list,
				 mcast_want_all_unsnoopables_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr4_node_get() - get a node with an ipv4 mcast router flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR4 flag unset and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr4_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr6_node_get() - get a node with an ipv6 mcast router flag
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR6 flag unset
 * and increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr6_node_get(struct batadv_priv *bat_priv)
{
	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
			continue;

		orig_node = tmp_orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node;
}

/**
 * batadv_mcast_forw_rtr_node_get() - get a node with an ipv4/ipv6 router flag
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: an ethernet header to determine the protocol family from
 *
 * Return: an orig_node which has no BATADV_MCAST_WANT_NO_RTR4 or
 * BATADV_MCAST_WANT_NO_RTR6 flag, depending on the provided ethhdr, set and
 * increases its refcount.
 */
static struct batadv_orig_node *
batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
			       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_rtr4_node_get(bat_priv);
	case ETH_P_IPV6:
		return batadv_mcast_forw_rtr6_node_get(bat_priv);
	default:
		/* we shouldn't be here...
		 */
		return NULL;
	}
}

/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
 * should be forwarded to.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       struct batadv_orig_node **orig)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	unsigned int mcast_fanout;
	struct ethhdr *ethhdr;
	int is_routable = 0;
	int rtr_count = 0;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
					   &is_routable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_ALL;

	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);

	total_count = tt_count + ip_count + unsnoop_count + rtr_count;

	switch (total_count) {
	case 1:
		if (tt_count)
			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
		else if (ip_count)
			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
		else if (unsnoop_count)
			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
		else if (rtr_count)
			*orig = batadv_mcast_forw_rtr_node_get(bat_priv,
							       ethhdr);

		if (*orig)
			return BATADV_FORW_SINGLE;

		fallthrough;
	case 0:
		return BATADV_FORW_NONE;
	default:
		mcast_fanout = atomic_read(&bat_priv->multicast_fanout);

		if (!unsnoop_count && total_count <= mcast_fanout)
			return BATADV_FORW_SOME;
	}

	return BATADV_FORW_ALL;
}

/**
 * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to send
 * @vid: the vlan identifier
 * @orig_node: the originator to send the packet to
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
				struct sk_buff *skb,
				unsigned short vid,
				struct batadv_orig_node *orig_node)
{
	/* Avoid sending multicast-in-unicast packets to other BLA
	 * gateways - they already got the frame from the LAN side
	 * we share with them.
	 * TODO: Refactor to take BLA into account earlier, to avoid
	 * reducing the mcast_fanout count.
	 */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

/**
 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any multicast
 * listener registered in the translation table. A transmission is performed
 * via a batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
		     unsigned short vid)
{
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;
	struct batadv_tt_orig_list_entry *orig_entry;
	struct batadv_tt_global_entry *tt_global;
	const u8 *addr = eth_hdr(skb)->h_dest;

	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
	if (!tt_global)
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
					    orig_entry->orig_node);
	}
	rcu_read_unlock();

	batadv_tt_global_entry_put(tt_global);

out:
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_send() - send packet to any detected multicast recipient
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node that signaled
 * interest in it, that is either via the translation table or the according
 * want-all flags. A transmission is performed via a batman-adv unicast packet
 * for each such destination node.
 *
 * The given skb is consumed/freed.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	int ret;

	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	consume_skb(skb);
	return ret;
}

/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
 * orig, has toggled then this method updates the counter and the list
 * accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
					     struct batadv_orig_node *orig,
					     u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
1788 */ 1789 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, 1790 struct batadv_orig_node *orig, 1791 u8 mcast_flags) 1792 { 1793 struct hlist_node *node = &orig->mcast_want_all_ipv4_node; 1794 struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list; 1795 1796 lockdep_assert_held(&orig->mcast_handler_lock); 1797 1798 /* switched from flag unset to set */ 1799 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 && 1800 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) { 1801 atomic_inc(&bat_priv->mcast.num_want_all_ipv4); 1802 1803 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1804 /* flag checks above + mcast_handler_lock prevents this */ 1805 WARN_ON(!hlist_unhashed(node)); 1806 1807 hlist_add_head_rcu(node, head); 1808 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1809 /* switched from flag set to unset */ 1810 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) && 1811 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) { 1812 atomic_dec(&bat_priv->mcast.num_want_all_ipv4); 1813 1814 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1815 /* flag checks above + mcast_handler_lock prevents this */ 1816 WARN_ON(hlist_unhashed(node)); 1817 1818 hlist_del_init_rcu(node); 1819 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1820 } 1821 } 1822 1823 /** 1824 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list 1825 * @bat_priv: the bat priv with all the soft interface information 1826 * @orig: the orig_node which multicast state might have changed of 1827 * @mcast_flags: flags indicating the new multicast state 1828 * 1829 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has 1830 * toggled then this method updates the counter and the list accordingly. 1831 * 1832 * Caller needs to hold orig->mcast_handler_lock. 1833 */ 1834 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, 1835 struct batadv_orig_node *orig, 1836 u8 mcast_flags) 1837 { 1838 struct hlist_node *node = &orig->mcast_want_all_ipv6_node; 1839 struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list; 1840 1841 lockdep_assert_held(&orig->mcast_handler_lock); 1842 1843 /* switched from flag unset to set */ 1844 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 && 1845 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) { 1846 atomic_inc(&bat_priv->mcast.num_want_all_ipv6); 1847 1848 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1849 /* flag checks above + mcast_handler_lock prevents this */ 1850 WARN_ON(!hlist_unhashed(node)); 1851 1852 hlist_add_head_rcu(node, head); 1853 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1854 /* switched from flag set to unset */ 1855 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) && 1856 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) { 1857 atomic_dec(&bat_priv->mcast.num_want_all_ipv6); 1858 1859 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1860 /* flag checks above + mcast_handler_lock prevents this */ 1861 WARN_ON(hlist_unhashed(node)); 1862 1863 hlist_del_init_rcu(node); 1864 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1865 } 1866 } 1867 1868 /** 1869 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list 1870 * @bat_priv: the bat priv with all the soft interface information 1871 * @orig: the orig_node which multicast state might have changed of 1872 * @mcast_flags: flags indicating the new multicast state 1873 * 1874 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has 1875 * toggled then this method updates the counter and the list accordingly. 
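 * Note that, in contrast to the want-all-unsnoopables/-ipv4/-ipv6 lists, the want-all-rtr4 list tracks originators which have the BATADV_MCAST_WANT_NO_RTR4 flag unset, that is nodes which potentially have an IPv4 multicast router.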
1876 * 1877 * Caller needs to hold orig->mcast_handler_lock. 1878 */ 1879 static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv, 1880 struct batadv_orig_node *orig, 1881 u8 mcast_flags) 1882 { 1883 struct hlist_node *node = &orig->mcast_want_all_rtr4_node; 1884 struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list; 1885 1886 lockdep_assert_held(&orig->mcast_handler_lock); 1887 1888 /* switched from flag set to unset */ 1889 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) && 1890 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) { 1891 atomic_inc(&bat_priv->mcast.num_want_all_rtr4); 1892 1893 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1894 /* flag checks above + mcast_handler_lock prevents this */ 1895 WARN_ON(!hlist_unhashed(node)); 1896 1897 hlist_add_head_rcu(node, head); 1898 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1899 /* switched from flag unset to set */ 1900 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 && 1901 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) { 1902 atomic_dec(&bat_priv->mcast.num_want_all_rtr4); 1903 1904 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1905 /* flag checks above + mcast_handler_lock prevents this */ 1906 WARN_ON(hlist_unhashed(node)); 1907 1908 hlist_del_init_rcu(node); 1909 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1910 } 1911 } 1912 1913 /** 1914 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list 1915 * @bat_priv: the bat priv with all the soft interface information 1916 * @orig: the orig_node whose multicast state might have changed 1917 * @mcast_flags: flags indicating the new multicast state 1918 * 1919 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has 1920 * toggled then this method updates the counter and the list accordingly. 1921 * 1922 * Caller needs to hold orig->mcast_handler_lock.
1923 */ 1924 static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv, 1925 struct batadv_orig_node *orig, 1926 u8 mcast_flags) 1927 { 1928 struct hlist_node *node = &orig->mcast_want_all_rtr6_node; 1929 struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list; 1930 1931 lockdep_assert_held(&orig->mcast_handler_lock); 1932 1933 /* switched from flag set to unset */ 1934 if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) && 1935 orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) { 1936 atomic_inc(&bat_priv->mcast.num_want_all_rtr6); 1937 1938 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1939 /* flag checks above + mcast_handler_lock prevents this */ 1940 WARN_ON(!hlist_unhashed(node)); 1941 1942 hlist_add_head_rcu(node, head); 1943 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1944 /* switched from flag unset to set */ 1945 } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 && 1946 !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) { 1947 atomic_dec(&bat_priv->mcast.num_want_all_rtr6); 1948 1949 spin_lock_bh(&bat_priv->mcast.want_lists_lock); 1950 /* flag checks above + mcast_handler_lock prevents this */ 1951 WARN_ON(hlist_unhashed(node)); 1952 1953 hlist_del_init_rcu(node); 1954 spin_unlock_bh(&bat_priv->mcast.want_lists_lock); 1955 } 1956 } 1957 1958 /** 1959 * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV 1960 * @enabled: whether the originator has multicast TVLV support enabled 1961 * @tvlv_value: tvlv buffer containing the multicast flags 1962 * @tvlv_value_len: tvlv buffer length 1963 * 1964 * Return: multicast flags for the given tvlv buffer 1965 */ 1966 static u8 1967 batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len) 1968 { 1969 u8 mcast_flags = BATADV_NO_FLAGS; 1970 1971 if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags)) 1972 mcast_flags = *(u8 *)tvlv_value; 1973 1974 if (!enabled) { 1975 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4; 1976 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6; 1977 } 1978 1979 /* remove redundant flags to avoid sending duplicate packets later */ 1980 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) 1981 mcast_flags |= BATADV_MCAST_WANT_NO_RTR4; 1982 1983 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) 1984 mcast_flags |= BATADV_MCAST_WANT_NO_RTR6; 1985 1986 return mcast_flags; 1987 } 1988 1989 /** 1990 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container 1991 * @bat_priv: the bat priv with all the soft interface information 1992 * @orig: the orig_node of the ogm 1993 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) 1994 * @tvlv_value: tvlv buffer containing the multicast data 1995 * @tvlv_value_len: tvlv buffer length 1996 */ 1997 static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv, 1998 struct batadv_orig_node *orig, 1999 u8 flags, 2000 void *tvlv_value, 2001 u16 tvlv_value_len) 2002 { 2003 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 2004 u8 mcast_flags; 2005 2006 mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled, 2007 tvlv_value, tvlv_value_len); 2008 2009 spin_lock_bh(&orig->mcast_handler_lock); 2010 2011 if (orig_mcast_enabled && 2012 !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { 2013 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); 2014 } else if (!orig_mcast_enabled && 2015 test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { 2016 clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); 2017 } 2018 2019 
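	/* remember that the multicast TVLV of this originator was evaluated at least once; batadv_mcast_flags_dump_bucket() skips originators without this bit */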
set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized); 2020 2021 batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags); 2022 batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags); 2023 batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags); 2024 batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags); 2025 batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags); 2026 2027 orig->mcast_flags = mcast_flags; 2028 spin_unlock_bh(&orig->mcast_handler_lock); 2029 } 2030 2031 /** 2032 * batadv_mcast_init() - initialize the multicast optimizations structures 2033 * @bat_priv: the bat priv with all the soft interface information 2034 */ 2035 void batadv_mcast_init(struct batadv_priv *bat_priv) 2036 { 2037 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler, 2038 NULL, BATADV_TVLV_MCAST, 2, 2039 BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 2040 2041 INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update); 2042 batadv_mcast_start_timer(bat_priv); 2043 } 2044 2045 /** 2046 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message 2047 * @msg: buffer for the message 2048 * @bat_priv: the bat priv with all the soft interface information 2049 * 2050 * Return: 0 or error code. 2051 */ 2052 int batadv_mcast_mesh_info_put(struct sk_buff *msg, 2053 struct batadv_priv *bat_priv) 2054 { 2055 u32 flags = bat_priv->mcast.mla_flags.tvlv_flags; 2056 u32 flags_priv = BATADV_NO_FLAGS; 2057 2058 if (bat_priv->mcast.mla_flags.bridged) { 2059 flags_priv |= BATADV_MCAST_FLAGS_BRIDGED; 2060 2061 if (bat_priv->mcast.mla_flags.querier_ipv4.exists) 2062 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS; 2063 if (bat_priv->mcast.mla_flags.querier_ipv6.exists) 2064 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS; 2065 if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing) 2066 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING; 2067 if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing) 2068 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING; 2069 } 2070 2071 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) || 2072 nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv)) 2073 return -EMSGSIZE; 2074 2075 return 0; 2076 } 2077 2078 /** 2079 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table 2080 * to a netlink socket 2081 * @msg: buffer for the message 2082 * @portid: netlink port 2083 * @cb: Control block containing additional options 2084 * @orig_node: originator to dump the multicast flags of 2085 * 2086 * Return: 0 or error code. 
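 * The BATADV_ATTR_MCAST_FLAGS attribute is omitted for originators which did not announce multicast TVLV support (BATADV_ORIG_CAPA_HAS_MCAST unset in their capabilities).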
2087 */ 2088 static int 2089 batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, 2090 struct netlink_callback *cb, 2091 struct batadv_orig_node *orig_node) 2092 { 2093 void *hdr; 2094 2095 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, 2096 &batadv_netlink_family, NLM_F_MULTI, 2097 BATADV_CMD_GET_MCAST_FLAGS); 2098 if (!hdr) 2099 return -ENOBUFS; 2100 2101 genl_dump_check_consistent(cb, hdr); 2102 2103 if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, 2104 orig_node->orig)) { 2105 genlmsg_cancel(msg, hdr); 2106 return -EMSGSIZE; 2107 } 2108 2109 if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2110 &orig_node->capabilities)) { 2111 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, 2112 orig_node->mcast_flags)) { 2113 genlmsg_cancel(msg, hdr); 2114 return -EMSGSIZE; 2115 } 2116 } 2117 2118 genlmsg_end(msg, hdr); 2119 return 0; 2120 } 2121 2122 /** 2123 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags 2124 * table to a netlink socket 2125 * @msg: buffer for the message 2126 * @portid: netlink port 2127 * @cb: Control block containing additional options 2128 * @hash: hash to dump 2129 * @bucket: bucket index to dump 2130 * @idx_skip: How many entries to skip 2131 * 2132 * Return: 0 or error code. 2133 */ 2134 static int 2135 batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, 2136 struct netlink_callback *cb, 2137 struct batadv_hashtable *hash, 2138 unsigned int bucket, long *idx_skip) 2139 { 2140 struct batadv_orig_node *orig_node; 2141 long idx = 0; 2142 2143 spin_lock_bh(&hash->list_locks[bucket]); 2144 cb->seq = atomic_read(&hash->generation) << 1 | 1; 2145 2146 hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) { 2147 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, 2148 &orig_node->capa_initialized)) 2149 continue; 2150 2151 if (idx < *idx_skip) 2152 goto skip; 2153 2154 if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) { 2155 spin_unlock_bh(&hash->list_locks[bucket]); 2156 *idx_skip = idx; 2157 2158 return -EMSGSIZE; 2159 } 2160 2161 skip: 2162 idx++; 2163 } 2164 spin_unlock_bh(&hash->list_locks[bucket]); 2165 2166 return 0; 2167 } 2168 2169 /** 2170 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket 2171 * @msg: buffer for the message 2172 * @portid: netlink port 2173 * @cb: Control block containing additional options 2174 * @bat_priv: the bat priv with all the soft interface information 2175 * @bucket: current bucket to dump 2176 * @idx: index in current bucket to the next entry to dump 2177 * 2178 * Return: 0 or error code. 2179 */ 2180 static int 2181 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, 2182 struct netlink_callback *cb, 2183 struct batadv_priv *bat_priv, long *bucket, long *idx) 2184 { 2185 struct batadv_hashtable *hash = bat_priv->orig_hash; 2186 long bucket_tmp = *bucket; 2187 long idx_tmp = *idx; 2188 2189 while (bucket_tmp < hash->size) { 2190 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash, 2191 bucket_tmp, &idx_tmp)) 2192 break; 2193 2194 bucket_tmp++; 2195 idx_tmp = 0; 2196 } 2197 2198 *bucket = bucket_tmp; 2199 *idx = idx_tmp; 2200 2201 return msg->len; 2202 } 2203 2204 /** 2205 * batadv_mcast_netlink_get_primary() - get primary interface from netlink 2206 * callback 2207 * @cb: netlink callback structure 2208 * @primary_if: the primary interface pointer to return the result in 2209 * 2210 * Return: 0 or error code. 
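 * On success the caller needs to release the reference taken on the returned primary hard interface via batadv_hardif_put().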
2211 */ 2212 static int 2213 batadv_mcast_netlink_get_primary(struct netlink_callback *cb, 2214 struct batadv_hard_iface **primary_if) 2215 { 2216 struct batadv_hard_iface *hard_iface = NULL; 2217 struct net *net = sock_net(cb->skb->sk); 2218 struct net_device *soft_iface; 2219 struct batadv_priv *bat_priv; 2220 int ifindex; 2221 int ret = 0; 2222 2223 ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); 2224 if (!ifindex) 2225 return -EINVAL; 2226 2227 soft_iface = dev_get_by_index(net, ifindex); 2228 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { 2229 ret = -ENODEV; 2230 goto out; 2231 } 2232 2233 bat_priv = netdev_priv(soft_iface); 2234 2235 hard_iface = batadv_primary_if_get_selected(bat_priv); 2236 if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) { 2237 ret = -ENOENT; 2238 goto out; 2239 } 2240 2241 out: 2242 if (soft_iface) 2243 dev_put(soft_iface); 2244 2245 if (!ret && primary_if) 2246 *primary_if = hard_iface; 2247 else if (hard_iface) 2248 batadv_hardif_put(hard_iface); 2249 2250 return ret; 2251 } 2252 2253 /** 2254 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket 2255 * @msg: buffer for the message 2256 * @cb: callback structure containing arguments 2257 * 2258 * Return: message length. 2259 */ 2260 int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb) 2261 { 2262 struct batadv_hard_iface *primary_if = NULL; 2263 int portid = NETLINK_CB(cb->skb).portid; 2264 struct batadv_priv *bat_priv; 2265 long *bucket = &cb->args[0]; 2266 long *idx = &cb->args[1]; 2267 int ret; 2268 2269 ret = batadv_mcast_netlink_get_primary(cb, &primary_if); 2270 if (ret) 2271 return ret; 2272 2273 bat_priv = netdev_priv(primary_if->soft_iface); 2274 ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx); 2275 2276 batadv_hardif_put(primary_if); 2277 return ret; 2278 } 2279 2280 /** 2281 * batadv_mcast_free() - free the multicast optimizations structures 2282 * @bat_priv: the bat priv with all the soft interface information 2283 */ 2284 void batadv_mcast_free(struct batadv_priv *bat_priv) 2285 { 2286 cancel_delayed_work_sync(&bat_priv->mcast.work); 2287 2288 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2); 2289 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2); 2290 2291 /* safely calling outside of worker, as worker was canceled above */ 2292 batadv_mcast_mla_tt_retract(bat_priv, NULL); 2293 } 2294 2295 /** 2296 * batadv_mcast_purge_orig() - reset originator global mcast state modifications 2297 * @orig: the originator which is going to get purged 2298 */ 2299 void batadv_mcast_purge_orig(struct batadv_orig_node *orig) 2300 { 2301 struct batadv_priv *bat_priv = orig->bat_priv; 2302 2303 spin_lock_bh(&orig->mcast_handler_lock); 2304 2305 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); 2306 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); 2307 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); 2308 batadv_mcast_want_rtr4_update(bat_priv, orig, 2309 BATADV_MCAST_WANT_NO_RTR4); 2310 batadv_mcast_want_rtr6_update(bat_priv, orig, 2311 BATADV_MCAST_WANT_NO_RTR6); 2312 2313 spin_unlock_bh(&orig->mcast_handler_lock); 2314 } 2315
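
/* Illustrative sketch only, not part of the batman-adv module: it demonstrates
 * the caller contract of batadv_mcast_forw_send() described above - the skb is
 * consumed or freed by the call itself, on the success as well as on the error
 * path, so the caller must not touch it afterwards. The function name
 * batadv_example_mcast_xmit() and its use in a transmit path are assumptions
 * made for this sketch only.
 */
static netdev_tx_t __maybe_unused
batadv_example_mcast_xmit(struct batadv_priv *bat_priv, struct sk_buff *skb,
			  unsigned short vid)
{
	int ret;

	/* ownership of skb is handed over here, even on NET_XMIT_DROP */
	ret = batadv_mcast_forw_send(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Multicast unicast transmissions failed: %d\n", ret);

	return NETDEV_TX_OK;
}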