1 /* 2 * Bridge multicast support. 3 * 4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the Free 8 * Software Foundation; either version 2 of the License, or (at your option) 9 * any later version. 10 * 11 */ 12 13 #include <linux/err.h> 14 #include <linux/export.h> 15 #include <linux/if_ether.h> 16 #include <linux/igmp.h> 17 #include <linux/jhash.h> 18 #include <linux/kernel.h> 19 #include <linux/log2.h> 20 #include <linux/netdevice.h> 21 #include <linux/netfilter_bridge.h> 22 #include <linux/random.h> 23 #include <linux/rculist.h> 24 #include <linux/skbuff.h> 25 #include <linux/slab.h> 26 #include <linux/timer.h> 27 #include <linux/inetdevice.h> 28 #include <net/ip.h> 29 #if IS_ENABLED(CONFIG_IPV6) 30 #include <net/ipv6.h> 31 #include <net/mld.h> 32 #include <net/ip6_checksum.h> 33 #include <net/addrconf.h> 34 #endif 35 36 #include "br_private.h" 37 38 static void br_multicast_start_querier(struct net_bridge *br, 39 struct bridge_mcast_own_query *query); 40 static void br_multicast_add_router(struct net_bridge *br, 41 struct net_bridge_port *port); 42 static void br_ip4_multicast_leave_group(struct net_bridge *br, 43 struct net_bridge_port *port, 44 __be32 group, 45 __u16 vid); 46 #if IS_ENABLED(CONFIG_IPV6) 47 static void br_ip6_multicast_leave_group(struct net_bridge *br, 48 struct net_bridge_port *port, 49 const struct in6_addr *group, 50 __u16 vid); 51 #endif 52 unsigned int br_mdb_rehash_seq; 53 54 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 55 { 56 if (a->proto != b->proto) 57 return 0; 58 if (a->vid != b->vid) 59 return 0; 60 switch (a->proto) { 61 case htons(ETH_P_IP): 62 return a->u.ip4 == b->u.ip4; 63 #if IS_ENABLED(CONFIG_IPV6) 64 case htons(ETH_P_IPV6): 65 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); 66 #endif 67 } 68 return 0; 69 } 70 71 static inline int 
__br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip, 72 __u16 vid) 73 { 74 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1); 75 } 76 77 #if IS_ENABLED(CONFIG_IPV6) 78 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 79 const struct in6_addr *ip, 80 __u16 vid) 81 { 82 return jhash_2words(ipv6_addr_hash(ip), vid, 83 mdb->secret) & (mdb->max - 1); 84 } 85 #endif 86 87 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, 88 struct br_ip *ip) 89 { 90 switch (ip->proto) { 91 case htons(ETH_P_IP): 92 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid); 93 #if IS_ENABLED(CONFIG_IPV6) 94 case htons(ETH_P_IPV6): 95 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid); 96 #endif 97 } 98 return 0; 99 } 100 101 static struct net_bridge_mdb_entry *__br_mdb_ip_get( 102 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) 103 { 104 struct net_bridge_mdb_entry *mp; 105 106 hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 107 if (br_ip_equal(&mp->addr, dst)) 108 return mp; 109 } 110 111 return NULL; 112 } 113 114 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, 115 struct br_ip *dst) 116 { 117 if (!mdb) 118 return NULL; 119 120 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 121 } 122 123 static struct net_bridge_mdb_entry *br_mdb_ip4_get( 124 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid) 125 { 126 struct br_ip br_dst; 127 128 br_dst.u.ip4 = dst; 129 br_dst.proto = htons(ETH_P_IP); 130 br_dst.vid = vid; 131 132 return br_mdb_ip_get(mdb, &br_dst); 133 } 134 135 #if IS_ENABLED(CONFIG_IPV6) 136 static struct net_bridge_mdb_entry *br_mdb_ip6_get( 137 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst, 138 __u16 vid) 139 { 140 struct br_ip br_dst; 141 142 br_dst.u.ip6 = *dst; 143 br_dst.proto = htons(ETH_P_IPV6); 144 br_dst.vid = vid; 145 146 return br_mdb_ip_get(mdb, &br_dst); 147 } 148 #endif 149 150 struct net_bridge_mdb_entry *br_mdb_get(struct 
net_bridge *br, 151 struct sk_buff *skb, u16 vid) 152 { 153 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); 154 struct br_ip ip; 155 156 if (br->multicast_disabled) 157 return NULL; 158 159 if (BR_INPUT_SKB_CB(skb)->igmp) 160 return NULL; 161 162 ip.proto = skb->protocol; 163 ip.vid = vid; 164 165 switch (skb->protocol) { 166 case htons(ETH_P_IP): 167 ip.u.ip4 = ip_hdr(skb)->daddr; 168 break; 169 #if IS_ENABLED(CONFIG_IPV6) 170 case htons(ETH_P_IPV6): 171 ip.u.ip6 = ipv6_hdr(skb)->daddr; 172 break; 173 #endif 174 default: 175 return NULL; 176 } 177 178 return br_mdb_ip_get(mdb, &ip); 179 } 180 181 static void br_mdb_free(struct rcu_head *head) 182 { 183 struct net_bridge_mdb_htable *mdb = 184 container_of(head, struct net_bridge_mdb_htable, rcu); 185 struct net_bridge_mdb_htable *old = mdb->old; 186 187 mdb->old = NULL; 188 kfree(old->mhash); 189 kfree(old); 190 } 191 192 static int br_mdb_copy(struct net_bridge_mdb_htable *new, 193 struct net_bridge_mdb_htable *old, 194 int elasticity) 195 { 196 struct net_bridge_mdb_entry *mp; 197 int maxlen; 198 int len; 199 int i; 200 201 for (i = 0; i < old->max; i++) 202 hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver]) 203 hlist_add_head(&mp->hlist[new->ver], 204 &new->mhash[br_ip_hash(new, &mp->addr)]); 205 206 if (!elasticity) 207 return 0; 208 209 maxlen = 0; 210 for (i = 0; i < new->max; i++) { 211 len = 0; 212 hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver]) 213 len++; 214 if (len > maxlen) 215 maxlen = len; 216 } 217 218 return maxlen > elasticity ? 
-EINVAL : 0; 219 } 220 221 void br_multicast_free_pg(struct rcu_head *head) 222 { 223 struct net_bridge_port_group *p = 224 container_of(head, struct net_bridge_port_group, rcu); 225 226 kfree(p); 227 } 228 229 static void br_multicast_free_group(struct rcu_head *head) 230 { 231 struct net_bridge_mdb_entry *mp = 232 container_of(head, struct net_bridge_mdb_entry, rcu); 233 234 kfree(mp); 235 } 236 237 static void br_multicast_group_expired(unsigned long data) 238 { 239 struct net_bridge_mdb_entry *mp = (void *)data; 240 struct net_bridge *br = mp->br; 241 struct net_bridge_mdb_htable *mdb; 242 243 spin_lock(&br->multicast_lock); 244 if (!netif_running(br->dev) || timer_pending(&mp->timer)) 245 goto out; 246 247 mp->mglist = false; 248 249 if (mp->ports) 250 goto out; 251 252 mdb = mlock_dereference(br->mdb, br); 253 254 hlist_del_rcu(&mp->hlist[mdb->ver]); 255 mdb->size--; 256 257 call_rcu_bh(&mp->rcu, br_multicast_free_group); 258 259 out: 260 spin_unlock(&br->multicast_lock); 261 } 262 263 static void br_multicast_del_pg(struct net_bridge *br, 264 struct net_bridge_port_group *pg) 265 { 266 struct net_bridge_mdb_htable *mdb; 267 struct net_bridge_mdb_entry *mp; 268 struct net_bridge_port_group *p; 269 struct net_bridge_port_group __rcu **pp; 270 271 mdb = mlock_dereference(br->mdb, br); 272 273 mp = br_mdb_ip_get(mdb, &pg->addr); 274 if (WARN_ON(!mp)) 275 return; 276 277 for (pp = &mp->ports; 278 (p = mlock_dereference(*pp, br)) != NULL; 279 pp = &p->next) { 280 if (p != pg) 281 continue; 282 283 rcu_assign_pointer(*pp, p->next); 284 hlist_del_init(&p->mglist); 285 del_timer(&p->timer); 286 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, 287 p->flags); 288 call_rcu_bh(&p->rcu, br_multicast_free_pg); 289 290 if (!mp->ports && !mp->mglist && 291 netif_running(br->dev)) 292 mod_timer(&mp->timer, jiffies); 293 294 return; 295 } 296 297 WARN_ON(1); 298 } 299 300 static void br_multicast_port_group_expired(unsigned long data) 301 { 302 struct 
net_bridge_port_group *pg = (void *)data; 303 struct net_bridge *br = pg->port->br; 304 305 spin_lock(&br->multicast_lock); 306 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 307 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) 308 goto out; 309 310 br_multicast_del_pg(br, pg); 311 312 out: 313 spin_unlock(&br->multicast_lock); 314 } 315 316 static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, 317 int elasticity) 318 { 319 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1); 320 struct net_bridge_mdb_htable *mdb; 321 int err; 322 323 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC); 324 if (!mdb) 325 return -ENOMEM; 326 327 mdb->max = max; 328 mdb->old = old; 329 330 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC); 331 if (!mdb->mhash) { 332 kfree(mdb); 333 return -ENOMEM; 334 } 335 336 mdb->size = old ? old->size : 0; 337 mdb->ver = old ? old->ver ^ 1 : 0; 338 339 if (!old || elasticity) 340 get_random_bytes(&mdb->secret, sizeof(mdb->secret)); 341 else 342 mdb->secret = old->secret; 343 344 if (!old) 345 goto out; 346 347 err = br_mdb_copy(mdb, old, elasticity); 348 if (err) { 349 kfree(mdb->mhash); 350 kfree(mdb); 351 return err; 352 } 353 354 br_mdb_rehash_seq++; 355 call_rcu_bh(&mdb->rcu, br_mdb_free); 356 357 out: 358 rcu_assign_pointer(*mdbp, mdb); 359 360 return 0; 361 } 362 363 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 364 __be32 group) 365 { 366 struct sk_buff *skb; 367 struct igmphdr *ih; 368 struct ethhdr *eth; 369 struct iphdr *iph; 370 371 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + 372 sizeof(*ih) + 4); 373 if (!skb) 374 goto out; 375 376 skb->protocol = htons(ETH_P_IP); 377 378 skb_reset_mac_header(skb); 379 eth = eth_hdr(skb); 380 381 ether_addr_copy(eth->h_source, br->dev->dev_addr); 382 eth->h_dest[0] = 1; 383 eth->h_dest[1] = 0; 384 eth->h_dest[2] = 0x5e; 385 eth->h_dest[3] = 0; 386 eth->h_dest[4] = 0; 387 
eth->h_dest[5] = 1; 388 eth->h_proto = htons(ETH_P_IP); 389 skb_put(skb, sizeof(*eth)); 390 391 skb_set_network_header(skb, skb->len); 392 iph = ip_hdr(skb); 393 394 iph->version = 4; 395 iph->ihl = 6; 396 iph->tos = 0xc0; 397 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4); 398 iph->id = 0; 399 iph->frag_off = htons(IP_DF); 400 iph->ttl = 1; 401 iph->protocol = IPPROTO_IGMP; 402 iph->saddr = br->multicast_query_use_ifaddr ? 403 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0; 404 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 405 ((u8 *)&iph[1])[0] = IPOPT_RA; 406 ((u8 *)&iph[1])[1] = 4; 407 ((u8 *)&iph[1])[2] = 0; 408 ((u8 *)&iph[1])[3] = 0; 409 ip_send_check(iph); 410 skb_put(skb, 24); 411 412 skb_set_transport_header(skb, skb->len); 413 ih = igmp_hdr(skb); 414 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 415 ih->code = (group ? br->multicast_last_member_interval : 416 br->multicast_query_response_interval) / 417 (HZ / IGMP_TIMER_SCALE); 418 ih->group = group; 419 ih->csum = 0; 420 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); 421 skb_put(skb, sizeof(*ih)); 422 423 __skb_pull(skb, sizeof(*eth)); 424 425 out: 426 return skb; 427 } 428 429 #if IS_ENABLED(CONFIG_IPV6) 430 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 431 const struct in6_addr *group) 432 { 433 struct sk_buff *skb; 434 struct ipv6hdr *ip6h; 435 struct mld_msg *mldq; 436 struct ethhdr *eth; 437 u8 *hopopt; 438 unsigned long interval; 439 440 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + 441 8 + sizeof(*mldq)); 442 if (!skb) 443 goto out; 444 445 skb->protocol = htons(ETH_P_IPV6); 446 447 /* Ethernet header */ 448 skb_reset_mac_header(skb); 449 eth = eth_hdr(skb); 450 451 ether_addr_copy(eth->h_source, br->dev->dev_addr); 452 eth->h_proto = htons(ETH_P_IPV6); 453 skb_put(skb, sizeof(*eth)); 454 455 /* IPv6 header + HbH option */ 456 skb_set_network_header(skb, skb->len); 457 ip6h = ipv6_hdr(skb); 458 459 *(__force __be32 *)ip6h = 
htonl(0x60000000); 460 ip6h->payload_len = htons(8 + sizeof(*mldq)); 461 ip6h->nexthdr = IPPROTO_HOPOPTS; 462 ip6h->hop_limit = 1; 463 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 464 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 465 &ip6h->saddr)) { 466 kfree_skb(skb); 467 br->has_ipv6_addr = 0; 468 return NULL; 469 } 470 471 br->has_ipv6_addr = 1; 472 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 473 474 hopopt = (u8 *)(ip6h + 1); 475 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 476 hopopt[1] = 0; /* length of HbH */ 477 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 478 hopopt[3] = 2; /* Length of RA Option */ 479 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 480 hopopt[5] = 0; 481 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ 482 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ 483 484 skb_put(skb, sizeof(*ip6h) + 8); 485 486 /* ICMPv6 */ 487 skb_set_transport_header(skb, skb->len); 488 mldq = (struct mld_msg *) icmp6_hdr(skb); 489 490 interval = ipv6_addr_any(group) ? 
491 br->multicast_query_response_interval : 492 br->multicast_last_member_interval; 493 494 mldq->mld_type = ICMPV6_MGM_QUERY; 495 mldq->mld_code = 0; 496 mldq->mld_cksum = 0; 497 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 498 mldq->mld_reserved = 0; 499 mldq->mld_mca = *group; 500 501 /* checksum */ 502 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 503 sizeof(*mldq), IPPROTO_ICMPV6, 504 csum_partial(mldq, 505 sizeof(*mldq), 0)); 506 skb_put(skb, sizeof(*mldq)); 507 508 __skb_pull(skb, sizeof(*eth)); 509 510 out: 511 return skb; 512 } 513 #endif 514 515 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 516 struct br_ip *addr) 517 { 518 switch (addr->proto) { 519 case htons(ETH_P_IP): 520 return br_ip4_multicast_alloc_query(br, addr->u.ip4); 521 #if IS_ENABLED(CONFIG_IPV6) 522 case htons(ETH_P_IPV6): 523 return br_ip6_multicast_alloc_query(br, &addr->u.ip6); 524 #endif 525 } 526 return NULL; 527 } 528 529 static struct net_bridge_mdb_entry *br_multicast_get_group( 530 struct net_bridge *br, struct net_bridge_port *port, 531 struct br_ip *group, int hash) 532 { 533 struct net_bridge_mdb_htable *mdb; 534 struct net_bridge_mdb_entry *mp; 535 unsigned int count = 0; 536 unsigned int max; 537 int elasticity; 538 int err; 539 540 mdb = rcu_dereference_protected(br->mdb, 1); 541 hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 542 count++; 543 if (unlikely(br_ip_equal(group, &mp->addr))) 544 return mp; 545 } 546 547 elasticity = 0; 548 max = mdb->max; 549 550 if (unlikely(count > br->hash_elasticity && count)) { 551 if (net_ratelimit()) 552 br_info(br, "Multicast hash table " 553 "chain limit reached: %s\n", 554 port ? port->dev->name : br->dev->name); 555 556 elasticity = br->hash_elasticity; 557 } 558 559 if (mdb->size >= max) { 560 max *= 2; 561 if (unlikely(max > br->hash_max)) { 562 br_warn(br, "Multicast hash table maximum of %d " 563 "reached, disabling snooping: %s\n", 564 br->hash_max, 565 port ? 
port->dev->name : br->dev->name); 566 err = -E2BIG; 567 disable: 568 br->multicast_disabled = 1; 569 goto err; 570 } 571 } 572 573 if (max > mdb->max || elasticity) { 574 if (mdb->old) { 575 if (net_ratelimit()) 576 br_info(br, "Multicast hash table " 577 "on fire: %s\n", 578 port ? port->dev->name : br->dev->name); 579 err = -EEXIST; 580 goto err; 581 } 582 583 err = br_mdb_rehash(&br->mdb, max, elasticity); 584 if (err) { 585 br_warn(br, "Cannot rehash multicast " 586 "hash table, disabling snooping: %s, %d, %d\n", 587 port ? port->dev->name : br->dev->name, 588 mdb->size, err); 589 goto disable; 590 } 591 592 err = -EAGAIN; 593 goto err; 594 } 595 596 return NULL; 597 598 err: 599 mp = ERR_PTR(err); 600 return mp; 601 } 602 603 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br, 604 struct net_bridge_port *port, struct br_ip *group) 605 { 606 struct net_bridge_mdb_htable *mdb; 607 struct net_bridge_mdb_entry *mp; 608 int hash; 609 int err; 610 611 mdb = rcu_dereference_protected(br->mdb, 1); 612 if (!mdb) { 613 err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0); 614 if (err) 615 return ERR_PTR(err); 616 goto rehash; 617 } 618 619 hash = br_ip_hash(mdb, group); 620 mp = br_multicast_get_group(br, port, group, hash); 621 switch (PTR_ERR(mp)) { 622 case 0: 623 break; 624 625 case -EAGAIN: 626 rehash: 627 mdb = rcu_dereference_protected(br->mdb, 1); 628 hash = br_ip_hash(mdb, group); 629 break; 630 631 default: 632 goto out; 633 } 634 635 mp = kzalloc(sizeof(*mp), GFP_ATOMIC); 636 if (unlikely(!mp)) 637 return ERR_PTR(-ENOMEM); 638 639 mp->br = br; 640 mp->addr = *group; 641 setup_timer(&mp->timer, br_multicast_group_expired, 642 (unsigned long)mp); 643 644 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 645 mdb->size++; 646 647 out: 648 return mp; 649 } 650 651 struct net_bridge_port_group *br_multicast_new_port_group( 652 struct net_bridge_port *port, 653 struct br_ip *group, 654 struct net_bridge_port_group __rcu *next, 655 
unsigned char flags) 656 { 657 struct net_bridge_port_group *p; 658 659 p = kzalloc(sizeof(*p), GFP_ATOMIC); 660 if (unlikely(!p)) 661 return NULL; 662 663 p->addr = *group; 664 p->port = port; 665 p->flags = flags; 666 rcu_assign_pointer(p->next, next); 667 hlist_add_head(&p->mglist, &port->mglist); 668 setup_timer(&p->timer, br_multicast_port_group_expired, 669 (unsigned long)p); 670 return p; 671 } 672 673 static int br_multicast_add_group(struct net_bridge *br, 674 struct net_bridge_port *port, 675 struct br_ip *group) 676 { 677 struct net_bridge_mdb_entry *mp; 678 struct net_bridge_port_group *p; 679 struct net_bridge_port_group __rcu **pp; 680 unsigned long now = jiffies; 681 int err; 682 683 spin_lock(&br->multicast_lock); 684 if (!netif_running(br->dev) || 685 (port && port->state == BR_STATE_DISABLED)) 686 goto out; 687 688 mp = br_multicast_new_group(br, port, group); 689 err = PTR_ERR(mp); 690 if (IS_ERR(mp)) 691 goto err; 692 693 if (!port) { 694 mp->mglist = true; 695 mod_timer(&mp->timer, now + br->multicast_membership_interval); 696 goto out; 697 } 698 699 for (pp = &mp->ports; 700 (p = mlock_dereference(*pp, br)) != NULL; 701 pp = &p->next) { 702 if (p->port == port) 703 goto found; 704 if ((unsigned long)p->port < (unsigned long)port) 705 break; 706 } 707 708 p = br_multicast_new_port_group(port, group, *pp, 0); 709 if (unlikely(!p)) 710 goto err; 711 rcu_assign_pointer(*pp, p); 712 br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0); 713 714 found: 715 mod_timer(&p->timer, now + br->multicast_membership_interval); 716 out: 717 err = 0; 718 719 err: 720 spin_unlock(&br->multicast_lock); 721 return err; 722 } 723 724 static int br_ip4_multicast_add_group(struct net_bridge *br, 725 struct net_bridge_port *port, 726 __be32 group, 727 __u16 vid) 728 { 729 struct br_ip br_group; 730 731 if (ipv4_is_local_multicast(group)) 732 return 0; 733 734 br_group.u.ip4 = group; 735 br_group.proto = htons(ETH_P_IP); 736 br_group.vid = vid; 737 738 return 
br_multicast_add_group(br, port, &br_group); 739 } 740 741 #if IS_ENABLED(CONFIG_IPV6) 742 static int br_ip6_multicast_add_group(struct net_bridge *br, 743 struct net_bridge_port *port, 744 const struct in6_addr *group, 745 __u16 vid) 746 { 747 struct br_ip br_group; 748 749 if (ipv6_addr_is_ll_all_nodes(group)) 750 return 0; 751 752 br_group.u.ip6 = *group; 753 br_group.proto = htons(ETH_P_IPV6); 754 br_group.vid = vid; 755 756 return br_multicast_add_group(br, port, &br_group); 757 } 758 #endif 759 760 static void br_multicast_router_expired(unsigned long data) 761 { 762 struct net_bridge_port *port = (void *)data; 763 struct net_bridge *br = port->br; 764 765 spin_lock(&br->multicast_lock); 766 if (port->multicast_router == MDB_RTR_TYPE_DISABLED || 767 port->multicast_router == MDB_RTR_TYPE_PERM || 768 timer_pending(&port->multicast_router_timer) || 769 hlist_unhashed(&port->rlist)) 770 goto out; 771 772 hlist_del_init_rcu(&port->rlist); 773 br_rtr_notify(br->dev, port, RTM_DELMDB); 774 /* Don't allow timer refresh if the router expired */ 775 if (port->multicast_router == MDB_RTR_TYPE_TEMP) 776 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 777 778 out: 779 spin_unlock(&br->multicast_lock); 780 } 781 782 static void br_multicast_local_router_expired(unsigned long data) 783 { 784 } 785 786 static void br_multicast_querier_expired(struct net_bridge *br, 787 struct bridge_mcast_own_query *query) 788 { 789 spin_lock(&br->multicast_lock); 790 if (!netif_running(br->dev) || br->multicast_disabled) 791 goto out; 792 793 br_multicast_start_querier(br, query); 794 795 out: 796 spin_unlock(&br->multicast_lock); 797 } 798 799 static void br_ip4_multicast_querier_expired(unsigned long data) 800 { 801 struct net_bridge *br = (void *)data; 802 803 br_multicast_querier_expired(br, &br->ip4_own_query); 804 } 805 806 #if IS_ENABLED(CONFIG_IPV6) 807 static void br_ip6_multicast_querier_expired(unsigned long data) 808 { 809 struct net_bridge *br = (void *)data; 810 811 
br_multicast_querier_expired(br, &br->ip6_own_query); 812 } 813 #endif 814 815 static void br_multicast_select_own_querier(struct net_bridge *br, 816 struct br_ip *ip, 817 struct sk_buff *skb) 818 { 819 if (ip->proto == htons(ETH_P_IP)) 820 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr; 821 #if IS_ENABLED(CONFIG_IPV6) 822 else 823 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr; 824 #endif 825 } 826 827 static void __br_multicast_send_query(struct net_bridge *br, 828 struct net_bridge_port *port, 829 struct br_ip *ip) 830 { 831 struct sk_buff *skb; 832 833 skb = br_multicast_alloc_query(br, ip); 834 if (!skb) 835 return; 836 837 if (port) { 838 skb->dev = port->dev; 839 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, 840 dev_net(port->dev), NULL, skb, NULL, skb->dev, 841 br_dev_queue_push_xmit); 842 } else { 843 br_multicast_select_own_querier(br, ip, skb); 844 netif_rx(skb); 845 } 846 } 847 848 static void br_multicast_send_query(struct net_bridge *br, 849 struct net_bridge_port *port, 850 struct bridge_mcast_own_query *own_query) 851 { 852 unsigned long time; 853 struct br_ip br_group; 854 struct bridge_mcast_other_query *other_query = NULL; 855 856 if (!netif_running(br->dev) || br->multicast_disabled || 857 !br->multicast_querier) 858 return; 859 860 memset(&br_group.u, 0, sizeof(br_group.u)); 861 862 if (port ? (own_query == &port->ip4_own_query) : 863 (own_query == &br->ip4_own_query)) { 864 other_query = &br->ip4_other_query; 865 br_group.proto = htons(ETH_P_IP); 866 #if IS_ENABLED(CONFIG_IPV6) 867 } else { 868 other_query = &br->ip6_other_query; 869 br_group.proto = htons(ETH_P_IPV6); 870 #endif 871 } 872 873 if (!other_query || timer_pending(&other_query->timer)) 874 return; 875 876 __br_multicast_send_query(br, port, &br_group); 877 878 time = jiffies; 879 time += own_query->startup_sent < br->multicast_startup_query_count ? 
880 br->multicast_startup_query_interval : 881 br->multicast_query_interval; 882 mod_timer(&own_query->timer, time); 883 } 884 885 static void 886 br_multicast_port_query_expired(struct net_bridge_port *port, 887 struct bridge_mcast_own_query *query) 888 { 889 struct net_bridge *br = port->br; 890 891 spin_lock(&br->multicast_lock); 892 if (port->state == BR_STATE_DISABLED || 893 port->state == BR_STATE_BLOCKING) 894 goto out; 895 896 if (query->startup_sent < br->multicast_startup_query_count) 897 query->startup_sent++; 898 899 br_multicast_send_query(port->br, port, query); 900 901 out: 902 spin_unlock(&br->multicast_lock); 903 } 904 905 static void br_ip4_multicast_port_query_expired(unsigned long data) 906 { 907 struct net_bridge_port *port = (void *)data; 908 909 br_multicast_port_query_expired(port, &port->ip4_own_query); 910 } 911 912 #if IS_ENABLED(CONFIG_IPV6) 913 static void br_ip6_multicast_port_query_expired(unsigned long data) 914 { 915 struct net_bridge_port *port = (void *)data; 916 917 br_multicast_port_query_expired(port, &port->ip6_own_query); 918 } 919 #endif 920 921 void br_multicast_add_port(struct net_bridge_port *port) 922 { 923 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 924 925 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 926 (unsigned long)port); 927 setup_timer(&port->ip4_own_query.timer, 928 br_ip4_multicast_port_query_expired, (unsigned long)port); 929 #if IS_ENABLED(CONFIG_IPV6) 930 setup_timer(&port->ip6_own_query.timer, 931 br_ip6_multicast_port_query_expired, (unsigned long)port); 932 #endif 933 } 934 935 void br_multicast_del_port(struct net_bridge_port *port) 936 { 937 struct net_bridge *br = port->br; 938 struct net_bridge_port_group *pg; 939 struct hlist_node *n; 940 941 /* Take care of the remaining groups, only perm ones should be left */ 942 spin_lock_bh(&br->multicast_lock); 943 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 944 br_multicast_del_pg(br, pg); 945 
spin_unlock_bh(&br->multicast_lock); 946 del_timer_sync(&port->multicast_router_timer); 947 } 948 949 static void br_multicast_enable(struct bridge_mcast_own_query *query) 950 { 951 query->startup_sent = 0; 952 953 if (try_to_del_timer_sync(&query->timer) >= 0 || 954 del_timer(&query->timer)) 955 mod_timer(&query->timer, jiffies); 956 } 957 958 void br_multicast_enable_port(struct net_bridge_port *port) 959 { 960 struct net_bridge *br = port->br; 961 962 spin_lock(&br->multicast_lock); 963 if (br->multicast_disabled || !netif_running(br->dev)) 964 goto out; 965 966 br_multicast_enable(&port->ip4_own_query); 967 #if IS_ENABLED(CONFIG_IPV6) 968 br_multicast_enable(&port->ip6_own_query); 969 #endif 970 if (port->multicast_router == MDB_RTR_TYPE_PERM && 971 hlist_unhashed(&port->rlist)) 972 br_multicast_add_router(br, port); 973 974 out: 975 spin_unlock(&br->multicast_lock); 976 } 977 978 void br_multicast_disable_port(struct net_bridge_port *port) 979 { 980 struct net_bridge *br = port->br; 981 struct net_bridge_port_group *pg; 982 struct hlist_node *n; 983 984 spin_lock(&br->multicast_lock); 985 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 986 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT)) 987 br_multicast_del_pg(br, pg); 988 989 if (!hlist_unhashed(&port->rlist)) { 990 hlist_del_init_rcu(&port->rlist); 991 br_rtr_notify(br->dev, port, RTM_DELMDB); 992 /* Don't allow timer refresh if disabling */ 993 if (port->multicast_router == MDB_RTR_TYPE_TEMP) 994 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 995 } 996 del_timer(&port->multicast_router_timer); 997 del_timer(&port->ip4_own_query.timer); 998 #if IS_ENABLED(CONFIG_IPV6) 999 del_timer(&port->ip6_own_query.timer); 1000 #endif 1001 spin_unlock(&br->multicast_lock); 1002 } 1003 1004 static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 1005 struct net_bridge_port *port, 1006 struct sk_buff *skb, 1007 u16 vid) 1008 { 1009 struct igmpv3_report *ih; 1010 struct igmpv3_grec *grec; 1011 int i; 1012 int 
len; 1013 int num; 1014 int type; 1015 int err = 0; 1016 __be32 group; 1017 1018 ih = igmpv3_report_hdr(skb); 1019 num = ntohs(ih->ngrec); 1020 len = skb_transport_offset(skb) + sizeof(*ih); 1021 1022 for (i = 0; i < num; i++) { 1023 len += sizeof(*grec); 1024 if (!pskb_may_pull(skb, len)) 1025 return -EINVAL; 1026 1027 grec = (void *)(skb->data + len - sizeof(*grec)); 1028 group = grec->grec_mca; 1029 type = grec->grec_type; 1030 1031 len += ntohs(grec->grec_nsrcs) * 4; 1032 if (!pskb_may_pull(skb, len)) 1033 return -EINVAL; 1034 1035 /* We treat this as an IGMPv2 report for now. */ 1036 switch (type) { 1037 case IGMPV3_MODE_IS_INCLUDE: 1038 case IGMPV3_MODE_IS_EXCLUDE: 1039 case IGMPV3_CHANGE_TO_INCLUDE: 1040 case IGMPV3_CHANGE_TO_EXCLUDE: 1041 case IGMPV3_ALLOW_NEW_SOURCES: 1042 case IGMPV3_BLOCK_OLD_SOURCES: 1043 break; 1044 1045 default: 1046 continue; 1047 } 1048 1049 if ((type == IGMPV3_CHANGE_TO_INCLUDE || 1050 type == IGMPV3_MODE_IS_INCLUDE) && 1051 ntohs(grec->grec_nsrcs) == 0) { 1052 br_ip4_multicast_leave_group(br, port, group, vid); 1053 } else { 1054 err = br_ip4_multicast_add_group(br, port, group, vid); 1055 if (err) 1056 break; 1057 } 1058 } 1059 1060 return err; 1061 } 1062 1063 #if IS_ENABLED(CONFIG_IPV6) 1064 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 1065 struct net_bridge_port *port, 1066 struct sk_buff *skb, 1067 u16 vid) 1068 { 1069 struct icmp6hdr *icmp6h; 1070 struct mld2_grec *grec; 1071 int i; 1072 int len; 1073 int num; 1074 int err = 0; 1075 1076 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1077 return -EINVAL; 1078 1079 icmp6h = icmp6_hdr(skb); 1080 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1081 len = skb_transport_offset(skb) + sizeof(*icmp6h); 1082 1083 for (i = 0; i < num; i++) { 1084 __be16 *nsrcs, _nsrcs; 1085 1086 nsrcs = skb_header_pointer(skb, 1087 len + offsetof(struct mld2_grec, 1088 grec_nsrcs), 1089 sizeof(_nsrcs), &_nsrcs); 1090 if (!nsrcs) 1091 return -EINVAL; 1092 1093 if (!pskb_may_pull(skb, 1094 
len + sizeof(*grec) + 1095 sizeof(struct in6_addr) * ntohs(*nsrcs))) 1096 return -EINVAL; 1097 1098 grec = (struct mld2_grec *)(skb->data + len); 1099 len += sizeof(*grec) + 1100 sizeof(struct in6_addr) * ntohs(*nsrcs); 1101 1102 /* We treat these as MLDv1 reports for now. */ 1103 switch (grec->grec_type) { 1104 case MLD2_MODE_IS_INCLUDE: 1105 case MLD2_MODE_IS_EXCLUDE: 1106 case MLD2_CHANGE_TO_INCLUDE: 1107 case MLD2_CHANGE_TO_EXCLUDE: 1108 case MLD2_ALLOW_NEW_SOURCES: 1109 case MLD2_BLOCK_OLD_SOURCES: 1110 break; 1111 1112 default: 1113 continue; 1114 } 1115 1116 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 1117 grec->grec_type == MLD2_MODE_IS_INCLUDE) && 1118 ntohs(*nsrcs) == 0) { 1119 br_ip6_multicast_leave_group(br, port, &grec->grec_mca, 1120 vid); 1121 } else { 1122 err = br_ip6_multicast_add_group(br, port, 1123 &grec->grec_mca, vid); 1124 if (!err) 1125 break; 1126 } 1127 } 1128 1129 return err; 1130 } 1131 #endif 1132 1133 static bool br_ip4_multicast_select_querier(struct net_bridge *br, 1134 struct net_bridge_port *port, 1135 __be32 saddr) 1136 { 1137 if (!timer_pending(&br->ip4_own_query.timer) && 1138 !timer_pending(&br->ip4_other_query.timer)) 1139 goto update; 1140 1141 if (!br->ip4_querier.addr.u.ip4) 1142 goto update; 1143 1144 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4)) 1145 goto update; 1146 1147 return false; 1148 1149 update: 1150 br->ip4_querier.addr.u.ip4 = saddr; 1151 1152 /* update protected by general multicast_lock by caller */ 1153 rcu_assign_pointer(br->ip4_querier.port, port); 1154 1155 return true; 1156 } 1157 1158 #if IS_ENABLED(CONFIG_IPV6) 1159 static bool br_ip6_multicast_select_querier(struct net_bridge *br, 1160 struct net_bridge_port *port, 1161 struct in6_addr *saddr) 1162 { 1163 if (!timer_pending(&br->ip6_own_query.timer) && 1164 !timer_pending(&br->ip6_other_query.timer)) 1165 goto update; 1166 1167 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0) 1168 goto update; 1169 1170 return false; 1171 
update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

/* Run querier election for the protocol family carried in @saddr.
 * Returns true if the sender at @saddr (seen on @port) wins and becomes
 * the selected querier.  Unknown protocols never win.
 */
static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

/* Re-arm the "another querier is present" timer.  delay_time is only
 * refreshed while the timer is idle, so an already-armed timer keeps its
 * original deadline for querier startup decisions.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* already on the router list, nothing to do */
	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
}

/* Note that a multicast router was seen.  A NULL @port means the bridge
 * device itself; otherwise the port is added to the router list (unless
 * its router mode forbids it) and its expiry timer is refreshed.
 * Called with br->multicast_lock held.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	/* disabled ports never join the list; permanent ones are already
	 * on it and use no timer
	 */
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

/* Handle a received IGMP/MLD query: if the sender wins querier election,
 * refresh the other-querier timer and treat the sender as a router port.
 * NOTE(review): queries arriving with an unspecified source address also
 * refresh router state here; later kernels skip router learning for
 * 0.0.0.0-sourced queries — confirm which behaviour is wanted.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

/* Process an IGMPv1/v2/v3 query.  General queries update querier state;
 * group-specific queries lower the group and port-group expiry timers to
 * last_member_count * max response time so silent members age out.
 */
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		/* IGMPv1/v2 query; a zero code means IGMPv1 (fixed 10s
		 * max response time) and IGMPv1 queries are always general
		 */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* group-and-source-specific queries are not handled */
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* only shorten timers, never extend them; try_to_del_timer_sync
	 * guards against racing with a running timer handler
	 */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLDv1/MLDv2 counterpart of br_ip4_multicast_query(). */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	/* a general query carries the unspecified address as the group */
	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* shorten (never extend) the group and port-group timers */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Common leave handling for IGMP leave / MLD done messages.
 * With fast-leave enabled the port-group entry is removed immediately;
 * otherwise, if we are the active querier, a group-specific query is sent
 * and the relevant timers are shortened to the last-member interval.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* drop the port's membership right away, no query round */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			/* last member gone: expire the group entry now */
			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	/* defer to the foreign querier if one is active */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* leave on the bridge device itself: age the group entry */
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* IGMP leave: build the br_ip key and hand off to the common path.
 * Link-local (224.0.0.x) groups are never snooped.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD done: build the br_ip key and hand off to the common path.
 * The all-nodes group is never snooped.
 */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query);
}
#endif

/* Validate and dispatch an IPv4 packet that may carry IGMP.
 * -ENOMSG from ip_mc_check_igmp means "valid but not IGMP": such
 * multicast traffic is flagged mrouters_only unless it is link-local.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		return err;
	}

	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

	/* ip_mc_check_igmp may have handed back a separate trimmed clone */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_multicast_ipv4_rcv() for MLD. */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		return err;
	}

	BR_INPUT_SKB_CB(skb)->igmp = 1;
	mld = (struct mld_msg *)skb_transport_header(skb);

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	return err;
}
#endif

/* Entry point from the bridge input path for multicast snooping. */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return br_multicast_ipv4_rcv(br, port, skb, vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_multicast_ipv6_rcv(br, port, skb, vid);
#endif
	}

	return 0;
}

/* Own-query timer fired: clear the selected querier and send the next
 * (startup) query from the bridge itself.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

/* Initialise per-bridge multicast state with the RFC-suggested defaults
 * (intervals follow IGMPv2/MLD: 125s query interval, 260s membership
 * interval, etc).  Called at bridge device creation.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

/* Restart the startup-query sequence for one address family. */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

/* Stop all per-bridge multicast timers (bridge going down). */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

/* Tear down the MDB hash table on bridge device destruction; entries are
 * freed via RCU and a pending old table (from a rehash) is drained with
 * rcu_barrier_bh() first.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}

/* Set the bridge-level multicast router mode (sysfs/netlink).
 * Returns -EINVAL for modes not valid on the bridge device itself.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case MDB_RTR_TYPE_TEMP_QUERY:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Remove @p from the router list (if present) and notify userspace. */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
}

/* Set a port's multicast router mode (sysfs/netlink).  Setting the same
 * TEMP mode again just refreshes the router timer.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

/* Kick off own queries on the bridge and every forwarding port for the
 * address family selected by @query.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

/* Enable/disable multicast snooping.  Enabling rehashes the MDB (a still
 * pending old table makes this fail with -EEXIST and rolls back) and
 * restarts the queriers.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Enable/disable sending our own queries.  On enable, give a foreign
 * querier max_response_time to show up before electing ourselves.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

/* Set the MDB hash table size limit (must be a power of two and not
 * smaller than the current table); rehashes an existing table.
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* build a minimal header so the generic querier check can pick
	 * the address family from h_proto
	 */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
2148 */ 2149 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) 2150 { 2151 struct net_bridge *br; 2152 struct net_bridge_port *port; 2153 bool ret = false; 2154 2155 rcu_read_lock(); 2156 if (!br_port_exists(dev)) 2157 goto unlock; 2158 2159 port = br_port_get_rcu(dev); 2160 if (!port || !port->br) 2161 goto unlock; 2162 2163 br = port->br; 2164 2165 switch (proto) { 2166 case ETH_P_IP: 2167 if (!timer_pending(&br->ip4_other_query.timer) || 2168 rcu_dereference(br->ip4_querier.port) == port) 2169 goto unlock; 2170 break; 2171 #if IS_ENABLED(CONFIG_IPV6) 2172 case ETH_P_IPV6: 2173 if (!timer_pending(&br->ip6_other_query.timer) || 2174 rcu_dereference(br->ip6_querier.port) == port) 2175 goto unlock; 2176 break; 2177 #endif 2178 default: 2179 goto unlock; 2180 } 2181 2182 ret = true; 2183 unlock: 2184 rcu_read_unlock(); 2185 return ret; 2186 } 2187 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 2188