/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid);
#endif
unsigned int br_mdb_rehash_seq;

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}
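
/* Protocol-specific lookup wrappers: build a struct br_ip key on the
 * stack and defer to br_mdb_ip_get().
 */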
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}
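
/* Build a self-generated IGMP membership query: an Ethernet header
 * addressed to 01:00:5e:00:00:01, an IPv4 header carrying the Router
 * Alert option and destined to 224.0.0.1 (all-hosts), then the IGMP
 * query itself. A zero group address produces a general query.
 */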
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	*igmp_type = ICMPV6_MGM_QUERY;
	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *grp;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr,
						u8 *igmp_type)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
						    igmp_type);
#endif
	}
	return NULL;
}

static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}
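
/* Look up a group, creating the mdb entry (and, on first use, the hash
 * table itself) if it does not exist yet. br_multicast_get_group() may
 * return -EAGAIN after growing the table, in which case the hash is
 * recomputed against the new table before the entry is inserted.
 */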
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}
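
/* Per-protocol entry points for membership reports. Link-local groups
 * (224.0.0.x and the ff02::1 all-nodes address) are never tracked in
 * the mdb.
 */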
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);
	br_rtr_notify(br->dev, port, RTM_DELMDB);
	/* Don't allow timer refresh if the router expired */
	if (port->multicast_router == MDB_RTR_TYPE_TEMP)
		port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
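
/* Transmit an own query: out through a specific port via the bridge
 * netfilter LOCAL_OUT hook, or, with no port, up the bridge device
 * itself via netif_rx() after recording our own querier address.
 */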
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	unsigned long time;
	struct br_ip br_group;
	struct bridge_mcast_other_query *other_query = NULL;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist)) {
		hlist_del_init_rcu(&port->rlist);
		br_rtr_notify(br->dev, port, RTM_DELMDB);
		/* Don't allow timer refresh if disabling */
		if (port->multicast_router == MDB_RTR_TYPE_TEMP)
			port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	}
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    ntohs(grec->grec_nsrcs) == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid);
			if (err)
				break;
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    ntohs(*nsrcs) == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid);
			if (err)
				break;
		}
	}

	return err;
}
#endif
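
/* Querier election: a foreign querier wins (and is remembered) if no
 * own or other-querier timer is running, or if its source address is
 * numerically lower than the currently selected one.
 */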
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}
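
/* Handle an incoming query. General queries feed querier election and
 * mark the ingress port as a router port; group-specific queries lower
 * the membership timers of the queried group so that non-responders
 * expire quickly.
 */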
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
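
/* Handle a leave message. With fast-leave enabled on the port the port
 * group is removed immediately; otherwise, if we are the querier, a
 * group-specific query is sent and the timers are shortened to
 * last_member_count * last_member_interval so the entry ages out
 * unless another member responds.
 */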
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query);
}
#endif
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif
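
/* Entry point from the bridge input path: dispatch IGMP/MLD processing
 * by ethertype; non-IGMP/MLD traffic passes through untouched.
 */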
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);

	free_percpu(br->mcast_stats);
}
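
/* The configuration handlers below take user-supplied values (sysfs or
 * netlink); they all serialize on multicast_lock.
 */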
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case MDB_RTR_TYPE_TEMP_QUERY:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}
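
/* Toggle snooping on or off. Re-enabling rehashes the mdb and restarts
 * the own queriers on all active ports.
 */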
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* IGMPv3 queries are longer than a plain igmphdr */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* IGMPv1 queries have a zero max resp code */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* MLDv2 queries are longer than struct mld_msg */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
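/*
 * Usage sketch (illustrative): the bridge RX/TX paths account IGMP/MLD
 * traffic by passing the parsed message type and a BR_MCAST_DIR_* value
 * to br_multicast_count().  The wrapper below is hypothetical; igmp_type
 * stands for whatever the snooping parser extracted (0 if none).
 */
#if 0
static void example_count_rx(struct net_bridge *br, struct net_bridge_port *p,
			     const struct sk_buff *skb, u8 igmp_type)
{
	/* a zero type is ignored, as is a bridge with stats disabled */
	br_multicast_count(br, p, skb, igmp_type, BR_MCAST_DIR_RX);
}
#endif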
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
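/*
 * Usage sketch (illustrative): br_multicast_get_stats() folds the per-cpu
 * counters into a single snapshot; a NULL port yields the bridge-global
 * counters.  example_report_queries() is a made-up consumer.
 */
#if 0
static void example_report_queries(const struct net_bridge *br)
{
	struct br_mcast_stats snap;

	br_multicast_get_stats(br, NULL, &snap);
	pr_info("IGMPv2 queries: rx %llu tx %llu\n",
		(unsigned long long)snap.igmp_v2queries[BR_MCAST_DIR_RX],
		(unsigned long long)snap.igmp_v2queries[BR_MCAST_DIR_TX]);
}
#endif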