1 /* 2 * Bridge multicast support. 3 * 4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the Free 8 * Software Foundation; either version 2 of the License, or (at your option) 9 * any later version. 10 * 11 */ 12 13 #include <linux/err.h> 14 #include <linux/export.h> 15 #include <linux/if_ether.h> 16 #include <linux/igmp.h> 17 #include <linux/jhash.h> 18 #include <linux/kernel.h> 19 #include <linux/log2.h> 20 #include <linux/netdevice.h> 21 #include <linux/netfilter_bridge.h> 22 #include <linux/random.h> 23 #include <linux/rculist.h> 24 #include <linux/skbuff.h> 25 #include <linux/slab.h> 26 #include <linux/timer.h> 27 #include <linux/inetdevice.h> 28 #include <net/ip.h> 29 #if IS_ENABLED(CONFIG_IPV6) 30 #include <net/ipv6.h> 31 #include <net/mld.h> 32 #include <net/ip6_checksum.h> 33 #include <net/addrconf.h> 34 #endif 35 36 #include "br_private.h" 37 38 static void br_multicast_start_querier(struct net_bridge *br, 39 struct bridge_mcast_own_query *query); 40 static void br_multicast_add_router(struct net_bridge *br, 41 struct net_bridge_port *port); 42 static void br_ip4_multicast_leave_group(struct net_bridge *br, 43 struct net_bridge_port *port, 44 __be32 group, 45 __u16 vid); 46 #if IS_ENABLED(CONFIG_IPV6) 47 static void br_ip6_multicast_leave_group(struct net_bridge *br, 48 struct net_bridge_port *port, 49 const struct in6_addr *group, 50 __u16 vid); 51 #endif 52 unsigned int br_mdb_rehash_seq; 53 54 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 55 { 56 if (a->proto != b->proto) 57 return 0; 58 if (a->vid != b->vid) 59 return 0; 60 switch (a->proto) { 61 case htons(ETH_P_IP): 62 return a->u.ip4 == b->u.ip4; 63 #if IS_ENABLED(CONFIG_IPV6) 64 case htons(ETH_P_IPV6): 65 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); 66 #endif 67 } 68 return 0; 69 } 70 71 static inline int 
__br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip, 72 __u16 vid) 73 { 74 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1); 75 } 76 77 #if IS_ENABLED(CONFIG_IPV6) 78 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 79 const struct in6_addr *ip, 80 __u16 vid) 81 { 82 return jhash_2words(ipv6_addr_hash(ip), vid, 83 mdb->secret) & (mdb->max - 1); 84 } 85 #endif 86 87 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, 88 struct br_ip *ip) 89 { 90 switch (ip->proto) { 91 case htons(ETH_P_IP): 92 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid); 93 #if IS_ENABLED(CONFIG_IPV6) 94 case htons(ETH_P_IPV6): 95 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid); 96 #endif 97 } 98 return 0; 99 } 100 101 static struct net_bridge_mdb_entry *__br_mdb_ip_get( 102 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) 103 { 104 struct net_bridge_mdb_entry *mp; 105 106 hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 107 if (br_ip_equal(&mp->addr, dst)) 108 return mp; 109 } 110 111 return NULL; 112 } 113 114 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, 115 struct br_ip *dst) 116 { 117 if (!mdb) 118 return NULL; 119 120 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 121 } 122 123 static struct net_bridge_mdb_entry *br_mdb_ip4_get( 124 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid) 125 { 126 struct br_ip br_dst; 127 128 br_dst.u.ip4 = dst; 129 br_dst.proto = htons(ETH_P_IP); 130 br_dst.vid = vid; 131 132 return br_mdb_ip_get(mdb, &br_dst); 133 } 134 135 #if IS_ENABLED(CONFIG_IPV6) 136 static struct net_bridge_mdb_entry *br_mdb_ip6_get( 137 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst, 138 __u16 vid) 139 { 140 struct br_ip br_dst; 141 142 br_dst.u.ip6 = *dst; 143 br_dst.proto = htons(ETH_P_IPV6); 144 br_dst.vid = vid; 145 146 return br_mdb_ip_get(mdb, &br_dst); 147 } 148 #endif 149 150 struct net_bridge_mdb_entry *br_mdb_get(struct 
net_bridge *br, 151 struct sk_buff *skb, u16 vid) 152 { 153 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); 154 struct br_ip ip; 155 156 if (br->multicast_disabled) 157 return NULL; 158 159 if (BR_INPUT_SKB_CB(skb)->igmp) 160 return NULL; 161 162 ip.proto = skb->protocol; 163 ip.vid = vid; 164 165 switch (skb->protocol) { 166 case htons(ETH_P_IP): 167 ip.u.ip4 = ip_hdr(skb)->daddr; 168 break; 169 #if IS_ENABLED(CONFIG_IPV6) 170 case htons(ETH_P_IPV6): 171 ip.u.ip6 = ipv6_hdr(skb)->daddr; 172 break; 173 #endif 174 default: 175 return NULL; 176 } 177 178 return br_mdb_ip_get(mdb, &ip); 179 } 180 181 static void br_mdb_free(struct rcu_head *head) 182 { 183 struct net_bridge_mdb_htable *mdb = 184 container_of(head, struct net_bridge_mdb_htable, rcu); 185 struct net_bridge_mdb_htable *old = mdb->old; 186 187 mdb->old = NULL; 188 kfree(old->mhash); 189 kfree(old); 190 } 191 192 static int br_mdb_copy(struct net_bridge_mdb_htable *new, 193 struct net_bridge_mdb_htable *old, 194 int elasticity) 195 { 196 struct net_bridge_mdb_entry *mp; 197 int maxlen; 198 int len; 199 int i; 200 201 for (i = 0; i < old->max; i++) 202 hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver]) 203 hlist_add_head(&mp->hlist[new->ver], 204 &new->mhash[br_ip_hash(new, &mp->addr)]); 205 206 if (!elasticity) 207 return 0; 208 209 maxlen = 0; 210 for (i = 0; i < new->max; i++) { 211 len = 0; 212 hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver]) 213 len++; 214 if (len > maxlen) 215 maxlen = len; 216 } 217 218 return maxlen > elasticity ? 
-EINVAL : 0; 219 } 220 221 void br_multicast_free_pg(struct rcu_head *head) 222 { 223 struct net_bridge_port_group *p = 224 container_of(head, struct net_bridge_port_group, rcu); 225 226 kfree(p); 227 } 228 229 static void br_multicast_free_group(struct rcu_head *head) 230 { 231 struct net_bridge_mdb_entry *mp = 232 container_of(head, struct net_bridge_mdb_entry, rcu); 233 234 kfree(mp); 235 } 236 237 static void br_multicast_group_expired(unsigned long data) 238 { 239 struct net_bridge_mdb_entry *mp = (void *)data; 240 struct net_bridge *br = mp->br; 241 struct net_bridge_mdb_htable *mdb; 242 243 spin_lock(&br->multicast_lock); 244 if (!netif_running(br->dev) || timer_pending(&mp->timer)) 245 goto out; 246 247 mp->mglist = false; 248 249 if (mp->ports) 250 goto out; 251 252 mdb = mlock_dereference(br->mdb, br); 253 254 hlist_del_rcu(&mp->hlist[mdb->ver]); 255 mdb->size--; 256 257 call_rcu_bh(&mp->rcu, br_multicast_free_group); 258 259 out: 260 spin_unlock(&br->multicast_lock); 261 } 262 263 static void br_multicast_del_pg(struct net_bridge *br, 264 struct net_bridge_port_group *pg) 265 { 266 struct net_bridge_mdb_htable *mdb; 267 struct net_bridge_mdb_entry *mp; 268 struct net_bridge_port_group *p; 269 struct net_bridge_port_group __rcu **pp; 270 271 mdb = mlock_dereference(br->mdb, br); 272 273 mp = br_mdb_ip_get(mdb, &pg->addr); 274 if (WARN_ON(!mp)) 275 return; 276 277 for (pp = &mp->ports; 278 (p = mlock_dereference(*pp, br)) != NULL; 279 pp = &p->next) { 280 if (p != pg) 281 continue; 282 283 rcu_assign_pointer(*pp, p->next); 284 hlist_del_init(&p->mglist); 285 del_timer(&p->timer); 286 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, 287 p->flags); 288 call_rcu_bh(&p->rcu, br_multicast_free_pg); 289 290 if (!mp->ports && !mp->mglist && 291 netif_running(br->dev)) 292 mod_timer(&mp->timer, jiffies); 293 294 return; 295 } 296 297 WARN_ON(1); 298 } 299 300 static void br_multicast_port_group_expired(unsigned long data) 301 { 302 struct 
net_bridge_port_group *pg = (void *)data; 303 struct net_bridge *br = pg->port->br; 304 305 spin_lock(&br->multicast_lock); 306 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 307 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) 308 goto out; 309 310 br_multicast_del_pg(br, pg); 311 312 out: 313 spin_unlock(&br->multicast_lock); 314 } 315 316 static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, 317 int elasticity) 318 { 319 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1); 320 struct net_bridge_mdb_htable *mdb; 321 int err; 322 323 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC); 324 if (!mdb) 325 return -ENOMEM; 326 327 mdb->max = max; 328 mdb->old = old; 329 330 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC); 331 if (!mdb->mhash) { 332 kfree(mdb); 333 return -ENOMEM; 334 } 335 336 mdb->size = old ? old->size : 0; 337 mdb->ver = old ? old->ver ^ 1 : 0; 338 339 if (!old || elasticity) 340 get_random_bytes(&mdb->secret, sizeof(mdb->secret)); 341 else 342 mdb->secret = old->secret; 343 344 if (!old) 345 goto out; 346 347 err = br_mdb_copy(mdb, old, elasticity); 348 if (err) { 349 kfree(mdb->mhash); 350 kfree(mdb); 351 return err; 352 } 353 354 br_mdb_rehash_seq++; 355 call_rcu_bh(&mdb->rcu, br_mdb_free); 356 357 out: 358 rcu_assign_pointer(*mdbp, mdb); 359 360 return 0; 361 } 362 363 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 364 __be32 group, 365 u8 *igmp_type) 366 { 367 struct sk_buff *skb; 368 struct igmphdr *ih; 369 struct ethhdr *eth; 370 struct iphdr *iph; 371 372 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + 373 sizeof(*ih) + 4); 374 if (!skb) 375 goto out; 376 377 skb->protocol = htons(ETH_P_IP); 378 379 skb_reset_mac_header(skb); 380 eth = eth_hdr(skb); 381 382 ether_addr_copy(eth->h_source, br->dev->dev_addr); 383 eth->h_dest[0] = 1; 384 eth->h_dest[1] = 0; 385 eth->h_dest[2] = 0x5e; 386 eth->h_dest[3] = 0; 387 
eth->h_dest[4] = 0; 388 eth->h_dest[5] = 1; 389 eth->h_proto = htons(ETH_P_IP); 390 skb_put(skb, sizeof(*eth)); 391 392 skb_set_network_header(skb, skb->len); 393 iph = ip_hdr(skb); 394 395 iph->version = 4; 396 iph->ihl = 6; 397 iph->tos = 0xc0; 398 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4); 399 iph->id = 0; 400 iph->frag_off = htons(IP_DF); 401 iph->ttl = 1; 402 iph->protocol = IPPROTO_IGMP; 403 iph->saddr = br->multicast_query_use_ifaddr ? 404 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0; 405 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 406 ((u8 *)&iph[1])[0] = IPOPT_RA; 407 ((u8 *)&iph[1])[1] = 4; 408 ((u8 *)&iph[1])[2] = 0; 409 ((u8 *)&iph[1])[3] = 0; 410 ip_send_check(iph); 411 skb_put(skb, 24); 412 413 skb_set_transport_header(skb, skb->len); 414 ih = igmp_hdr(skb); 415 *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY; 416 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 417 ih->code = (group ? br->multicast_last_member_interval : 418 br->multicast_query_response_interval) / 419 (HZ / IGMP_TIMER_SCALE); 420 ih->group = group; 421 ih->csum = 0; 422 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); 423 skb_put(skb, sizeof(*ih)); 424 425 __skb_pull(skb, sizeof(*eth)); 426 427 out: 428 return skb; 429 } 430 431 #if IS_ENABLED(CONFIG_IPV6) 432 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 433 const struct in6_addr *grp, 434 u8 *igmp_type) 435 { 436 struct sk_buff *skb; 437 struct ipv6hdr *ip6h; 438 struct mld_msg *mldq; 439 struct ethhdr *eth; 440 u8 *hopopt; 441 unsigned long interval; 442 443 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + 444 8 + sizeof(*mldq)); 445 if (!skb) 446 goto out; 447 448 skb->protocol = htons(ETH_P_IPV6); 449 450 /* Ethernet header */ 451 skb_reset_mac_header(skb); 452 eth = eth_hdr(skb); 453 454 ether_addr_copy(eth->h_source, br->dev->dev_addr); 455 eth->h_proto = htons(ETH_P_IPV6); 456 skb_put(skb, sizeof(*eth)); 457 458 /* IPv6 header + HbH option */ 459 
skb_set_network_header(skb, skb->len); 460 ip6h = ipv6_hdr(skb); 461 462 *(__force __be32 *)ip6h = htonl(0x60000000); 463 ip6h->payload_len = htons(8 + sizeof(*mldq)); 464 ip6h->nexthdr = IPPROTO_HOPOPTS; 465 ip6h->hop_limit = 1; 466 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 467 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 468 &ip6h->saddr)) { 469 kfree_skb(skb); 470 br->has_ipv6_addr = 0; 471 return NULL; 472 } 473 474 br->has_ipv6_addr = 1; 475 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 476 477 hopopt = (u8 *)(ip6h + 1); 478 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 479 hopopt[1] = 0; /* length of HbH */ 480 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 481 hopopt[3] = 2; /* Length of RA Option */ 482 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 483 hopopt[5] = 0; 484 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ 485 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ 486 487 skb_put(skb, sizeof(*ip6h) + 8); 488 489 /* ICMPv6 */ 490 skb_set_transport_header(skb, skb->len); 491 mldq = (struct mld_msg *) icmp6_hdr(skb); 492 493 interval = ipv6_addr_any(grp) ? 
494 br->multicast_query_response_interval : 495 br->multicast_last_member_interval; 496 497 *igmp_type = ICMPV6_MGM_QUERY; 498 mldq->mld_type = ICMPV6_MGM_QUERY; 499 mldq->mld_code = 0; 500 mldq->mld_cksum = 0; 501 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 502 mldq->mld_reserved = 0; 503 mldq->mld_mca = *grp; 504 505 /* checksum */ 506 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 507 sizeof(*mldq), IPPROTO_ICMPV6, 508 csum_partial(mldq, 509 sizeof(*mldq), 0)); 510 skb_put(skb, sizeof(*mldq)); 511 512 __skb_pull(skb, sizeof(*eth)); 513 514 out: 515 return skb; 516 } 517 #endif 518 519 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 520 struct br_ip *addr, 521 u8 *igmp_type) 522 { 523 switch (addr->proto) { 524 case htons(ETH_P_IP): 525 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type); 526 #if IS_ENABLED(CONFIG_IPV6) 527 case htons(ETH_P_IPV6): 528 return br_ip6_multicast_alloc_query(br, &addr->u.ip6, 529 igmp_type); 530 #endif 531 } 532 return NULL; 533 } 534 535 static struct net_bridge_mdb_entry *br_multicast_get_group( 536 struct net_bridge *br, struct net_bridge_port *port, 537 struct br_ip *group, int hash) 538 { 539 struct net_bridge_mdb_htable *mdb; 540 struct net_bridge_mdb_entry *mp; 541 unsigned int count = 0; 542 unsigned int max; 543 int elasticity; 544 int err; 545 546 mdb = rcu_dereference_protected(br->mdb, 1); 547 hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 548 count++; 549 if (unlikely(br_ip_equal(group, &mp->addr))) 550 return mp; 551 } 552 553 elasticity = 0; 554 max = mdb->max; 555 556 if (unlikely(count > br->hash_elasticity && count)) { 557 if (net_ratelimit()) 558 br_info(br, "Multicast hash table " 559 "chain limit reached: %s\n", 560 port ? 
port->dev->name : br->dev->name); 561 562 elasticity = br->hash_elasticity; 563 } 564 565 if (mdb->size >= max) { 566 max *= 2; 567 if (unlikely(max > br->hash_max)) { 568 br_warn(br, "Multicast hash table maximum of %d " 569 "reached, disabling snooping: %s\n", 570 br->hash_max, 571 port ? port->dev->name : br->dev->name); 572 err = -E2BIG; 573 disable: 574 br->multicast_disabled = 1; 575 goto err; 576 } 577 } 578 579 if (max > mdb->max || elasticity) { 580 if (mdb->old) { 581 if (net_ratelimit()) 582 br_info(br, "Multicast hash table " 583 "on fire: %s\n", 584 port ? port->dev->name : br->dev->name); 585 err = -EEXIST; 586 goto err; 587 } 588 589 err = br_mdb_rehash(&br->mdb, max, elasticity); 590 if (err) { 591 br_warn(br, "Cannot rehash multicast " 592 "hash table, disabling snooping: %s, %d, %d\n", 593 port ? port->dev->name : br->dev->name, 594 mdb->size, err); 595 goto disable; 596 } 597 598 err = -EAGAIN; 599 goto err; 600 } 601 602 return NULL; 603 604 err: 605 mp = ERR_PTR(err); 606 return mp; 607 } 608 609 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br, 610 struct net_bridge_port *port, struct br_ip *group) 611 { 612 struct net_bridge_mdb_htable *mdb; 613 struct net_bridge_mdb_entry *mp; 614 int hash; 615 int err; 616 617 mdb = rcu_dereference_protected(br->mdb, 1); 618 if (!mdb) { 619 err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0); 620 if (err) 621 return ERR_PTR(err); 622 goto rehash; 623 } 624 625 hash = br_ip_hash(mdb, group); 626 mp = br_multicast_get_group(br, port, group, hash); 627 switch (PTR_ERR(mp)) { 628 case 0: 629 break; 630 631 case -EAGAIN: 632 rehash: 633 mdb = rcu_dereference_protected(br->mdb, 1); 634 hash = br_ip_hash(mdb, group); 635 break; 636 637 default: 638 goto out; 639 } 640 641 mp = kzalloc(sizeof(*mp), GFP_ATOMIC); 642 if (unlikely(!mp)) 643 return ERR_PTR(-ENOMEM); 644 645 mp->br = br; 646 mp->addr = *group; 647 setup_timer(&mp->timer, br_multicast_group_expired, 648 (unsigned long)mp); 649 650 
hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 651 mdb->size++; 652 653 out: 654 return mp; 655 } 656 657 struct net_bridge_port_group *br_multicast_new_port_group( 658 struct net_bridge_port *port, 659 struct br_ip *group, 660 struct net_bridge_port_group __rcu *next, 661 unsigned char flags) 662 { 663 struct net_bridge_port_group *p; 664 665 p = kzalloc(sizeof(*p), GFP_ATOMIC); 666 if (unlikely(!p)) 667 return NULL; 668 669 p->addr = *group; 670 p->port = port; 671 p->flags = flags; 672 rcu_assign_pointer(p->next, next); 673 hlist_add_head(&p->mglist, &port->mglist); 674 setup_timer(&p->timer, br_multicast_port_group_expired, 675 (unsigned long)p); 676 return p; 677 } 678 679 static int br_multicast_add_group(struct net_bridge *br, 680 struct net_bridge_port *port, 681 struct br_ip *group) 682 { 683 struct net_bridge_mdb_entry *mp; 684 struct net_bridge_port_group *p; 685 struct net_bridge_port_group __rcu **pp; 686 unsigned long now = jiffies; 687 int err; 688 689 spin_lock(&br->multicast_lock); 690 if (!netif_running(br->dev) || 691 (port && port->state == BR_STATE_DISABLED)) 692 goto out; 693 694 mp = br_multicast_new_group(br, port, group); 695 err = PTR_ERR(mp); 696 if (IS_ERR(mp)) 697 goto err; 698 699 if (!port) { 700 mp->mglist = true; 701 mod_timer(&mp->timer, now + br->multicast_membership_interval); 702 goto out; 703 } 704 705 for (pp = &mp->ports; 706 (p = mlock_dereference(*pp, br)) != NULL; 707 pp = &p->next) { 708 if (p->port == port) 709 goto found; 710 if ((unsigned long)p->port < (unsigned long)port) 711 break; 712 } 713 714 p = br_multicast_new_port_group(port, group, *pp, 0); 715 if (unlikely(!p)) 716 goto err; 717 rcu_assign_pointer(*pp, p); 718 br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0); 719 720 found: 721 mod_timer(&p->timer, now + br->multicast_membership_interval); 722 out: 723 err = 0; 724 725 err: 726 spin_unlock(&br->multicast_lock); 727 return err; 728 } 729 730 static int br_ip4_multicast_add_group(struct 
net_bridge *br, 731 struct net_bridge_port *port, 732 __be32 group, 733 __u16 vid) 734 { 735 struct br_ip br_group; 736 737 if (ipv4_is_local_multicast(group)) 738 return 0; 739 740 br_group.u.ip4 = group; 741 br_group.proto = htons(ETH_P_IP); 742 br_group.vid = vid; 743 744 return br_multicast_add_group(br, port, &br_group); 745 } 746 747 #if IS_ENABLED(CONFIG_IPV6) 748 static int br_ip6_multicast_add_group(struct net_bridge *br, 749 struct net_bridge_port *port, 750 const struct in6_addr *group, 751 __u16 vid) 752 { 753 struct br_ip br_group; 754 755 if (ipv6_addr_is_ll_all_nodes(group)) 756 return 0; 757 758 br_group.u.ip6 = *group; 759 br_group.proto = htons(ETH_P_IPV6); 760 br_group.vid = vid; 761 762 return br_multicast_add_group(br, port, &br_group); 763 } 764 #endif 765 766 static void br_multicast_router_expired(unsigned long data) 767 { 768 struct net_bridge_port *port = (void *)data; 769 struct net_bridge *br = port->br; 770 771 spin_lock(&br->multicast_lock); 772 if (port->multicast_router == MDB_RTR_TYPE_DISABLED || 773 port->multicast_router == MDB_RTR_TYPE_PERM || 774 timer_pending(&port->multicast_router_timer) || 775 hlist_unhashed(&port->rlist)) 776 goto out; 777 778 hlist_del_init_rcu(&port->rlist); 779 br_rtr_notify(br->dev, port, RTM_DELMDB); 780 /* Don't allow timer refresh if the router expired */ 781 if (port->multicast_router == MDB_RTR_TYPE_TEMP) 782 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 783 784 out: 785 spin_unlock(&br->multicast_lock); 786 } 787 788 static void br_multicast_local_router_expired(unsigned long data) 789 { 790 } 791 792 static void br_multicast_querier_expired(struct net_bridge *br, 793 struct bridge_mcast_own_query *query) 794 { 795 spin_lock(&br->multicast_lock); 796 if (!netif_running(br->dev) || br->multicast_disabled) 797 goto out; 798 799 br_multicast_start_querier(br, query); 800 801 out: 802 spin_unlock(&br->multicast_lock); 803 } 804 805 static void br_ip4_multicast_querier_expired(unsigned long data) 
806 { 807 struct net_bridge *br = (void *)data; 808 809 br_multicast_querier_expired(br, &br->ip4_own_query); 810 } 811 812 #if IS_ENABLED(CONFIG_IPV6) 813 static void br_ip6_multicast_querier_expired(unsigned long data) 814 { 815 struct net_bridge *br = (void *)data; 816 817 br_multicast_querier_expired(br, &br->ip6_own_query); 818 } 819 #endif 820 821 static void br_multicast_select_own_querier(struct net_bridge *br, 822 struct br_ip *ip, 823 struct sk_buff *skb) 824 { 825 if (ip->proto == htons(ETH_P_IP)) 826 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr; 827 #if IS_ENABLED(CONFIG_IPV6) 828 else 829 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr; 830 #endif 831 } 832 833 static void __br_multicast_send_query(struct net_bridge *br, 834 struct net_bridge_port *port, 835 struct br_ip *ip) 836 { 837 struct sk_buff *skb; 838 u8 igmp_type; 839 840 skb = br_multicast_alloc_query(br, ip, &igmp_type); 841 if (!skb) 842 return; 843 844 if (port) { 845 skb->dev = port->dev; 846 br_multicast_count(br, port, skb, igmp_type, 847 BR_MCAST_DIR_TX); 848 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, 849 dev_net(port->dev), NULL, skb, NULL, skb->dev, 850 br_dev_queue_push_xmit); 851 } else { 852 br_multicast_select_own_querier(br, ip, skb); 853 br_multicast_count(br, port, skb, igmp_type, 854 BR_MCAST_DIR_RX); 855 netif_rx(skb); 856 } 857 } 858 859 static void br_multicast_send_query(struct net_bridge *br, 860 struct net_bridge_port *port, 861 struct bridge_mcast_own_query *own_query) 862 { 863 unsigned long time; 864 struct br_ip br_group; 865 struct bridge_mcast_other_query *other_query = NULL; 866 867 if (!netif_running(br->dev) || br->multicast_disabled || 868 !br->multicast_querier) 869 return; 870 871 memset(&br_group.u, 0, sizeof(br_group.u)); 872 873 if (port ? 
(own_query == &port->ip4_own_query) : 874 (own_query == &br->ip4_own_query)) { 875 other_query = &br->ip4_other_query; 876 br_group.proto = htons(ETH_P_IP); 877 #if IS_ENABLED(CONFIG_IPV6) 878 } else { 879 other_query = &br->ip6_other_query; 880 br_group.proto = htons(ETH_P_IPV6); 881 #endif 882 } 883 884 if (!other_query || timer_pending(&other_query->timer)) 885 return; 886 887 __br_multicast_send_query(br, port, &br_group); 888 889 time = jiffies; 890 time += own_query->startup_sent < br->multicast_startup_query_count ? 891 br->multicast_startup_query_interval : 892 br->multicast_query_interval; 893 mod_timer(&own_query->timer, time); 894 } 895 896 static void 897 br_multicast_port_query_expired(struct net_bridge_port *port, 898 struct bridge_mcast_own_query *query) 899 { 900 struct net_bridge *br = port->br; 901 902 spin_lock(&br->multicast_lock); 903 if (port->state == BR_STATE_DISABLED || 904 port->state == BR_STATE_BLOCKING) 905 goto out; 906 907 if (query->startup_sent < br->multicast_startup_query_count) 908 query->startup_sent++; 909 910 br_multicast_send_query(port->br, port, query); 911 912 out: 913 spin_unlock(&br->multicast_lock); 914 } 915 916 static void br_ip4_multicast_port_query_expired(unsigned long data) 917 { 918 struct net_bridge_port *port = (void *)data; 919 920 br_multicast_port_query_expired(port, &port->ip4_own_query); 921 } 922 923 #if IS_ENABLED(CONFIG_IPV6) 924 static void br_ip6_multicast_port_query_expired(unsigned long data) 925 { 926 struct net_bridge_port *port = (void *)data; 927 928 br_multicast_port_query_expired(port, &port->ip6_own_query); 929 } 930 #endif 931 932 int br_multicast_add_port(struct net_bridge_port *port) 933 { 934 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 935 936 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 937 (unsigned long)port); 938 setup_timer(&port->ip4_own_query.timer, 939 br_ip4_multicast_port_query_expired, (unsigned long)port); 940 #if IS_ENABLED(CONFIG_IPV6) 941 
setup_timer(&port->ip6_own_query.timer, 942 br_ip6_multicast_port_query_expired, (unsigned long)port); 943 #endif 944 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 945 if (!port->mcast_stats) 946 return -ENOMEM; 947 948 return 0; 949 } 950 951 void br_multicast_del_port(struct net_bridge_port *port) 952 { 953 struct net_bridge *br = port->br; 954 struct net_bridge_port_group *pg; 955 struct hlist_node *n; 956 957 /* Take care of the remaining groups, only perm ones should be left */ 958 spin_lock_bh(&br->multicast_lock); 959 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 960 br_multicast_del_pg(br, pg); 961 spin_unlock_bh(&br->multicast_lock); 962 del_timer_sync(&port->multicast_router_timer); 963 free_percpu(port->mcast_stats); 964 } 965 966 static void br_multicast_enable(struct bridge_mcast_own_query *query) 967 { 968 query->startup_sent = 0; 969 970 if (try_to_del_timer_sync(&query->timer) >= 0 || 971 del_timer(&query->timer)) 972 mod_timer(&query->timer, jiffies); 973 } 974 975 static void __br_multicast_enable_port(struct net_bridge_port *port) 976 { 977 struct net_bridge *br = port->br; 978 979 if (br->multicast_disabled || !netif_running(br->dev)) 980 return; 981 982 br_multicast_enable(&port->ip4_own_query); 983 #if IS_ENABLED(CONFIG_IPV6) 984 br_multicast_enable(&port->ip6_own_query); 985 #endif 986 if (port->multicast_router == MDB_RTR_TYPE_PERM && 987 hlist_unhashed(&port->rlist)) 988 br_multicast_add_router(br, port); 989 } 990 991 void br_multicast_enable_port(struct net_bridge_port *port) 992 { 993 struct net_bridge *br = port->br; 994 995 spin_lock(&br->multicast_lock); 996 __br_multicast_enable_port(port); 997 spin_unlock(&br->multicast_lock); 998 } 999 1000 void br_multicast_disable_port(struct net_bridge_port *port) 1001 { 1002 struct net_bridge *br = port->br; 1003 struct net_bridge_port_group *pg; 1004 struct hlist_node *n; 1005 1006 spin_lock(&br->multicast_lock); 1007 hlist_for_each_entry_safe(pg, n, 
&port->mglist, mglist) 1008 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT)) 1009 br_multicast_del_pg(br, pg); 1010 1011 if (!hlist_unhashed(&port->rlist)) { 1012 hlist_del_init_rcu(&port->rlist); 1013 br_rtr_notify(br->dev, port, RTM_DELMDB); 1014 /* Don't allow timer refresh if disabling */ 1015 if (port->multicast_router == MDB_RTR_TYPE_TEMP) 1016 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 1017 } 1018 del_timer(&port->multicast_router_timer); 1019 del_timer(&port->ip4_own_query.timer); 1020 #if IS_ENABLED(CONFIG_IPV6) 1021 del_timer(&port->ip6_own_query.timer); 1022 #endif 1023 spin_unlock(&br->multicast_lock); 1024 } 1025 1026 static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 1027 struct net_bridge_port *port, 1028 struct sk_buff *skb, 1029 u16 vid) 1030 { 1031 struct igmpv3_report *ih; 1032 struct igmpv3_grec *grec; 1033 int i; 1034 int len; 1035 int num; 1036 int type; 1037 int err = 0; 1038 __be32 group; 1039 1040 ih = igmpv3_report_hdr(skb); 1041 num = ntohs(ih->ngrec); 1042 len = skb_transport_offset(skb) + sizeof(*ih); 1043 1044 for (i = 0; i < num; i++) { 1045 len += sizeof(*grec); 1046 if (!pskb_may_pull(skb, len)) 1047 return -EINVAL; 1048 1049 grec = (void *)(skb->data + len - sizeof(*grec)); 1050 group = grec->grec_mca; 1051 type = grec->grec_type; 1052 1053 len += ntohs(grec->grec_nsrcs) * 4; 1054 if (!pskb_may_pull(skb, len)) 1055 return -EINVAL; 1056 1057 /* We treat this as an IGMPv2 report for now. 
*/ 1058 switch (type) { 1059 case IGMPV3_MODE_IS_INCLUDE: 1060 case IGMPV3_MODE_IS_EXCLUDE: 1061 case IGMPV3_CHANGE_TO_INCLUDE: 1062 case IGMPV3_CHANGE_TO_EXCLUDE: 1063 case IGMPV3_ALLOW_NEW_SOURCES: 1064 case IGMPV3_BLOCK_OLD_SOURCES: 1065 break; 1066 1067 default: 1068 continue; 1069 } 1070 1071 if ((type == IGMPV3_CHANGE_TO_INCLUDE || 1072 type == IGMPV3_MODE_IS_INCLUDE) && 1073 ntohs(grec->grec_nsrcs) == 0) { 1074 br_ip4_multicast_leave_group(br, port, group, vid); 1075 } else { 1076 err = br_ip4_multicast_add_group(br, port, group, vid); 1077 if (err) 1078 break; 1079 } 1080 } 1081 1082 return err; 1083 } 1084 1085 #if IS_ENABLED(CONFIG_IPV6) 1086 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 1087 struct net_bridge_port *port, 1088 struct sk_buff *skb, 1089 u16 vid) 1090 { 1091 struct icmp6hdr *icmp6h; 1092 struct mld2_grec *grec; 1093 int i; 1094 int len; 1095 int num; 1096 int err = 0; 1097 1098 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1099 return -EINVAL; 1100 1101 icmp6h = icmp6_hdr(skb); 1102 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1103 len = skb_transport_offset(skb) + sizeof(*icmp6h); 1104 1105 for (i = 0; i < num; i++) { 1106 __be16 *nsrcs, _nsrcs; 1107 1108 nsrcs = skb_header_pointer(skb, 1109 len + offsetof(struct mld2_grec, 1110 grec_nsrcs), 1111 sizeof(_nsrcs), &_nsrcs); 1112 if (!nsrcs) 1113 return -EINVAL; 1114 1115 if (!pskb_may_pull(skb, 1116 len + sizeof(*grec) + 1117 sizeof(struct in6_addr) * ntohs(*nsrcs))) 1118 return -EINVAL; 1119 1120 grec = (struct mld2_grec *)(skb->data + len); 1121 len += sizeof(*grec) + 1122 sizeof(struct in6_addr) * ntohs(*nsrcs); 1123 1124 /* We treat these as MLDv1 reports for now. 
*/ 1125 switch (grec->grec_type) { 1126 case MLD2_MODE_IS_INCLUDE: 1127 case MLD2_MODE_IS_EXCLUDE: 1128 case MLD2_CHANGE_TO_INCLUDE: 1129 case MLD2_CHANGE_TO_EXCLUDE: 1130 case MLD2_ALLOW_NEW_SOURCES: 1131 case MLD2_BLOCK_OLD_SOURCES: 1132 break; 1133 1134 default: 1135 continue; 1136 } 1137 1138 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 1139 grec->grec_type == MLD2_MODE_IS_INCLUDE) && 1140 ntohs(*nsrcs) == 0) { 1141 br_ip6_multicast_leave_group(br, port, &grec->grec_mca, 1142 vid); 1143 } else { 1144 err = br_ip6_multicast_add_group(br, port, 1145 &grec->grec_mca, vid); 1146 if (err) 1147 break; 1148 } 1149 } 1150 1151 return err; 1152 } 1153 #endif 1154 1155 static bool br_ip4_multicast_select_querier(struct net_bridge *br, 1156 struct net_bridge_port *port, 1157 __be32 saddr) 1158 { 1159 if (!timer_pending(&br->ip4_own_query.timer) && 1160 !timer_pending(&br->ip4_other_query.timer)) 1161 goto update; 1162 1163 if (!br->ip4_querier.addr.u.ip4) 1164 goto update; 1165 1166 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4)) 1167 goto update; 1168 1169 return false; 1170 1171 update: 1172 br->ip4_querier.addr.u.ip4 = saddr; 1173 1174 /* update protected by general multicast_lock by caller */ 1175 rcu_assign_pointer(br->ip4_querier.port, port); 1176 1177 return true; 1178 } 1179 1180 #if IS_ENABLED(CONFIG_IPV6) 1181 static bool br_ip6_multicast_select_querier(struct net_bridge *br, 1182 struct net_bridge_port *port, 1183 struct in6_addr *saddr) 1184 { 1185 if (!timer_pending(&br->ip6_own_query.timer) && 1186 !timer_pending(&br->ip6_other_query.timer)) 1187 goto update; 1188 1189 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0) 1190 goto update; 1191 1192 return false; 1193 1194 update: 1195 br->ip6_querier.addr.u.ip6 = *saddr; 1196 1197 /* update protected by general multicast_lock by caller */ 1198 rcu_assign_pointer(br->ip6_querier.port, port); 1199 1200 return true; 1201 } 1202 #endif 1203 1204 static bool br_multicast_select_querier(struct 
net_bridge *br, 1205 struct net_bridge_port *port, 1206 struct br_ip *saddr) 1207 { 1208 switch (saddr->proto) { 1209 case htons(ETH_P_IP): 1210 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4); 1211 #if IS_ENABLED(CONFIG_IPV6) 1212 case htons(ETH_P_IPV6): 1213 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6); 1214 #endif 1215 } 1216 1217 return false; 1218 } 1219 1220 static void 1221 br_multicast_update_query_timer(struct net_bridge *br, 1222 struct bridge_mcast_other_query *query, 1223 unsigned long max_delay) 1224 { 1225 if (!timer_pending(&query->timer)) 1226 query->delay_time = jiffies + max_delay; 1227 1228 mod_timer(&query->timer, jiffies + br->multicast_querier_interval); 1229 } 1230 1231 /* 1232 * Add port to router_list 1233 * list is maintained ordered by pointer value 1234 * and locked by br->multicast_lock and RCU 1235 */ 1236 static void br_multicast_add_router(struct net_bridge *br, 1237 struct net_bridge_port *port) 1238 { 1239 struct net_bridge_port *p; 1240 struct hlist_node *slot = NULL; 1241 1242 if (!hlist_unhashed(&port->rlist)) 1243 return; 1244 1245 hlist_for_each_entry(p, &br->router_list, rlist) { 1246 if ((unsigned long) port >= (unsigned long) p) 1247 break; 1248 slot = &p->rlist; 1249 } 1250 1251 if (slot) 1252 hlist_add_behind_rcu(&port->rlist, slot); 1253 else 1254 hlist_add_head_rcu(&port->rlist, &br->router_list); 1255 br_rtr_notify(br->dev, port, RTM_NEWMDB); 1256 } 1257 1258 static void br_multicast_mark_router(struct net_bridge *br, 1259 struct net_bridge_port *port) 1260 { 1261 unsigned long now = jiffies; 1262 1263 if (!port) { 1264 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) 1265 mod_timer(&br->multicast_router_timer, 1266 now + br->multicast_querier_interval); 1267 return; 1268 } 1269 1270 if (port->multicast_router == MDB_RTR_TYPE_DISABLED || 1271 port->multicast_router == MDB_RTR_TYPE_PERM) 1272 return; 1273 1274 br_multicast_add_router(br, port); 1275 1276 
/* Handle a query received from another (potential) querier: only if the
 * sender wins the querier election do we treat it as the active querier
 * (arming the other-querier timer) and mark its port as a router port.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLD query (MLDv1 or MLDv2, distinguished by length).
 *
 * General queries feed the querier election; group-specific queries
 * shorten the membership timers of the matching mdb entry and its port
 * groups ("last member" style fast expiry).
 *
 * Runs under br->multicast_lock.  Returns 0 or -EINVAL on a truncated
 * packet.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		/* MLDv1 query: exactly a struct mld_msg. */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		/* NOTE(review): max_delay == 0 leaves group == NULL, so the
		 * query is dropped below — confirm this is intended for
		 * MLDv1 queries advertising a zero max response delay.
		 */
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query. */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		/* Queries carrying a source list are ignored for snooping
		 * purposes (group stays NULL unless nsrcs == 0).
		 */
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		/* Decode the Maximum Response Code; never allow zero. */
		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* General query: run querier election / mark router port. */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	/* Group-specific query: shorten timers of the matching entry. */
	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
(!hlist_unhashed(&p->mglist) && 1520 (timer_pending(&p->timer) ? 1521 time_after(p->timer.expires, time) : 1522 try_to_del_timer_sync(&p->timer) >= 0)) { 1523 mod_timer(&p->timer, time); 1524 } 1525 1526 break; 1527 } 1528 } 1529 1530 now = jiffies; 1531 time = now + br->multicast_last_member_count * 1532 br->multicast_last_member_interval; 1533 1534 if (!port) { 1535 if (mp->mglist && 1536 (timer_pending(&mp->timer) ? 1537 time_after(mp->timer.expires, time) : 1538 try_to_del_timer_sync(&mp->timer) >= 0)) { 1539 mod_timer(&mp->timer, time); 1540 } 1541 1542 goto out; 1543 } 1544 1545 for (p = mlock_dereference(mp->ports, br); 1546 p != NULL; 1547 p = mlock_dereference(p->next, br)) { 1548 if (p->port != port) 1549 continue; 1550 1551 if (!hlist_unhashed(&p->mglist) && 1552 (timer_pending(&p->timer) ? 1553 time_after(p->timer.expires, time) : 1554 try_to_del_timer_sync(&p->timer) >= 0)) { 1555 mod_timer(&p->timer, time); 1556 } 1557 1558 break; 1559 } 1560 out: 1561 spin_unlock(&br->multicast_lock); 1562 } 1563 1564 static void br_ip4_multicast_leave_group(struct net_bridge *br, 1565 struct net_bridge_port *port, 1566 __be32 group, 1567 __u16 vid) 1568 { 1569 struct br_ip br_group; 1570 struct bridge_mcast_own_query *own_query; 1571 1572 if (ipv4_is_local_multicast(group)) 1573 return; 1574 1575 own_query = port ? &port->ip4_own_query : &br->ip4_own_query; 1576 1577 br_group.u.ip4 = group; 1578 br_group.proto = htons(ETH_P_IP); 1579 br_group.vid = vid; 1580 1581 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query, 1582 own_query); 1583 } 1584 1585 #if IS_ENABLED(CONFIG_IPV6) 1586 static void br_ip6_multicast_leave_group(struct net_bridge *br, 1587 struct net_bridge_port *port, 1588 const struct in6_addr *group, 1589 __u16 vid) 1590 { 1591 struct br_ip br_group; 1592 struct bridge_mcast_own_query *own_query; 1593 1594 if (ipv6_addr_is_ll_all_nodes(group)) 1595 return; 1596 1597 own_query = port ? 
&port->ip6_own_query : &br->ip6_own_query; 1598 1599 br_group.u.ip6 = *group; 1600 br_group.proto = htons(ETH_P_IPV6); 1601 br_group.vid = vid; 1602 1603 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query, 1604 own_query); 1605 } 1606 #endif 1607 1608 static void br_multicast_err_count(const struct net_bridge *br, 1609 const struct net_bridge_port *p, 1610 __be16 proto) 1611 { 1612 struct bridge_mcast_stats __percpu *stats; 1613 struct bridge_mcast_stats *pstats; 1614 1615 if (!br->multicast_stats_enabled) 1616 return; 1617 1618 if (p) 1619 stats = p->mcast_stats; 1620 else 1621 stats = br->mcast_stats; 1622 if (WARN_ON(!stats)) 1623 return; 1624 1625 pstats = this_cpu_ptr(stats); 1626 1627 u64_stats_update_begin(&pstats->syncp); 1628 switch (proto) { 1629 case htons(ETH_P_IP): 1630 pstats->mstats.igmp_parse_errors++; 1631 break; 1632 #if IS_ENABLED(CONFIG_IPV6) 1633 case htons(ETH_P_IPV6): 1634 pstats->mstats.mld_parse_errors++; 1635 break; 1636 #endif 1637 } 1638 u64_stats_update_end(&pstats->syncp); 1639 } 1640 1641 static int br_multicast_ipv4_rcv(struct net_bridge *br, 1642 struct net_bridge_port *port, 1643 struct sk_buff *skb, 1644 u16 vid) 1645 { 1646 struct sk_buff *skb_trimmed = NULL; 1647 struct igmphdr *ih; 1648 int err; 1649 1650 err = ip_mc_check_igmp(skb, &skb_trimmed); 1651 1652 if (err == -ENOMSG) { 1653 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) 1654 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1655 return 0; 1656 } else if (err < 0) { 1657 br_multicast_err_count(br, port, skb->protocol); 1658 return err; 1659 } 1660 1661 ih = igmp_hdr(skb); 1662 BR_INPUT_SKB_CB(skb)->igmp = ih->type; 1663 1664 switch (ih->type) { 1665 case IGMP_HOST_MEMBERSHIP_REPORT: 1666 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1667 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1668 err = br_ip4_multicast_add_group(br, port, ih->group, vid); 1669 break; 1670 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1671 err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid); 1672 
#if IS_ENABLED(CONFIG_IPV6)
/* Validate and dispatch an IPv6 multicast control packet (MLD).
 *
 * ipv6_mc_check_mld() returns -ENOMSG for valid multicast traffic that
 * is not MLD (such packets are marked routers-only unless destined to
 * the link-local all-nodes group), a negative error for malformed MLD
 * (counted as a parse error), and 0 for valid MLD, which is then
 * dispatched by message type.  skb_trimmed, if allocated by the
 * checker, is freed here.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		/* MLDv1 reports are only of interest to multicast routers. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	/* Free the trimmed clone if the checker had to allocate one. */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif
/* Own-query timer expiry: send the next (startup) query and forget the
 * currently selected querier port.  startup_sent counts the initial
 * burst of startup queries, capped at multicast_startup_query_count.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	/* Lock-side write; readers use rcu_dereference(). */
	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

/* Timer callback: timer data carries the bridge pointer. */
static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif
/* Reset the startup query counter and, unless snooping is disabled,
 * kick the own-query timer immediately so querying (re)starts.
 */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

/* Bring up multicast querying for both address families. */
void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

/* Stop all bridge-level multicast timers, waiting for running handlers. */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}
/* Set the bridge-level multicast router mode.  Only DISABLED, PERM and
 * TEMP_QUERY are valid at bridge level; anything else returns -EINVAL.
 * For DISABLED/PERM the router timer is stopped since it only drives
 * the query-based (TEMP_QUERY) mode.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case MDB_RTR_TYPE_TEMP_QUERY:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Remove @p from the router list (if present) and notify userspace. */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
}
/* Restart own queries on the bridge and enable them on every active
 * port for the address family that @query belongs to.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

/* Enable (@val != 0) or disable multicast snooping on the bridge.
 * On enable, the mdb hash is rehashed and querying restarted on all
 * ports; if a previous rehash is still pending (mdb->old set) the
 * toggle is rolled back with -EEXIST.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_port *port;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}
/* Enable or disable acting as the IGMP/MLD querier.  When enabling,
 * give any foreign querier a grace period (delay_time) before our own
 * queries start competing, then restart querying on all ports.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

/* Set the maximum mdb hash size.  @val must be a power of two and not
 * smaller than the current table size; if a table exists it is rehashed
 * immediately, rolling back hash_max on failure (-EEXIST if a previous
 * rehash is still pending).
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}
/* Collect snooped group addresses from all other ports of dev's bridge
 * into @br_ip_list (see the kernel-doc above for the full contract).
 * Entries are GFP_ATOMIC-allocated under RCU; on allocation failure the
 * walk stops early and the partial count is returned — the caller frees
 * the list either way.
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* Skip the requesting port itself. */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2180 */ 2181 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 2182 { 2183 struct net_bridge *br; 2184 struct net_bridge_port *port; 2185 struct ethhdr eth; 2186 bool ret = false; 2187 2188 rcu_read_lock(); 2189 if (!br_port_exists(dev)) 2190 goto unlock; 2191 2192 port = br_port_get_rcu(dev); 2193 if (!port || !port->br) 2194 goto unlock; 2195 2196 br = port->br; 2197 2198 memset(ð, 0, sizeof(eth)); 2199 eth.h_proto = htons(proto); 2200 2201 ret = br_multicast_querier_exists(br, ð); 2202 2203 unlock: 2204 rcu_read_unlock(); 2205 return ret; 2206 } 2207 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 2208 2209 /** 2210 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 2211 * @dev: The bridge port adjacent to which to check for a querier 2212 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 2213 * 2214 * Checks whether the given interface has a bridge on top and if so returns 2215 * true if a selected querier is behind one of the other ports of this 2216 * bridge. Otherwise returns false. 
/* Returns true if a foreign querier was seen recently (other-query
 * timer still pending) AND the selected querier sits behind some port
 * other than @dev's own port (see the kernel-doc above).
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
/* Account one multicast control packet of @type in direction @dir
 * against the per-cpu stats of @p (or the bridge when @p is NULL).
 * A zero @type means "not IGMP/MLD" and is skipped, as is everything
 * when multicast statistics are disabled.
 */
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

/* Allocate the bridge-level per-cpu multicast statistics. */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Accumulate one RX/TX counter pair from @src into @dst. */
static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

/* Sum the per-cpu multicast statistics of @p (or the bridge when @p is
 * NULL) into @dest.  Each CPU's counters are read under its
 * u64_stats seqcount so a consistent snapshot is accumulated; the
 * result is staged in a local struct and copied out in one go.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}