/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid);
#endif
unsigned int br_mdb_rehash_seq;

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

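/* Relink all entries from the old hash table into the new one.  Each
 * entry carries two hlist nodes, one per table "version", so both
 * tables stay valid for RCU readers during the switch-over.  With a
 * non-zero elasticity, fail with -EINVAL if any chain in the new table
 * exceeds that length, so the caller knows rehashing did not help.
 */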
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->state);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

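/* Allocate a new hash table of size 'max' (with a fresh hash secret
 * when an elasticity is given or there was no old table), copy the old
 * entries over and publish the new table.  The old table stays
 * reachable through mdb->old until br_mdb_free() runs after an RCU
 * grace period.  Writers are serialized by br->multicast_lock.
 */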
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

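/* Build a self-originated IGMP query: destination 224.0.0.1 (all-hosts)
 * with TTL 1 and the Router Alert IP option, source either 0.0.0.0 or a
 * link-scope address of the bridge depending on
 * multicast_query_use_ifaddr.  A zero group yields a general query.
 */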
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
					sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

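/* IPv6 counterpart: an MLD query to ff02::1 with hop limit 1, carried
 * behind a hop-by-hop extension header with the Router Alert option.
 * Returns NULL if no suitable source address can be selected.
 */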
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
					8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		return NULL;
	}
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *group;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
	}
	return NULL;
}

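/* Look up 'group' in the chain selected by 'hash' and decide whether
 * the table needs maintenance on a miss: a chain longer than
 * hash_elasticity triggers a rehash with a fresh secret, a full table
 * is doubled up to hash_max (beyond which snooping is disabled), and
 * -EAGAIN tells the caller to retry the lookup after a rehash.
 */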
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char state)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->state = state;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

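/* Account a membership report for 'group': when the bridge device
 * itself is the listener (port == NULL) only the host timer is
 * refreshed; otherwise the per-port entry is found or created in the
 * pointer-ordered port list and armed for one membership interval.
 */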
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);
	br_rtr_notify(br->dev, port, RTM_DELMDB);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, ip);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
			NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		netif_rx(skb);
	}
}

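/* Send one query from our own querier instance, unless snooping or the
 * querier feature is off or a foreign querier is currently active, then
 * re-arm the timer: the first startup_query_count queries go out at the
 * shorter startup interval, later ones at the normal query interval.
 */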
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	unsigned long time;
	struct br_ip br_group;
	struct bridge_mcast_other_query *other_query = NULL;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (pg->state == MDB_TEMPORARY)
			br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist)) {
		hlist_del_init_rcu(&port->rlist);
		br_rtr_notify(br->dev, port, RTM_DELMDB);
	}
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

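/* Walk the group records of an IGMPv3 report.  Source lists are only
 * length-checked; each record is reduced to IGMPv2 semantics:
 * TO_INCLUDE/IS_INCLUDE with an empty source list counts as a leave,
 * every other known record type as a join.
 */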
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    ntohs(grec->grec_nsrcs) == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid);
			if (err)
				break;
		}
	}

	return err;
}

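/* MLDv2 counterpart of the IGMPv3 parser above: every group record is
 * bounds-checked via skb_header_pointer()/pskb_may_pull() before use,
 * then reduced to MLDv1-style join/leave in the same way.
 */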
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    ntohs(*nsrcs) == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid);
			/* Abort on failure, not on success: the original
			 * "if (!err) break;" stopped after the first
			 * successfully added record, silently dropping
			 * the rest of the report.
			 */
			if (err)
				break;
		}
	}

	return err;
}
#endif

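/* Querier election: while an election is running (one of the query
 * timers is pending), a newly heard querier only takes over if its
 * source address is not higher than the current one, so the lowest
 * address on the link wins.  The port the querier was heard on is
 * recorded for br_multicast_has_querier_adjacent().
 */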
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router != 1)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

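/* Process an incoming IGMP query.  A general query feeds the
 * other-querier election and timer above; a group-specific query
 * lowers the group and port timers so that membership expires after
 * last_member_count * max response time unless a report arrives.
 */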
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

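/* Handle a leave message.  With fast-leave enabled on the port the
 * entry is removed immediately.  Otherwise, if we are the active
 * querier, a group-specific query is sent and the remaining timers are
 * shortened to last_member_count * last_member_interval so the group
 * ages out quickly when nobody answers.
 */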
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->state);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query);
}
#endif

static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		return err;
	}

	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		return err;
	}

	BR_INPUT_SKB_CB(skb)->igmp = 1;
	mld = (struct mld_msg *)skb_transport_header(skb);

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	return err;
}
#endif

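/* Snooping entry point for the bridge input path: resets the per-skb
 * snooping state, then dispatches to the IGMP or MLD handler.  The
 * handlers flag pure control traffic through BR_INPUT_SKB_CB so the
 * forwarding code can treat it specially.
 */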
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return br_multicast_ipv4_rcv(br, port, skb, vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_multicast_ipv6_rcv(br, port, skb, vid);
#endif
	}

	return 0;
}

static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = 1;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

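/* Tear down the mdb when the bridge device is destroyed: drop every
 * entry and hand the hash table itself to RCU for freeing.
 */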
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}

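/* The br_multicast_set_*() and br_multicast_toggle() helpers below
 * implement the bridge's multicast configuration knobs (typically
 * driven via sysfs or netlink); each validates its input and applies
 * the change under multicast_lock.
 */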
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case 0:
	case 2:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case 1:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		if (val < 2 && !hlist_unhashed(&p->rlist)) {
			hlist_del_init_rcu(&p->rlist);
			br_rtr_notify(br->dev, p, RTM_DELMDB);
		}

		if (val == 1)
			break;

		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		br_multicast_add_router(br, p);
		break;
	}

	spin_unlock(&br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

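/* The remaining helpers are exported for other in-kernel users that
 * want to peek at the snooping state of a bridge from one of its ports
 * (e.g. multicast-optimizing protocols); they only take the RCU read
 * lock.
 */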
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

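/* A minimal usage sketch for br_multicast_list_adjacent() (hypothetical
 * caller, not part of this file).  The entries are kmalloc'ed above, so
 * the caller must walk and free the list when done:
 *
 *	LIST_HEAD(mc_list);
 *	struct br_ip_list *entry, *tmp;
 *	int n = br_multicast_list_adjacent(dev, &mc_list);
 *
 *	list_for_each_entry_safe(entry, tmp, &mc_list, list) {
 *		... use entry->addr ...
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */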
/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);