/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid);
#endif

/* Incremented on every successful mdb rehash (see br_mdb_rehash()). */
unsigned int br_mdb_rehash_seq;

/* Protocol- and VLAN-aware equality test for two bridge multicast
 * addresses.  Addresses of different protocol or vid never compare equal;
 * unknown protocols compare unequal.
 */
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

/* Hash an IPv4 group/vid pair into an mdb bucket index.
 * mdb->max is a power of two, so the mask selects a valid bucket.
 */
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Hash an IPv6 group/vid pair into an mdb bucket index. */
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

/* Dispatch to the per-protocol hash; unknown protocols fall through to
 * bucket 0.
 */
static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

/* Walk one hash chain (RCU-protected) looking for an exact group match. */
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

/* Look up a multicast group in the mdb; returns NULL when the table has
 * not been allocated yet or the group is absent.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

/* Convenience wrapper: look up an IPv4 group address. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: look up an IPv6 group address. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct
net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	/* Map a forwarded skb to its mdb entry for the data fast path.
	 * Returns NULL (flood) when snooping is disabled, the packet is
	 * itself IGMP, or the destination is not a tracked group.
	 */
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

/* RCU callback: free the table that was replaced by a rehash.  The new
 * table keeps a ->old link to it until this runs.
 */
static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
	    container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

/* Relink every entry of 'old' into 'new' (entries carry two hlist nodes,
 * one per table version, so the old table stays intact for RCU readers).
 * With a non-zero elasticity, verify no new chain exceeds it; -EINVAL
 * means the rehash did not break up the long chains.
 */
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

/* RCU callback: free a port group removed from an mdb entry. */
void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
	    container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

/* RCU callback: free an mdb entry unlinked from the hash table. */
static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
	    container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

/* Timer: the bridge's own membership in a group timed out.  Drop the
 * host flag and, when no port groups remain either, unlink and free the
 * entry.  A pending timer means it was re-armed after this fired.
 */
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

/* Unlink one port group from its mdb entry (caller holds multicast_lock).
 * When this was the last member and the bridge itself is not joined,
 * fire the entry's timer immediately so it gets cleaned up.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* pg was not on its own entry's port list - should never happen. */
	WARN_ON(1);
}

/* Timer: a port's membership in a group timed out. */
static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	/* Skip if re-armed, already unlinked, or pinned by user space. */
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

/* Replace *mdbp with a table of 'max' buckets (power of two), migrating
 * all existing entries.  A fresh hash secret is drawn when growing from
 * nothing or when rehashing to break up long chains (elasticity != 0).
 * Called with multicast_lock held; GFP_ATOMIC throughout.
 */
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	/* Entries hold one hlist node per table version; flip versions. */
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

/* Build an IGMP membership query addressed to 224.0.0.1 (all hosts),
 * group-specific when 'group' is non-zero, general otherwise.  The IP
 * header carries a 4-byte Router Alert option (hence ihl = 6 words).
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	/* 01:00:5e:00:00:01 - the mapped MAC of 224.0.0.1 */
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* 20-byte header + 4-byte RA option */

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query to ff02::1 with a Hop-by-Hop Router Alert option
 * (8 bytes between the IPv6 header and the ICMPv6 message).  Returns
 * NULL if no usable link-local source address exists on the bridge.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		return NULL;
	}
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *group;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Dispatch query allocation by address family. */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
	}
	return NULL;
}

static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	/* Look up 'group' in bucket 'hash'; on a miss decide whether the
	 * table must first grow (chain too long or table full).  Returns
	 * the entry, NULL (caller may insert), or ERR_PTR: -EAGAIN after
	 * a successful rehash (caller must re-hash and retry), -E2BIG /
	 * -EEXIST when snooping had to be disabled or a rehash is still
	 * in flight.  Called with multicast_lock held.
	 */
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			/* Previous rehash not yet freed by RCU. */
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

/* Find or create the mdb entry for 'group'.  Allocates the hash table on
 * first use.  Returns the (possibly new) entry or an ERR_PTR from
 * br_multicast_get_group().  Called with multicast_lock held.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		/* Table was (re)built - recompute the bucket. */
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

/* Allocate a port group and link it onto the port's mglist; 'next' is
 * spliced in as the tail of the entry's port list.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char state)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->state = state;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

/* Record a membership report: port == NULL joins the bridge itself,
 * otherwise the port is added to the group (the list is kept ordered by
 * descending port pointer).  (Re)arms the membership timer either way.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

/* IPv4 wrapper: ignore reports for 224.0.0.x link-local groups. */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 wrapper: ignore reports for the all-nodes address (ff02::1). */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

static void
br_multicast_router_expired(unsigned long data)
{
	/* Timer: a learned (multicast_router == 1, i.e. auto-detect) port
	 * router entry timed out - remove it from the router list.
	 */
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}

/* Nothing to do when the bridge's own router timer expires; the timer's
 * pending state itself is what br_multicast_mark_router() consults.
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}

/* The foreign querier we were deferring to went silent - resume sending
 * our own queries (if snooping is still enabled).
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

/* Record the source address of our own locally generated query as the
 * currently selected querier address.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Emit one query: out through 'port' via the bridge netfilter hook, or,
 * for the bridge itself, loop it back in through netif_rx() so local
 * snooping state sees it too.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, ip);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
			NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		netif_rx(skb);
	}
}

/* Send a general query for the family 'own_query' belongs to, unless a
 * foreign querier is active (other_query timer pending).  Re-arms the
 * own-query timer with the startup or steady-state interval.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	unsigned long time;
	struct br_ip br_group;
	struct bridge_mcast_other_query *other_query = NULL;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Timer body shared by the per-port v4/v6 own-query timers. */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

/* Initialise per-port multicast state when a port joins the bridge. */
void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}

/* Restart a port's startup query cycle (timer fires immediately). */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

void br_multicast_enable_port(struct
net_bridge_port *port) 940 { 941 struct net_bridge *br = port->br; 942 943 spin_lock(&br->multicast_lock); 944 if (br->multicast_disabled || !netif_running(br->dev)) 945 goto out; 946 947 br_multicast_enable(&port->ip4_own_query); 948 #if IS_ENABLED(CONFIG_IPV6) 949 br_multicast_enable(&port->ip6_own_query); 950 #endif 951 if (port->multicast_router == 2 && hlist_unhashed(&port->rlist)) 952 br_multicast_add_router(br, port); 953 954 out: 955 spin_unlock(&br->multicast_lock); 956 } 957 958 void br_multicast_disable_port(struct net_bridge_port *port) 959 { 960 struct net_bridge *br = port->br; 961 struct net_bridge_port_group *pg; 962 struct hlist_node *n; 963 964 spin_lock(&br->multicast_lock); 965 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 966 br_multicast_del_pg(br, pg); 967 968 if (!hlist_unhashed(&port->rlist)) 969 hlist_del_init_rcu(&port->rlist); 970 del_timer(&port->multicast_router_timer); 971 del_timer(&port->ip4_own_query.timer); 972 #if IS_ENABLED(CONFIG_IPV6) 973 del_timer(&port->ip6_own_query.timer); 974 #endif 975 spin_unlock(&br->multicast_lock); 976 } 977 978 static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 979 struct net_bridge_port *port, 980 struct sk_buff *skb, 981 u16 vid) 982 { 983 struct igmpv3_report *ih; 984 struct igmpv3_grec *grec; 985 int i; 986 int len; 987 int num; 988 int type; 989 int err = 0; 990 __be32 group; 991 992 ih = igmpv3_report_hdr(skb); 993 num = ntohs(ih->ngrec); 994 len = sizeof(*ih); 995 996 for (i = 0; i < num; i++) { 997 len += sizeof(*grec); 998 if (!pskb_may_pull(skb, len)) 999 return -EINVAL; 1000 1001 grec = (void *)(skb->data + len - sizeof(*grec)); 1002 group = grec->grec_mca; 1003 type = grec->grec_type; 1004 1005 len += ntohs(grec->grec_nsrcs) * 4; 1006 if (!pskb_may_pull(skb, len)) 1007 return -EINVAL; 1008 1009 /* We treat this as an IGMPv2 report for now. 
*/ 1010 switch (type) { 1011 case IGMPV3_MODE_IS_INCLUDE: 1012 case IGMPV3_MODE_IS_EXCLUDE: 1013 case IGMPV3_CHANGE_TO_INCLUDE: 1014 case IGMPV3_CHANGE_TO_EXCLUDE: 1015 case IGMPV3_ALLOW_NEW_SOURCES: 1016 case IGMPV3_BLOCK_OLD_SOURCES: 1017 break; 1018 1019 default: 1020 continue; 1021 } 1022 1023 if ((type == IGMPV3_CHANGE_TO_INCLUDE || 1024 type == IGMPV3_MODE_IS_INCLUDE) && 1025 ntohs(grec->grec_nsrcs) == 0) { 1026 br_ip4_multicast_leave_group(br, port, group, vid); 1027 } else { 1028 err = br_ip4_multicast_add_group(br, port, group, vid); 1029 if (err) 1030 break; 1031 } 1032 } 1033 1034 return err; 1035 } 1036 1037 #if IS_ENABLED(CONFIG_IPV6) 1038 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 1039 struct net_bridge_port *port, 1040 struct sk_buff *skb, 1041 u16 vid) 1042 { 1043 struct icmp6hdr *icmp6h; 1044 struct mld2_grec *grec; 1045 int i; 1046 int len; 1047 int num; 1048 int err = 0; 1049 1050 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1051 return -EINVAL; 1052 1053 icmp6h = icmp6_hdr(skb); 1054 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1055 len = sizeof(*icmp6h); 1056 1057 for (i = 0; i < num; i++) { 1058 __be16 *nsrcs, _nsrcs; 1059 1060 nsrcs = skb_header_pointer(skb, 1061 len + offsetof(struct mld2_grec, 1062 grec_nsrcs), 1063 sizeof(_nsrcs), &_nsrcs); 1064 if (!nsrcs) 1065 return -EINVAL; 1066 1067 if (!pskb_may_pull(skb, 1068 len + sizeof(*grec) + 1069 sizeof(struct in6_addr) * ntohs(*nsrcs))) 1070 return -EINVAL; 1071 1072 grec = (struct mld2_grec *)(skb->data + len); 1073 len += sizeof(*grec) + 1074 sizeof(struct in6_addr) * ntohs(*nsrcs); 1075 1076 /* We treat these as MLDv1 reports for now. 
*/ 1077 switch (grec->grec_type) { 1078 case MLD2_MODE_IS_INCLUDE: 1079 case MLD2_MODE_IS_EXCLUDE: 1080 case MLD2_CHANGE_TO_INCLUDE: 1081 case MLD2_CHANGE_TO_EXCLUDE: 1082 case MLD2_ALLOW_NEW_SOURCES: 1083 case MLD2_BLOCK_OLD_SOURCES: 1084 break; 1085 1086 default: 1087 continue; 1088 } 1089 1090 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 1091 grec->grec_type == MLD2_MODE_IS_INCLUDE) && 1092 ntohs(*nsrcs) == 0) { 1093 br_ip6_multicast_leave_group(br, port, &grec->grec_mca, 1094 vid); 1095 } else { 1096 err = br_ip6_multicast_add_group(br, port, 1097 &grec->grec_mca, vid); 1098 if (!err) 1099 break; 1100 } 1101 } 1102 1103 return err; 1104 } 1105 #endif 1106 1107 static bool br_ip4_multicast_select_querier(struct net_bridge *br, 1108 struct net_bridge_port *port, 1109 __be32 saddr) 1110 { 1111 if (!timer_pending(&br->ip4_own_query.timer) && 1112 !timer_pending(&br->ip4_other_query.timer)) 1113 goto update; 1114 1115 if (!br->ip4_querier.addr.u.ip4) 1116 goto update; 1117 1118 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4)) 1119 goto update; 1120 1121 return false; 1122 1123 update: 1124 br->ip4_querier.addr.u.ip4 = saddr; 1125 1126 /* update protected by general multicast_lock by caller */ 1127 rcu_assign_pointer(br->ip4_querier.port, port); 1128 1129 return true; 1130 } 1131 1132 #if IS_ENABLED(CONFIG_IPV6) 1133 static bool br_ip6_multicast_select_querier(struct net_bridge *br, 1134 struct net_bridge_port *port, 1135 struct in6_addr *saddr) 1136 { 1137 if (!timer_pending(&br->ip6_own_query.timer) && 1138 !timer_pending(&br->ip6_other_query.timer)) 1139 goto update; 1140 1141 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0) 1142 goto update; 1143 1144 return false; 1145 1146 update: 1147 br->ip6_querier.addr.u.ip6 = *saddr; 1148 1149 /* update protected by general multicast_lock by caller */ 1150 rcu_assign_pointer(br->ip6_querier.port, port); 1151 1152 return true; 1153 } 1154 #endif 1155 1156 static bool br_multicast_select_querier(struct 
net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	/* Family dispatch for querier election; unknown protocols lose. */
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

/* Note a foreign querier: remember when its influence may lapse and
 * re-arm the other-querier presence timer.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
}

/* A query arrived on 'port' (NULL = the bridge itself): treat the sender
 * as a multicast router if router auto-detection (mode 1) is in effect,
 * and refresh the corresponding router timer.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router != 1)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

/* Handle a general query from another querier: run the election and, if
 * it wins, defer to it and mark the ingress port as a router port.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

/* Process a received IGMP query: extract the max response delay (v1, v2
 * or v3 encoding by packet length), handle general queries via querier
 * election, and for group-specific queries shorten the membership
 * timers of the queried group so non-responders age out.
 */
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 query: fixed 10s delay, always general. */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
1302 time_after(mp->timer.expires, now + max_delay) : 1303 try_to_del_timer_sync(&mp->timer) >= 0)) 1304 mod_timer(&mp->timer, now + max_delay); 1305 1306 for (pp = &mp->ports; 1307 (p = mlock_dereference(*pp, br)) != NULL; 1308 pp = &p->next) { 1309 if (timer_pending(&p->timer) ? 1310 time_after(p->timer.expires, now + max_delay) : 1311 try_to_del_timer_sync(&p->timer) >= 0) 1312 mod_timer(&p->timer, now + max_delay); 1313 } 1314 1315 out: 1316 spin_unlock(&br->multicast_lock); 1317 return err; 1318 } 1319 1320 #if IS_ENABLED(CONFIG_IPV6) 1321 static int br_ip6_multicast_query(struct net_bridge *br, 1322 struct net_bridge_port *port, 1323 struct sk_buff *skb, 1324 u16 vid) 1325 { 1326 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 1327 struct mld_msg *mld; 1328 struct net_bridge_mdb_entry *mp; 1329 struct mld2_query *mld2q; 1330 struct net_bridge_port_group *p; 1331 struct net_bridge_port_group __rcu **pp; 1332 struct br_ip saddr; 1333 unsigned long max_delay; 1334 unsigned long now = jiffies; 1335 const struct in6_addr *group = NULL; 1336 bool is_general_query; 1337 int err = 0; 1338 1339 spin_lock(&br->multicast_lock); 1340 if (!netif_running(br->dev) || 1341 (port && port->state == BR_STATE_DISABLED)) 1342 goto out; 1343 1344 if (skb->len == sizeof(*mld)) { 1345 if (!pskb_may_pull(skb, sizeof(*mld))) { 1346 err = -EINVAL; 1347 goto out; 1348 } 1349 mld = (struct mld_msg *) icmp6_hdr(skb); 1350 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); 1351 if (max_delay) 1352 group = &mld->mld_mca; 1353 } else { 1354 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1355 err = -EINVAL; 1356 goto out; 1357 } 1358 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1359 if (!mld2q->mld2q_nsrcs) 1360 group = &mld2q->mld2q_mca; 1361 1362 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); 1363 } 1364 1365 is_general_query = group && ipv6_addr_any(group); 1366 1367 if (is_general_query) { 1368 saddr.proto = htons(ETH_P_IPV6); 1369 saddr.u.ip6 = ip6h->saddr; 1370 1371 
		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	/* Group-specific query: shorten timers to the last-member query
	 * time, but never extend an already shorter pending timer.
	 */
	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Common IGMP/MLD leave handling.
 *
 * Ignored while a foreign querier is active (other_query timer pending):
 * that querier is responsible for the group-specific queries.  If we are
 * configured as querier, send our own group-specific query and arm
 * own_query.  With BR_MULTICAST_FAST_LEAVE set on the port, the port
 * group entry is torn down immediately; otherwise the relevant timers
 * are shortened to last_member_count * last_member_interval so the
 * membership expires quickly unless another member reports.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&other_query->timer))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		/* Shorten this port's entry for the group, if any. */
		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* Fast leave: unlink the port group entry right away and
		 * let the mdb entry expire immediately if it has no other
		 * users.
		 */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* Leave seen by the bridge device itself: shorten the
		 * host membership timer only.
		 */
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* IGMP leave wrapper: link-local groups (ipv4_is_local_multicast) are
 * never snooped, so leaves for them are ignored.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD leave wrapper: the link-local all-nodes group is never snooped. */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query);
}
#endif

/* Parse an IPv4 packet traversing the bridge and act on IGMP messages.
 * ip_mc_check_igmp() validates the packet, possibly returning a trimmed
 * copy in skb_trimmed; -ENOMSG presumably means "valid but not IGMP"
 * (NOTE(review): semantics of the helper are outside this chunk), in
 * which case forwarding is restricted to router ports unless the
 * destination is link-local multicast.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		return err;
	}

	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

	if (skb_trimmed)
		kfree_skb(skb_trimmed);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_multicast_ipv4_rcv(): validate via
 * ipv6_mc_check_mld() and dispatch on the MLD message type.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		return err;
	}

	BR_INPUT_SKB_CB(skb)->igmp = 1;
	mld = (struct mld_msg *)skb_transport_header(skb);

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	if (skb_trimmed)
		kfree_skb(skb_trimmed);

	return err;
}
#endif

/* Snooping entry point for packets entering the bridge: resets the
 * per-skb snooping state and hands IPv4/IPv6 packets to the protocol
 * handlers.  A no-op when snooping is administratively disabled.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return br_multicast_ipv4_rcv(br, port, skb, vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_multicast_ipv6_rcv(br, port, skb, vid);
#endif
	}

	return 0;
}

/* Our own query timer fired: count it against the startup query budget,
 * forget the previously selected querier port and send the next query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

/* Initialize per-bridge multicast state: defaults, lock and timers.
 * The interval values match the IGMP/MLD suggested defaults (e.g. 125s
 * query interval, 260s membership interval) - NOTE(review): presumably
 * per RFC 2236/3376; confirm before changing.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = 1;	/* automatic, timer-driven */
	br->multicast_querier = 0;	/* don't act as querier by default */
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

/* Reset the startup query counter and, unless snooping is disabled,
 * fire the own-query timer immediately.
 */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

/* Tear down all multicast state: stop the timers, then free every mdb
 * entry and the hash table itself via RCU.  If a rehash left an old
 * table pending, drop the lock and wait for its RCU callback so that
 * mdb->old can be reused for the final free.
 */
void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	/* Free the table through the same mdb->old mechanism a rehash
	 * uses.
	 */
	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}

/* Set the bridge device's own router mode: 0 = never a router,
 * 1 = automatic (timer-driven), 2 = permanent.  Any other value is
 * rejected with -EINVAL.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case 0:
	case 2:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case 1:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Set a port's router mode (same 0/1/2 semantics as the bridge-level
 * setting): leaving automatic mode removes the port from router_list,
 * leaving mode 2 stops its timer, and entering mode 2 re-adds it as a
 * permanent router port.
 *
 * NOTE(review): this takes multicast_lock with plain spin_lock while
 * br_multicast_set_router() uses spin_lock_bh and the same lock is
 * taken from timer (softirq) context - verify BH-safety here.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		if (val < 2 && !hlist_unhashed(&p->rlist))
			hlist_del_init_rcu(&p->rlist);

		if (val == 1)
			break;

		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		br_multicast_add_router(br, p);
		break;
	}

	spin_unlock(&br->multicast_lock);

	return err;
}

/* Kick off own queries on the bridge and on every forwarding port for
 * the given address family.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

/* Enable (val != 0) or disable multicast snooping.  Enabling rehashes
 * the mdb (its contents may be stale) and restarts the queriers; if the
 * rehash fails or one is already in flight, the toggle is rolled back.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Enable or disable acting as IGMP/MLD querier.  When enabling, give a
 * foreign querier the usual response time to show up (delay_time) before
 * our own queries start.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

/* Resize the mdb hash table.  @val must be a power of two and at least
 * the current table size; the actual rehash is rolled back on failure
 * or if another rehash is still pending.
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* On allocation failure, return the partial list
			 * gathered so far (caller frees it either way).
			 */
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* Build a minimal ethernet header carrying only the protocol,
	 * which is all br_multicast_querier_exists() needs.
	 */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		/* No foreign querier, or the querier sits on @dev itself. */
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);