/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br);

/* Bumped on every successful mdb hash table rehash so interested readers
 * can detect that the table was rebuilt underneath them.
 */
unsigned int br_mdb_rehash_seq;

/* Compare two bridge multicast addresses: protocol, VLAN id and the
 * protocol-specific group address must all match.  Returns 1 on equality,
 * 0 otherwise (including for unknown protocols).
 */
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

/* Hash an IPv4 group/vid pair into a bucket index.  mdb->max is a power
 * of two, so masking yields a valid index.
 */
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of __br_ip4_hash(). */
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

/* Protocol-dispatching hash helper; unknown protocols land in bucket 0. */
static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

/* Walk one hash chain (using the table's current list version) looking
 * for dst.  Caller supplies the precomputed bucket index.
 */
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

/* Look up a group entry; tolerates a NULL (not yet allocated) table. */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

/* Convenience wrapper: build a br_ip for an IPv4 group and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: build a br_ip for an IPv6 group and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

/* Map a forwarded packet to its mdb entry, if snooping is enabled and
 * the packet is not itself IGMP/MLD signalling (flagged in the skb cb).
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

/* RCU callback: release the table replaced by a rehash.  The new table
 * holds the old one in ->old until readers have moved on.
 */
static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

/* Re-link every entry from 'old' into 'new', using the new table's list
 * version of each entry's hlist nodes.  If 'elasticity' is non-zero,
 * check afterwards that no chain in the new table exceeds it; returns
 * -EINVAL when one still does (bad luck with the new hash secret).
 */
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

/* RCU callback freeing a per-port group entry. */
void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

/* RCU callback freeing an mdb group entry. */
static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

/* Group membership timer: drop the bridge device itself from the group
 * and, if no ports remain either, unlink and free the mdb entry.
 */
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	/* timer_pending() means someone re-armed us; not really expired. */
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

/* Unlink one port from its group.  If that leaves the group with no
 * ports and no local (bridge) membership, fire the group timer now so
 * the entry itself is reaped.  Caller holds br->multicast_lock.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist && mp->timer_armed &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* pg was not on the group's port list - should never happen. */
	WARN_ON(1);
}

/* Per-port group membership timer: expire the port's membership unless
 * the entry is permanent or the timer was re-armed meanwhile.
 */
static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br =
pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

/* Allocate a table of 'max' buckets and publish it at *mdbp.  When an
 * old table exists its entries are copied over and the old table is
 * freed via RCU once readers are done.  A non-zero 'elasticity' forces
 * a fresh hash secret and a post-copy chain length check.
 */
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	/* Entries carry two sets of list nodes; flip to the unused set. */
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

/* Build an IGMP membership query addressed to 224.0.0.1 (all-hosts).
 * 'group' selects between a general query (0) and a group-specific one.
 * Returns a ready-to-send skb, or NULL on allocation failure.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	/* 01:00:5e:00:00:01 - the mapped MAC address of 224.0.0.1. */
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;	/* 20-byte header + 4-byte Router Alert option */
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	/* Router Alert option immediately after the fixed header. */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query addressed to ff02::1 (all-nodes).  An unspecified
 * 'group' produces a general query.  Returns NULL when allocation or
 * source address selection fails.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr =
IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	/* Destination: ff02::1, the link-local all-nodes group. */
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		return NULL;
	}
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *group;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Dispatch query allocation on the address family of 'addr'. */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
	}
	return NULL;
}

/* Find the mdb entry for 'group', growing or rehashing the table along
 * the way when chains get too long or the table fills up.  Returns the
 * entry, NULL when absent (caller may allocate), or an ERR_PTR:
 *   -EAGAIN  table was rehashed; caller must recompute the hash
 *   -E2BIG   hash_max exceeded; snooping has been disabled
 *   -EEXIST  a previous rehash is still awaiting RCU disposal
 * Caller holds br->multicast_lock.
 */
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			/* Previous rehash not yet RCU-freed; cannot start
			 * another one.
			 */
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

/* Find or create the mdb entry for 'group', allocating the hash table on
 * first use.  Returns the entry or an ERR_PTR from the lookup path.
 * Caller holds br->multicast_lock.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		/* Table was (re)built: reload it and rehash the group. */
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;

	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

/* Allocate a per-port group entry, link it onto the port's mglist and
 * seed its position in the group's port chain with 'next'.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char state)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->state = state;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

/* Record a membership report: attach 'port' (or the bridge device itself
 * when port is NULL) to 'group', creating entries as needed.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		/* Host join: the bridge device itself is a member. */
		mp->mglist = true;
		goto out;
	}

	/* Port list is kept sorted by descending pointer value. */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto out;	/* already a member */
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);

out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

/* IGMP join for an IPv4 group.  Link-local groups (224.0.0.0/24) are
 * never snooped and always flooded, so they are ignored here.
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD join for an IPv6 group; only transient multicast groups are
 * tracked.
 */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

/* Router discovery timer: remove the port from the router list when no
 * queries have been seen for a while (only in auto-detect mode, 1).
 */
static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}

/* The bridge-local router timer needs no action on expiry. */
static void br_multicast_local_router_expired(unsigned long data)
{
}

/* Other-querier-present timer ran out: take over querying ourselves. */
static void br_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br);

out:
	spin_unlock(&br->multicast_lock);
}

/* Emit one query: out of a specific port, or up through the bridge
 * device when port is NULL.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, ip);
	if (!skb)
		return;

	if (port) {
		__skb_push(skb, sizeof(struct ethhdr));
		skb->dev = port->dev;
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
			dev_queue_xmit);
	} else
		netif_rx(skb);
}

/* Send general IGMP and MLD queries and re-arm the query timer.  While
 * 'sent' is below the startup count the shorter startup interval is
 * used.  Suppressed while another querier is active or querying is off.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port, u32 sent)
{
	unsigned long time;
	struct br_ip br_group;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier ||
	    timer_pending(&br->multicast_querier_timer))
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	br_group.proto = htons(ETH_P_IP);
	__br_multicast_send_query(br, port, &br_group);

#if IS_ENABLED(CONFIG_IPV6)
	br_group.proto = htons(ETH_P_IPV6);
	__br_multicast_send_query(br, port, &br_group);
#endif

	time = jiffies;
	time += sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(port ?
&port->multicast_query_timer :
		  &br->multicast_query_timer, time);
}

/* Per-port query timer: count another startup query and send it, unless
 * the port cannot forward (disabled/blocking).
 */
static void br_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (port->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		port->multicast_startup_queries_sent++;

	br_multicast_send_query(port->br, port,
				port->multicast_startup_queries_sent);

out:
	spin_unlock(&br->multicast_lock);
}

/* Initialise per-port multicast state when a port joins the bridge. */
void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;	/* auto-detect routers by default */

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->multicast_query_timer,
		    br_multicast_port_query_expired, (unsigned long)port);
}

/* Tear down per-port multicast timers when a port leaves the bridge. */
void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}

/* Restart the startup-query sequence for a newly enabled port. */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	port->multicast_startup_queries_sent = 0;

	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
	    del_timer(&port->multicast_query_timer))
		mod_timer(&port->multicast_query_timer, jiffies);
}

/* Enable multicast processing on a port; no-op while snooping is off or
 * the bridge device is down.
 */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	__br_multicast_enable_port(port);

out:
	spin_unlock(&br->multicast_lock);
}

/* Disable multicast on a port: drop all its group memberships, remove it
 * from the router list and stop its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->multicast_query_timer);
	spin_unlock(&br->multicast_lock);
}

/* Parse an IGMPv3 membership report and record a join for every group
 * record of interest.  Returns 0, or -EINVAL on a truncated packet, or
 * the first error from the join path.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*ih)))
		return -EINVAL;

	br_vlan_get_tag(skb, &vid);
	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		/* Skip the per-record source list (4 bytes per source). */
		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now.
*/ 932 switch (type) { 933 case IGMPV3_MODE_IS_INCLUDE: 934 case IGMPV3_MODE_IS_EXCLUDE: 935 case IGMPV3_CHANGE_TO_INCLUDE: 936 case IGMPV3_CHANGE_TO_EXCLUDE: 937 case IGMPV3_ALLOW_NEW_SOURCES: 938 case IGMPV3_BLOCK_OLD_SOURCES: 939 break; 940 941 default: 942 continue; 943 } 944 945 err = br_ip4_multicast_add_group(br, port, group, vid); 946 if (err) 947 break; 948 } 949 950 return err; 951 } 952 953 #if IS_ENABLED(CONFIG_IPV6) 954 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 955 struct net_bridge_port *port, 956 struct sk_buff *skb) 957 { 958 struct icmp6hdr *icmp6h; 959 struct mld2_grec *grec; 960 int i; 961 int len; 962 int num; 963 int err = 0; 964 u16 vid = 0; 965 966 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 967 return -EINVAL; 968 969 br_vlan_get_tag(skb, &vid); 970 icmp6h = icmp6_hdr(skb); 971 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 972 len = sizeof(*icmp6h); 973 974 for (i = 0; i < num; i++) { 975 __be16 *nsrcs, _nsrcs; 976 977 nsrcs = skb_header_pointer(skb, 978 len + offsetof(struct mld2_grec, 979 grec_nsrcs), 980 sizeof(_nsrcs), &_nsrcs); 981 if (!nsrcs) 982 return -EINVAL; 983 984 if (!pskb_may_pull(skb, 985 len + sizeof(*grec) + 986 sizeof(struct in6_addr) * ntohs(*nsrcs))) 987 return -EINVAL; 988 989 grec = (struct mld2_grec *)(skb->data + len); 990 len += sizeof(*grec) + 991 sizeof(struct in6_addr) * ntohs(*nsrcs); 992 993 /* We treat these as MLDv1 reports for now. 
*/ 994 switch (grec->grec_type) { 995 case MLD2_MODE_IS_INCLUDE: 996 case MLD2_MODE_IS_EXCLUDE: 997 case MLD2_CHANGE_TO_INCLUDE: 998 case MLD2_CHANGE_TO_EXCLUDE: 999 case MLD2_ALLOW_NEW_SOURCES: 1000 case MLD2_BLOCK_OLD_SOURCES: 1001 break; 1002 1003 default: 1004 continue; 1005 } 1006 1007 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1008 vid); 1009 if (!err) 1010 break; 1011 } 1012 1013 return err; 1014 } 1015 #endif 1016 1017 static void br_multicast_update_querier_timer(struct net_bridge *br, 1018 unsigned long max_delay) 1019 { 1020 if (!timer_pending(&br->multicast_querier_timer)) 1021 br->multicast_querier_delay_time = jiffies + max_delay; 1022 1023 mod_timer(&br->multicast_querier_timer, 1024 jiffies + br->multicast_querier_interval); 1025 } 1026 1027 /* 1028 * Add port to router_list 1029 * list is maintained ordered by pointer value 1030 * and locked by br->multicast_lock and RCU 1031 */ 1032 static void br_multicast_add_router(struct net_bridge *br, 1033 struct net_bridge_port *port) 1034 { 1035 struct net_bridge_port *p; 1036 struct hlist_node *slot = NULL; 1037 1038 hlist_for_each_entry(p, &br->router_list, rlist) { 1039 if ((unsigned long) port >= (unsigned long) p) 1040 break; 1041 slot = &p->rlist; 1042 } 1043 1044 if (slot) 1045 hlist_add_after_rcu(slot, &port->rlist); 1046 else 1047 hlist_add_head_rcu(&port->rlist, &br->router_list); 1048 } 1049 1050 static void br_multicast_mark_router(struct net_bridge *br, 1051 struct net_bridge_port *port) 1052 { 1053 unsigned long now = jiffies; 1054 1055 if (!port) { 1056 if (br->multicast_router == 1) 1057 mod_timer(&br->multicast_router_timer, 1058 now + br->multicast_querier_interval); 1059 return; 1060 } 1061 1062 if (port->multicast_router != 1) 1063 return; 1064 1065 if (!hlist_unhashed(&port->rlist)) 1066 goto timer; 1067 1068 br_multicast_add_router(br, port); 1069 1070 timer: 1071 mod_timer(&port->multicast_router_timer, 1072 now + br->multicast_querier_interval); 1073 } 1074 1075 
/*
 * Common bookkeeping for any received IGMP/MLD query.  @saddr is
 * non-zero when the query carried a real (non-zero / non-any) source
 * address; only then do we treat the sender as another querier.  A
 * query with a zero source is ignored while another querier is known
 * to be present.  Either way the sender counts as a potential
 * multicast router.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					int saddr,
					unsigned long max_delay)
{
	if (saddr)
		br_multicast_update_querier_timer(br, max_delay);
	else if (timer_pending(&br->multicast_querier_timer))
		return;

	br_multicast_mark_router(br, port);
}

/*
 * Handle a received IGMP query: derive the maximum response delay,
 * update querier/router state and lower the group and per-port
 * membership timers of the queried group so stale entries expire if
 * nobody answers.  Called with the skb's transport header already at
 * the IGMP header.  Returns 0 or a negative errno.
 */
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;
	int err = 0;
	u16 vid = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	/* An 8-byte query is IGMPv1/v2; anything longer is parsed as
	 * an IGMPv3 query.
	 */
	if (skb->len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* code == 0: presumably an IGMPv1 query — fall
			 * back to a fixed 10s delay and clear the group
			 * so no group-specific timers are touched below.
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else {
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
			err = -EINVAL;
			goto out;
		}

		ih3 = igmpv3_query_hdr(skb);
		/* Group-and-source-specific queries are not handled. */
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	}

	br_multicast_query_received(br, port, !!iph->saddr, max_delay);

	/* General query (group 0): nothing group-specific to do. */
	if (!group)
		goto out;

	br_vlan_get_tag(skb, &vid);
	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	mod_timer(&mp->timer, now + br->multicast_membership_interval);
	mp->timer_armed = true;

	max_delay *= br->multicast_last_member_count;

	/* Lower the group timer, but never extend it: only re-arm when
	 * the pending expiry is later than now + max_delay (or the
	 * timer could be taken down synchronously).
	 */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	/* Same lowering logic for each per-port group entry. */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * IPv6 counterpart of br_ip4_multicast_query(): handle a received MLD
 * query, distinguishing MLDv1 (fixed-size mld_msg) from MLDv2 by
 * length, then lower the matching group/port timers.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	const struct in6_addr *group = NULL;
	int err = 0;
	u16 vid = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == sizeof(*mld)) {
		/* MLDv1 query. */
		if (!pskb_may_pull(skb, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query. */
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		/* Only group-specific (no sources) queries are handled. */
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		max_delay = mld2q->mld2q_mrc ?
			    MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
	}

	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
				    max_delay);

	/* General query: no group-specific timer handling. */
	if (!group)
		goto out;

	br_vlan_get_tag(skb, &vid);
	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	mod_timer(&mp->timer, now + br->multicast_membership_interval);
	mp->timer_armed = true;

	max_delay *= br->multicast_last_member_count;
	/* Lower (never extend) the group timer — same pattern as the
	 * IPv4 query path.
	 */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	/* And lower each per-port group entry's timer the same way. */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/*
 * Handle an IGMP/MLD leave for @group.  If we are the querier, send a
 * group-specific query and shorten the member timers; if the port has
 * fast-leave enabled, drop its group entry immediately; otherwise just
 * lower the group timer so the entry expires unless someone rejoins.
 * Ignored entirely while another querier is present (its queries will
 * age the entry out).
 */
static void br_multicast_leave_group(struct net_bridge *br,
				     struct net_bridge_port *port,
				     struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&br->multicast_querier_timer))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (br->multicast_querier &&
	    !timer_pending(&br->multicast_querier_timer)) {
		/* We are the active querier: probe for remaining
		 * members before letting the entry expire.
		 */
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;
		mod_timer(port ? &port->multicast_query_timer :
				 &br->multicast_query_timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		/* Fast leave: remove this port's group entry right
		 * away instead of waiting for the timers.
		 */
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB);

			/* Last reference gone: expire the mdb entry now. */
			if (!mp->ports && !mp->mglist && mp->timer_armed &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist && mp->timer_armed &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}
	}

out:
	spin_unlock(&br->multicast_lock);
}

/*
 * IPv4 wrapper: build a br_ip for @group/@vid and process the leave.
 * Link-local (224.0.0.x) groups are never snooped.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * IPv6 wrapper: only transient multicast addresses are snooped.
 */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group);
}
#endif

/*
 * Validate an IPv4 frame and, if it is IGMP, verify the checksum and
 * dispatch on the IGMP message type.  Non-IGMP multicast to
 * non-link-local groups is marked mrouters_only.  May clone and trim
 * the skb if it carries trailing padding beyond tot_len.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2 = skb;
	const struct iphdr *iph;
	struct igmphdr *ih;
	unsigned int len;
	unsigned int offset;
	int err;
	u16 vid = 0;

	/* We treat OOM as packet loss for now.
	 */
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->ihl < 5 || iph->version != 4)
		return -EINVAL;

	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
		return -EINVAL;

	/* Re-read: pskb_may_pull may have moved the header. */
	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	if (iph->protocol != IPPROTO_IGMP) {
		/* Not IGMP: flood to mrouter ports only unless it is
		 * link-local multicast.
		 */
		if (!ipv4_is_local_multicast(iph->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	}

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < ip_hdrlen(skb))
		return -EINVAL;

	if (skb->len > len) {
		/* Trailing padding: work on a trimmed clone so the
		 * original frame is forwarded untouched.
		 */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			return -ENOMEM;

		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto err_out;
	}

	len -= ip_hdrlen(skb2);
	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);

	err = -EINVAL;
	if (!pskb_may_pull(skb2, sizeof(*ih)))
		goto out;

	/* Verify the IGMP checksum before trusting any field. */
	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb2->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb2->csum = 0;
		if (skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	br_vlan_get_tag(skb2, &vid);
	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb2);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb2);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb2);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

out:
	__skb_push(skb2, offset);
err_out:
	if (skb2 != skb)
		kfree_skb(skb2);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Validate an IPv6 frame and, if it is an MLD message (ICMPv6 behind
 * a hop-by-hop header), verify the checksum and dispatch on the
 * ICMPv6 type.  Always works on a clone of the skb.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2;
	const struct ipv6hdr *ip6h;
	u8 icmp6_type;
	u8 nexthdr;
	__be16 frag_off;
	unsigned int len;
	int offset;
	int err;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -EINVAL;

	ip6h = ipv6_hdr(skb);

	/*
	 * We're interested in MLD messages only.
	 *  - Version is 6
	 *  - MLD has always Router Alert hop-by-hop option
	 *  - But we do not support jumbrograms.
	 */
	if (ip6h->version != 6 ||
	    ip6h->nexthdr != IPPROTO_HOPOPTS ||
	    ip6h->payload_len == 0)
		return 0;

	len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
	if (skb->len < len)
		return -EINVAL;

	nexthdr = ip6h->nexthdr;
	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);

	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
		return 0;

	/* Okay, we found ICMPv6 header */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return -ENOMEM;

	err = -EINVAL;
	if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
		goto out;

	len -= offset - skb_network_offset(skb2);

	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);
	skb_postpull_rcsum(skb2, skb_network_header(skb2),
			   skb_network_header_len(skb2));

	icmp6_type = icmp6_hdr(skb2)->icmp6_type;

	switch (icmp6_type) {
	case ICMPV6_MGM_QUERY:
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MLD2_REPORT:
		break;
	default:
		err = 0;
		goto out;
	}

	/* Okay, we found MLD message. Check further.
	 */
	if (skb2->len > len) {
		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto out;
		/* Trim succeeded; keep err at -EINVAL so a checksum
		 * failure below is reported.  err becomes 0 only once
		 * the message validates.
		 */
		err = -EINVAL;
	}

	/* Re-read after possible trim/pull. */
	ip6h = ipv6_hdr(skb2);

	/* Verify the ICMPv6 checksum with the pseudo-header. */
	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
				     IPPROTO_ICMPV6, skb2->csum))
			break;
		/*FALLTHROUGH*/
	case CHECKSUM_NONE:
		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							  &ip6h->daddr,
							  skb2->len,
							  IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	br_vlan_get_tag(skb, &vid);
	BR_INPUT_SKB_CB(skb)->igmp = 1;

	switch (icmp6_type) {
	case ICMPV6_MGM_REPORT:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	    }
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb2);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb2);
		break;
	case ICMPV6_MGM_REDUCTION:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
	    }
	}

out:
	kfree_skb(skb2);
	return err;
}
#endif

/*
 * Entry point from the bridge input path: reset the per-skb snooping
 * flags and dispatch to the IPv4/IPv6 handler unless snooping is
 * disabled.  Returns 0 or a negative errno from the handlers.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb)
{
	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return br_multicast_ipv4_rcv(br, port, skb);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_multicast_ipv6_rcv(br, port, skb);
#endif
	}

	return 0;
}

/*
 * Query timer callback: send the next (startup) general query and
 * count it, up to multicast_startup_query_count.
 */
static void br_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (br->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		br->multicast_startup_queries_sent++;

	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

	spin_unlock(&br->multicast_lock);
}

/*
 * Initialise multicast snooping state for a new bridge: default
 * hash/elasticity, protocol intervals, the lock and the three timers.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = 1;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->multicast_querier_delay_time = 0;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_querier_timer,
		    br_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
		    (unsigned long)br);
}

/*
 * Bridge device brought up: restart the startup query sequence unless
 * snooping is disabled.
 */
void br_multicast_open(struct net_bridge *br)
{
	br->multicast_startup_queries_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&br->multicast_query_timer, jiffies);
}

/*
 * Bridge device going down: stop all timers and tear down the mdb
 * hash table, freeing entries via RCU.
 */
void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			mp->timer_armed = false;
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	/* If a rehash is still pending, wait for its RCU callbacks to
	 * finish before freeing the table itself.
	 */
	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}

/*
 * Sysfs/netlink setter for the bridge-level multicast_router mode
 * (0 = never, 1 = auto/timer-based, 2 = always).  Returns 0, -EINVAL
 * for an unknown value, or -ENOENT when the device is down.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;

	spin_lock_bh(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	switch (val) {
	case 0:
	case 2:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case 1:
		br->multicast_router = val;
		err = 0;
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/*
 * Per-port counterpart of br_multicast_set_router(): additionally
 * maintains the port's presence on router_list and its router timer
 * according to the new mode.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
		goto unlock;

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		/* Modes 0/1: the port must not stay statically listed. */
		if (val < 2 && !hlist_unhashed(&p->rlist))
			hlist_del_init_rcu(&p->rlist);

		if (val == 1)
			break;

		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		/* Mode 2: permanently mark the port as a router port. */
		br_multicast_add_router(br, p);
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

/*
 * Restart querying: rearm the bridge query timer and re-enable
 * multicast on every forwarding port.
 */
static void br_multicast_start_querier(struct net_bridge *br)
{
	struct net_bridge_port *port;

	br_multicast_open(br);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		__br_multicast_enable_port(port);
	}
}

/*
 * Enable/disable multicast snooping.  When enabling, the mdb is
 * rehashed (if present) and the querier restarted; a pending old
 * table or rehash failure rolls the toggle back.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/*
 * Enable/disable acting as IGMP/MLD querier.  When enabling, remember
 * the earliest time our querier may claim the role and start querying.
 * Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;
	if (!timer_pending(&br->multicast_querier_timer))
		br->multicast_querier_delay_time = jiffies + max_delay;

	br_multicast_start_querier(br);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

/*
 * Set the maximum mdb hash size.  @val must be a power of two and not
 * smaller than the current table; growing triggers a rehash, rolled
 * back on failure.  Returns 0, -EINVAL, -EEXIST (rehash pending) or
 * -ENOENT (device down).
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = -EINVAL;
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}