/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br);
unsigned int br_mdb_rehash_seq;

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;

	hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif
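
/* Look up the MDB entry for the destination of a forwarded packet.
 * Called from the RCU-protected fast path; returns NULL when snooping
 * is disabled, when the packet is itself IGMP/MLD, or when no group
 * entry exists (in which case the bridge floods as usual).
 */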
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}
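
/* Per-port group membership timer: when no report has refreshed a
 * port's membership within the membership interval, drop that port
 * from the group (permanent MDB entries are left alone).
 */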
static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}
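
/* Build a self-generated IGMP query. The resulting frame is laid out as
 *
 *	Ethernet (dst 01:00:5e:00:00:01)
 *	IPv4 (ihl = 6: 20-byte header + 4-byte Router Alert option,
 *	      TTL 1, daddr 224.0.0.1)
 *	IGMP query (general query when group == 0, otherwise
 *		    group-specific)
 */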
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
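
/* IPv6 counterpart of the query allocation above: an MLD query behind a
 * Hop-by-Hop extension header carrying the Router Alert option, sent to
 * ff02::1 with hop limit 1. A general query uses :: as the multicast
 * address; a group-specific query carries the group.
 */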
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		return NULL;
	}
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	/* General queries use the (longer) query response interval,
	 * group-specific queries the last-member interval, matching the
	 * IGMP path above.
	 */
	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *group;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
	}
	return NULL;
}
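
/* Walk one hash chain looking for @group. On a miss this also decides
 * whether the table needs attention: a chain longer than
 * br->hash_elasticity triggers a rehash with a fresh secret, a full
 * table doubles in size up to br->hash_max, and overflowing that limit
 * disables snooping entirely. Returns the entry, NULL on a clean miss,
 * or an ERR_PTR() (-EAGAIN asks the caller to retry against the
 * rebuilt table).
 */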
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char state)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->state = state;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}

static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
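
/* As above, but for IPv6. Non-transient (permanently assigned) groups
 * such as ff02::1 are never snooped, mirroring the IPv4 check for the
 * 224.0.0.x link-local range.
 */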
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void br_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br);

out:
	spin_unlock(&br->multicast_lock);
}

static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, ip);
	if (!skb)
		return;

	if (port) {
		__skb_push(skb, sizeof(struct ethhdr));
		skb->dev = port->dev;
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
			dev_queue_xmit);
	} else
		netif_rx(skb);
}
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port, u32 sent)
{
	unsigned long time;
	struct br_ip br_group;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier ||
	    timer_pending(&br->multicast_querier_timer))
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	br_group.proto = htons(ETH_P_IP);
	__br_multicast_send_query(br, port, &br_group);

#if IS_ENABLED(CONFIG_IPV6)
	br_group.proto = htons(ETH_P_IPV6);
	__br_multicast_send_query(br, port, &br_group);
#endif

	time = jiffies;
	time += sent < br->multicast_startup_query_count ?
			br->multicast_startup_query_interval :
			br->multicast_query_interval;
	mod_timer(port ? &port->multicast_query_timer :
			 &br->multicast_query_timer, time);
}

static void br_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (port->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		port->multicast_startup_queries_sent++;

	br_multicast_send_query(port->br, port,
				port->multicast_startup_queries_sent);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->multicast_query_timer,
		    br_multicast_port_query_expired, (unsigned long)port);
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}

static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	port->multicast_startup_queries_sent = 0;

	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
	    del_timer(&port->multicast_query_timer))
		mod_timer(&port->multicast_query_timer, jiffies);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	__br_multicast_enable_port(port);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *p, *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->multicast_query_timer);
	spin_unlock(&br->multicast_lock);
}
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*ih)))
		return -EINVAL;

	br_vlan_get_tag(skb, &vid);
	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip4_multicast_add_group(br, port, group, vid);
		if (err)
			break;
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	br_vlan_get_tag(skb, &vid);
	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
						 vid);
		if (err)
			break;
	}

	return err;
}
#endif

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *n, *slot = NULL;

	hlist_for_each_entry(p, n, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = n;
	}

	if (slot)
		hlist_add_after_rcu(slot, &port->rlist);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router != 1)
		return;

	if (!hlist_unhashed(&port->rlist))
		goto timer;

	br_multicast_add_router(br, port);

timer:
	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					int saddr)
{
	if (saddr)
		mod_timer(&br->multicast_querier_timer,
			  jiffies + br->multicast_querier_interval);
	else if (timer_pending(&br->multicast_querier_timer))
		return;

	br_multicast_mark_router(br, port);
}
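
/* Handle an incoming IGMP query: note the querier (and mark the port
 * as a router port), then shorten the membership timers of the queried
 * group so that entries expire unless a report arrives within
 * last_member_count * max response delay.
 */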
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;
	int err = 0;
	u16 vid = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !!iph->saddr);

	group = ih->group;

	if (skb->len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else {
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
			err = -EINVAL;
			goto out;
		}

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	}

	if (!group)
		goto out;

	br_vlan_get_tag(skb, &vid);
	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
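
/* MLD counterpart of the IGMP query handler above; MLDv1 carries the
 * maximum response delay in milliseconds, MLDv2 encodes it in the
 * mld2q_mrc field.
 */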
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	const struct in6_addr *group = NULL;
	int err = 0;
	u16 vid = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));

	if (skb->len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else if (skb->len >= sizeof(*mld2q)) {
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		max_delay = mld2q->mld2q_mrc ?
			    MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
	}

	if (!group)
		goto out;

	br_vlan_get_tag(skb, &vid);
	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
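
/* Handle a leave message. With fast leave enabled on the port the
 * port group is torn down immediately; otherwise the relevant timers
 * are merely shortened to last_member_count * last_member_interval,
 * giving remaining listeners a chance to answer the specific query.
 */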
static void br_multicast_leave_group(struct net_bridge *br,
				     struct net_bridge_port *port,
				     struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&br->multicast_querier_timer))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group);
}
#endif

static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2 = skb;
	const struct iphdr *iph;
	struct igmphdr *ih;
	unsigned int len;
	unsigned int offset;
	int err;
	u16 vid = 0;

	/* We treat OOM as packet loss for now. */
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->ihl < 5 || iph->version != 4)
		return -EINVAL;

	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	if (iph->protocol != IPPROTO_IGMP) {
		if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	}

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < ip_hdrlen(skb))
		return -EINVAL;

	if (skb->len > len) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			return -ENOMEM;

		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto err_out;
	}

	len -= ip_hdrlen(skb2);
	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);

	err = -EINVAL;
	if (!pskb_may_pull(skb2, sizeof(*ih)))
		goto out;

	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb2->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb2->csum = 0;
		if (skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	br_vlan_get_tag(skb2, &vid);
	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb2);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb2);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb2);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

out:
	__skb_push(skb2, offset);
err_out:
	if (skb2 != skb)
		kfree_skb(skb2);
	return err;
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2;
	const struct ipv6hdr *ip6h;
	u8 icmp6_type;
	u8 nexthdr;
	__be16 frag_off;
	unsigned int len;
	int offset;
	int err;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -EINVAL;

	ip6h = ipv6_hdr(skb);

	/*
	 * We're interested in MLD messages only.
	 *  - Version is 6
	 *  - MLD always carries a Router Alert hop-by-hop option
	 *  - But we do not support jumbograms.
	 */
	if (ip6h->version != 6 ||
	    ip6h->nexthdr != IPPROTO_HOPOPTS ||
	    ip6h->payload_len == 0)
		return 0;

	len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
	if (skb->len < len)
		return -EINVAL;

	nexthdr = ip6h->nexthdr;
	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);

	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
		return 0;

	/* Okay, we found ICMPv6 header */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return -ENOMEM;

	err = -EINVAL;
	if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
		goto out;

	len -= offset - skb_network_offset(skb2);

	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);
	skb_postpull_rcsum(skb2, skb_network_header(skb2),
			   skb_network_header_len(skb2));

	icmp6_type = icmp6_hdr(skb2)->icmp6_type;

	switch (icmp6_type) {
	case ICMPV6_MGM_QUERY:
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MLD2_REPORT:
		break;
	default:
		err = 0;
		goto out;
	}

	/* Okay, we found MLD message. Check further. */
	if (skb2->len > len) {
		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto out;
		err = -EINVAL;
	}

	ip6h = ipv6_hdr(skb2);
	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
				     IPPROTO_ICMPV6, skb2->csum))
			break;
		/*FALLTHROUGH*/
	case CHECKSUM_NONE:
		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							  &ip6h->daddr,
							  skb2->len,
							  IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	br_vlan_get_tag(skb, &vid);
	BR_INPUT_SKB_CB(skb)->igmp = 1;

	switch (icmp6_type) {
	case ICMPV6_MGM_REPORT:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	    }
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb2);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb2);
		break;
	case ICMPV6_MGM_REDUCTION:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
	    }
	}

out:
	kfree_skb(skb2);
	return err;
}
#endif

int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb)
{
	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return br_multicast_ipv4_rcv(br, port, skb);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_multicast_ipv6_rcv(br, port, skb);
#endif
	}

	return 0;
}

static void br_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (br->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		br->multicast_startup_queries_sent++;

	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

	spin_unlock(&br->multicast_lock);
}

void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = 1;
	br->multicast_querier = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_querier_timer,
		    br_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
		    (unsigned long)br);
}
void br_multicast_open(struct net_bridge *br)
{
	br->multicast_startup_queries_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&br->multicast_query_timer, jiffies);
}

void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p, *n;
	u32 ver;
	int i;

	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}

int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;

	spin_lock_bh(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	switch (val) {
	case 0:
	case 2:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case 1:
		br->multicast_router = val;
		err = 0;
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
		goto unlock;

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		if (val < 2 && !hlist_unhashed(&p->rlist))
			hlist_del_init_rcu(&p->rlist);

		if (val == 1)
			break;

		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		br_multicast_add_router(br, p);
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge *br)
{
	struct net_bridge_port *port;

	br_multicast_open(br);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		__br_multicast_enable_port(port);
	}
}
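
/* sysfs/netlink toggle for multicast snooping. Re-enabling rehashes
 * the existing table (rolling back the toggle on failure) and kicks
 * the querier back into its startup sequence.
 */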
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (val)
		br_multicast_start_querier(br);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = -EINVAL;
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}