1 /* 2 * Bridge multicast support. 3 * 4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the Free 8 * Software Foundation; either version 2 of the License, or (at your option) 9 * any later version. 10 * 11 */ 12 13 #include <linux/err.h> 14 #include <linux/if_ether.h> 15 #include <linux/igmp.h> 16 #include <linux/jhash.h> 17 #include <linux/kernel.h> 18 #include <linux/log2.h> 19 #include <linux/netdevice.h> 20 #include <linux/netfilter_bridge.h> 21 #include <linux/random.h> 22 #include <linux/rculist.h> 23 #include <linux/skbuff.h> 24 #include <linux/slab.h> 25 #include <linux/timer.h> 26 #include <net/ip.h> 27 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 28 #include <net/ipv6.h> 29 #include <net/mld.h> 30 #include <net/addrconf.h> 31 #include <net/ip6_checksum.h> 32 #endif 33 34 #include "br_private.h" 35 36 #define mlock_dereference(X, br) \ 37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) 38 39 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 40 static inline int ipv6_is_local_multicast(const struct in6_addr *addr) 41 { 42 if (ipv6_addr_is_multicast(addr) && 43 IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL) 44 return 1; 45 return 0; 46 } 47 #endif 48 49 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 50 { 51 if (a->proto != b->proto) 52 return 0; 53 switch (a->proto) { 54 case htons(ETH_P_IP): 55 return a->u.ip4 == b->u.ip4; 56 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 57 case htons(ETH_P_IPV6): 58 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); 59 #endif 60 } 61 return 0; 62 } 63 64 static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) 65 { 66 return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); 67 } 68 69 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 70 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 71 const struct in6_addr *ip) 72 { 73 return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); 74 } 75 #endif 76 77 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, 78 struct br_ip *ip) 79 { 80 switch (ip->proto) { 81 case htons(ETH_P_IP): 82 return __br_ip4_hash(mdb, ip->u.ip4); 83 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 84 case htons(ETH_P_IPV6): 85 return __br_ip6_hash(mdb, &ip->u.ip6); 86 #endif 87 } 88 return 0; 89 } 90 91 static struct net_bridge_mdb_entry *__br_mdb_ip_get( 92 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) 93 { 94 struct net_bridge_mdb_entry *mp; 95 struct hlist_node *p; 96 97 hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 98 if (br_ip_equal(&mp->addr, dst)) 99 return mp; 100 } 101 102 return NULL; 103 } 104 105 static struct net_bridge_mdb_entry *br_mdb_ip_get( 106 struct net_bridge_mdb_htable *mdb, struct br_ip *dst) 107 { 108 if (!mdb) 109 return NULL; 110 111 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 112 } 113 114 static struct net_bridge_mdb_entry *br_mdb_ip4_get( 115 struct net_bridge_mdb_htable *mdb, __be32 dst) 116 { 117 struct br_ip br_dst; 118 119 br_dst.u.ip4 = dst; 120 br_dst.proto = htons(ETH_P_IP); 121 122 return br_mdb_ip_get(mdb, &br_dst); 123 } 124 125 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 126 static struct net_bridge_mdb_entry *br_mdb_ip6_get( 127 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst) 128 { 129 struct br_ip br_dst; 130 131 ipv6_addr_copy(&br_dst.u.ip6, dst); 132 br_dst.proto = htons(ETH_P_IPV6); 133 134 return br_mdb_ip_get(mdb, &br_dst); 135 } 136 #endif 137 138 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 139 struct sk_buff *skb) 140 { 141 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); 142 struct br_ip ip; 143 144 if (br->multicast_disabled) 145 return NULL; 146 147 if (BR_INPUT_SKB_CB(skb)->igmp) 148 return NULL; 149 150 ip.proto = skb->protocol; 151 152 switch (skb->protocol) { 153 case htons(ETH_P_IP): 154 ip.u.ip4 = ip_hdr(skb)->daddr; 155 break; 156 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 157 case htons(ETH_P_IPV6): 158 ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr); 159 break; 160 #endif 161 default: 162 return NULL; 163 } 164 165 return br_mdb_ip_get(mdb, &ip); 166 } 167 168 static void br_mdb_free(struct rcu_head *head) 169 { 170 struct net_bridge_mdb_htable *mdb = 171 container_of(head, struct net_bridge_mdb_htable, rcu); 172 struct net_bridge_mdb_htable *old = mdb->old; 173 174 mdb->old = NULL; 175 kfree(old->mhash); 176 kfree(old); 177 } 178 179 static int br_mdb_copy(struct net_bridge_mdb_htable *new, 180 struct net_bridge_mdb_htable *old, 181 int elasticity) 182 { 183 struct net_bridge_mdb_entry *mp; 184 struct hlist_node *p; 185 int maxlen; 186 int len; 187 int i; 188 189 for (i = 0; i < old->max; i++) 190 hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) 191 hlist_add_head(&mp->hlist[new->ver], 192 &new->mhash[br_ip_hash(new, &mp->addr)]); 193 194 if (!elasticity) 195 return 0; 196 197 maxlen = 0; 198 for (i = 0; i < new->max; i++) { 199 len = 0; 200 hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver]) 201 len++; 202 if (len > maxlen) 203 maxlen = len; 204 } 205 206 return maxlen > elasticity ? -EINVAL : 0; 207 } 208 209 static void br_multicast_free_pg(struct rcu_head *head) 210 { 211 struct net_bridge_port_group *p = 212 container_of(head, struct net_bridge_port_group, rcu); 213 214 kfree(p); 215 } 216 217 static void br_multicast_free_group(struct rcu_head *head) 218 { 219 struct net_bridge_mdb_entry *mp = 220 container_of(head, struct net_bridge_mdb_entry, rcu); 221 222 kfree(mp); 223 } 224 225 static void br_multicast_group_expired(unsigned long data) 226 { 227 struct net_bridge_mdb_entry *mp = (void *)data; 228 struct net_bridge *br = mp->br; 229 struct net_bridge_mdb_htable *mdb; 230 231 spin_lock(&br->multicast_lock); 232 if (!netif_running(br->dev) || timer_pending(&mp->timer)) 233 goto out; 234 235 if (!hlist_unhashed(&mp->mglist)) 236 hlist_del_init(&mp->mglist); 237 238 if (mp->ports) 239 goto out; 240 241 mdb = mlock_dereference(br->mdb, br); 242 243 hlist_del_rcu(&mp->hlist[mdb->ver]); 244 mdb->size--; 245 246 del_timer(&mp->query_timer); 247 call_rcu_bh(&mp->rcu, br_multicast_free_group); 248 249 out: 250 spin_unlock(&br->multicast_lock); 251 } 252 253 static void br_multicast_del_pg(struct net_bridge *br, 254 struct net_bridge_port_group *pg) 255 { 256 struct net_bridge_mdb_htable *mdb; 257 struct net_bridge_mdb_entry *mp; 258 struct net_bridge_port_group *p; 259 struct net_bridge_port_group __rcu **pp; 260 261 mdb = mlock_dereference(br->mdb, br); 262 263 mp = br_mdb_ip_get(mdb, &pg->addr); 264 if (WARN_ON(!mp)) 265 return; 266 267 for (pp = &mp->ports; 268 (p = mlock_dereference(*pp, br)) != NULL; 269 pp = &p->next) { 270 if (p != pg) 271 continue; 272 273 rcu_assign_pointer(*pp, p->next); 274 hlist_del_init(&p->mglist); 275 del_timer(&p->timer); 276 del_timer(&p->query_timer); 277 call_rcu_bh(&p->rcu, br_multicast_free_pg); 278 279 if (!mp->ports && hlist_unhashed(&mp->mglist) && 280 netif_running(br->dev)) 281 mod_timer(&mp->timer, jiffies); 282 283 return; 284 } 285 286 WARN_ON(1); 287 } 288 289 static void br_multicast_port_group_expired(unsigned long data) 290 { 291 struct net_bridge_port_group *pg = (void *)data; 292 struct net_bridge *br = pg->port->br; 293 294 spin_lock(&br->multicast_lock); 295 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 296 hlist_unhashed(&pg->mglist)) 297 goto out; 298 299 br_multicast_del_pg(br, pg); 300 301 out: 302 spin_unlock(&br->multicast_lock); 303 } 304 305 static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, 306 int elasticity) 307 { 308 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1); 309 struct net_bridge_mdb_htable *mdb; 310 int err; 311 312 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC); 313 if (!mdb) 314 return -ENOMEM; 315 316 mdb->max = max; 317 mdb->old = old; 318 319 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC); 320 if (!mdb->mhash) { 321 kfree(mdb); 322 return -ENOMEM; 323 } 324 325 mdb->size = old ? old->size : 0; 326 mdb->ver = old ? old->ver ^ 1 : 0; 327 328 if (!old || elasticity) 329 get_random_bytes(&mdb->secret, sizeof(mdb->secret)); 330 else 331 mdb->secret = old->secret; 332 333 if (!old) 334 goto out; 335 336 err = br_mdb_copy(mdb, old, elasticity); 337 if (err) { 338 kfree(mdb->mhash); 339 kfree(mdb); 340 return err; 341 } 342 343 call_rcu_bh(&mdb->rcu, br_mdb_free); 344 345 out: 346 rcu_assign_pointer(*mdbp, mdb); 347 348 return 0; 349 } 350 351 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 352 __be32 group) 353 { 354 struct sk_buff *skb; 355 struct igmphdr *ih; 356 struct ethhdr *eth; 357 struct iphdr *iph; 358 359 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + 360 sizeof(*ih) + 4); 361 if (!skb) 362 goto out; 363 364 skb->protocol = htons(ETH_P_IP); 365 366 skb_reset_mac_header(skb); 367 eth = eth_hdr(skb); 368 369 memcpy(eth->h_source, br->dev->dev_addr, 6); 370 eth->h_dest[0] = 1; 371 eth->h_dest[1] = 0; 372 eth->h_dest[2] = 0x5e; 373 eth->h_dest[3] = 0; 374 eth->h_dest[4] = 0; 375 eth->h_dest[5] = 1; 376 eth->h_proto = htons(ETH_P_IP); 377 skb_put(skb, sizeof(*eth)); 378 379 skb_set_network_header(skb, skb->len); 380 iph = ip_hdr(skb); 381 382 iph->version = 4; 383 iph->ihl = 6; 384 iph->tos = 0xc0; 385 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4); 386 iph->id = 0; 387 iph->frag_off = htons(IP_DF); 388 iph->ttl = 1; 389 iph->protocol = IPPROTO_IGMP; 390 iph->saddr = 0; 391 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 392 ((u8 *)&iph[1])[0] = IPOPT_RA; 393 ((u8 *)&iph[1])[1] = 4; 394 ((u8 *)&iph[1])[2] = 0; 395 ((u8 *)&iph[1])[3] = 0; 396 ip_send_check(iph); 397 skb_put(skb, 24); 398 399 skb_set_transport_header(skb, skb->len); 400 ih = igmp_hdr(skb); 401 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 402 ih->code = (group ? br->multicast_last_member_interval : 403 br->multicast_query_response_interval) / 404 (HZ / IGMP_TIMER_SCALE); 405 ih->group = group; 406 ih->csum = 0; 407 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); 408 skb_put(skb, sizeof(*ih)); 409 410 __skb_pull(skb, sizeof(*eth)); 411 412 out: 413 return skb; 414 } 415 416 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 417 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 418 struct in6_addr *group) 419 { 420 struct sk_buff *skb; 421 struct ipv6hdr *ip6h; 422 struct mld_msg *mldq; 423 struct ethhdr *eth; 424 u8 *hopopt; 425 unsigned long interval; 426 427 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + 428 8 + sizeof(*mldq)); 429 if (!skb) 430 goto out; 431 432 skb->protocol = htons(ETH_P_IPV6); 433 434 /* Ethernet header */ 435 skb_reset_mac_header(skb); 436 eth = eth_hdr(skb); 437 438 memcpy(eth->h_source, br->dev->dev_addr, 6); 439 ipv6_eth_mc_map(group, eth->h_dest); 440 eth->h_proto = htons(ETH_P_IPV6); 441 skb_put(skb, sizeof(*eth)); 442 443 /* IPv6 header + HbH option */ 444 skb_set_network_header(skb, skb->len); 445 ip6h = ipv6_hdr(skb); 446 447 *(__force __be32 *)ip6h = htonl(0x60000000); 448 ip6h->payload_len = htons(8 + sizeof(*mldq)); 449 ip6h->nexthdr = IPPROTO_HOPOPTS; 450 ip6h->hop_limit = 1; 451 ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); 452 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 453 454 hopopt = (u8 *)(ip6h + 1); 455 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 456 hopopt[1] = 0; /* length of HbH */ 457 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 458 hopopt[3] = 2; /* Length of RA Option */ 459 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 460 hopopt[5] = 0; 461 hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ 462 hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ 463 464 skb_put(skb, sizeof(*ip6h) + 8); 465 466 /* ICMPv6 */ 467 skb_set_transport_header(skb, skb->len); 468 mldq = (struct mld_msg *) icmp6_hdr(skb); 469 470 interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : 471 br->multicast_query_response_interval; 472 473 mldq->mld_type = ICMPV6_MGM_QUERY; 474 mldq->mld_code = 0; 475 mldq->mld_cksum = 0; 476 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 477 mldq->mld_reserved = 0; 478 ipv6_addr_copy(&mldq->mld_mca, group); 479 480 /* checksum */ 481 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 482 sizeof(*mldq), IPPROTO_ICMPV6, 483 csum_partial(mldq, 484 sizeof(*mldq), 0)); 485 skb_put(skb, sizeof(*mldq)); 486 487 __skb_pull(skb, sizeof(*eth)); 488 489 out: 490 return skb; 491 } 492 #endif 493 494 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 495 struct br_ip *addr) 496 { 497 switch (addr->proto) { 498 case htons(ETH_P_IP): 499 return br_ip4_multicast_alloc_query(br, addr->u.ip4); 500 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 501 case htons(ETH_P_IPV6): 502 return br_ip6_multicast_alloc_query(br, &addr->u.ip6); 503 #endif 504 } 505 return NULL; 506 } 507 508 static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) 509 { 510 struct net_bridge *br = mp->br; 511 struct sk_buff *skb; 512 513 skb = br_multicast_alloc_query(br, &mp->addr); 514 if (!skb) 515 goto timer; 516 517 netif_rx(skb); 518 519 timer: 520 if (++mp->queries_sent < br->multicast_last_member_count) 521 mod_timer(&mp->query_timer, 522 jiffies + br->multicast_last_member_interval); 523 } 524 525 static void br_multicast_group_query_expired(unsigned long data) 526 { 527 struct net_bridge_mdb_entry *mp = (void *)data; 528 struct net_bridge *br = mp->br; 529 530 spin_lock(&br->multicast_lock); 531 if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) || 532 mp->queries_sent >= br->multicast_last_member_count) 533 goto out; 534 535 br_multicast_send_group_query(mp); 536 537 out: 538 spin_unlock(&br->multicast_lock); 539 } 540 541 static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg) 542 { 543 struct net_bridge_port *port = pg->port; 544 struct net_bridge *br = port->br; 545 struct sk_buff *skb; 546 547 skb = br_multicast_alloc_query(br, &pg->addr); 548 if (!skb) 549 goto timer; 550 551 br_deliver(port, skb); 552 553 timer: 554 if (++pg->queries_sent < br->multicast_last_member_count) 555 mod_timer(&pg->query_timer, 556 jiffies + br->multicast_last_member_interval); 557 } 558 559 static void br_multicast_port_group_query_expired(unsigned long data) 560 { 561 struct net_bridge_port_group *pg = (void *)data; 562 struct net_bridge_port *port = pg->port; 563 struct net_bridge *br = port->br; 564 565 spin_lock(&br->multicast_lock); 566 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) || 567 pg->queries_sent >= br->multicast_last_member_count) 568 goto out; 569 570 br_multicast_send_port_group_query(pg); 571 572 out: 573 spin_unlock(&br->multicast_lock); 574 } 575 576 static struct net_bridge_mdb_entry *br_multicast_get_group( 577 struct net_bridge *br, struct net_bridge_port *port, 578 struct br_ip *group, int hash) 579 { 580 struct net_bridge_mdb_htable *mdb; 581 struct net_bridge_mdb_entry *mp; 582 struct hlist_node *p; 583 unsigned count = 0; 584 unsigned max; 585 int elasticity; 586 int err; 587 588 mdb = rcu_dereference_protected(br->mdb, 1); 589 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 590 count++; 591 if (unlikely(br_ip_equal(group, &mp->addr))) 592 return mp; 593 } 594 595 elasticity = 0; 596 max = mdb->max; 597 598 if (unlikely(count > br->hash_elasticity && count)) { 599 if (net_ratelimit()) 600 br_info(br, "Multicast hash table " 601 "chain limit reached: %s\n", 602 port ? port->dev->name : br->dev->name); 603 604 elasticity = br->hash_elasticity; 605 } 606 607 if (mdb->size >= max) { 608 max *= 2; 609 if (unlikely(max >= br->hash_max)) { 610 br_warn(br, "Multicast hash table maximum " 611 "reached, disabling snooping: %s, %d\n", 612 port ? port->dev->name : br->dev->name, max); 613 err = -E2BIG; 614 disable: 615 br->multicast_disabled = 1; 616 goto err; 617 } 618 } 619 620 if (max > mdb->max || elasticity) { 621 if (mdb->old) { 622 if (net_ratelimit()) 623 br_info(br, "Multicast hash table " 624 "on fire: %s\n", 625 port ? port->dev->name : br->dev->name); 626 err = -EEXIST; 627 goto err; 628 } 629 630 err = br_mdb_rehash(&br->mdb, max, elasticity); 631 if (err) { 632 br_warn(br, "Cannot rehash multicast " 633 "hash table, disabling snooping: %s, %d, %d\n", 634 port ? port->dev->name : br->dev->name, 635 mdb->size, err); 636 goto disable; 637 } 638 639 err = -EAGAIN; 640 goto err; 641 } 642 643 return NULL; 644 645 err: 646 mp = ERR_PTR(err); 647 return mp; 648 } 649 650 static struct net_bridge_mdb_entry *br_multicast_new_group( 651 struct net_bridge *br, struct net_bridge_port *port, 652 struct br_ip *group) 653 { 654 struct net_bridge_mdb_htable *mdb; 655 struct net_bridge_mdb_entry *mp; 656 int hash; 657 int err; 658 659 mdb = rcu_dereference_protected(br->mdb, 1); 660 if (!mdb) { 661 err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0); 662 if (err) 663 return ERR_PTR(err); 664 goto rehash; 665 } 666 667 hash = br_ip_hash(mdb, group); 668 mp = br_multicast_get_group(br, port, group, hash); 669 switch (PTR_ERR(mp)) { 670 case 0: 671 break; 672 673 case -EAGAIN: 674 rehash: 675 mdb = rcu_dereference_protected(br->mdb, 1); 676 hash = br_ip_hash(mdb, group); 677 break; 678 679 default: 680 goto out; 681 } 682 683 mp = kzalloc(sizeof(*mp), GFP_ATOMIC); 684 if (unlikely(!mp)) 685 return ERR_PTR(-ENOMEM); 686 687 mp->br = br; 688 mp->addr = *group; 689 setup_timer(&mp->timer, br_multicast_group_expired, 690 (unsigned long)mp); 691 setup_timer(&mp->query_timer, br_multicast_group_query_expired, 692 (unsigned long)mp); 693 694 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 695 mdb->size++; 696 697 out: 698 return mp; 699 } 700 701 static int br_multicast_add_group(struct net_bridge *br, 702 struct net_bridge_port *port, 703 struct br_ip *group) 704 { 705 struct net_bridge_mdb_entry *mp; 706 struct net_bridge_port_group *p; 707 struct net_bridge_port_group __rcu **pp; 708 unsigned long now = jiffies; 709 int err; 710 711 spin_lock(&br->multicast_lock); 712 if (!netif_running(br->dev) || 713 (port && port->state == BR_STATE_DISABLED)) 714 goto out; 715 716 mp = br_multicast_new_group(br, port, group); 717 err = PTR_ERR(mp); 718 if (IS_ERR(mp)) 719 goto err; 720 721 if (!port) { 722 hlist_add_head(&mp->mglist, &br->mglist); 723 mod_timer(&mp->timer, now + br->multicast_membership_interval); 724 goto out; 725 } 726 727 for (pp = &mp->ports; 728 (p = mlock_dereference(*pp, br)) != NULL; 729 pp = &p->next) { 730 if (p->port == port) 731 goto found; 732 if ((unsigned long)p->port < (unsigned long)port) 733 break; 734 } 735 736 p = kzalloc(sizeof(*p), GFP_ATOMIC); 737 err = -ENOMEM; 738 if (unlikely(!p)) 739 goto err; 740 741 p->addr = *group; 742 p->port = port; 743 p->next = *pp; 744 hlist_add_head(&p->mglist, &port->mglist); 745 setup_timer(&p->timer, br_multicast_port_group_expired, 746 (unsigned long)p); 747 setup_timer(&p->query_timer, br_multicast_port_group_query_expired, 748 (unsigned long)p); 749 750 rcu_assign_pointer(*pp, p); 751 752 found: 753 mod_timer(&p->timer, now + br->multicast_membership_interval); 754 out: 755 err = 0; 756 757 err: 758 spin_unlock(&br->multicast_lock); 759 return err; 760 } 761 762 static int br_ip4_multicast_add_group(struct net_bridge *br, 763 struct net_bridge_port *port, 764 __be32 group) 765 { 766 struct br_ip br_group; 767 768 if (ipv4_is_local_multicast(group)) 769 return 0; 770 771 br_group.u.ip4 = group; 772 br_group.proto = htons(ETH_P_IP); 773 774 return br_multicast_add_group(br, port, &br_group); 775 } 776 777 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 778 static int br_ip6_multicast_add_group(struct net_bridge *br, 779 struct net_bridge_port *port, 780 const struct in6_addr *group) 781 { 782 struct br_ip br_group; 783 784 if (ipv6_is_local_multicast(group)) 785 return 0; 786 787 ipv6_addr_copy(&br_group.u.ip6, group); 788 br_group.proto = htons(ETH_P_IP); 789 790 return br_multicast_add_group(br, port, &br_group); 791 } 792 #endif 793 794 static void br_multicast_router_expired(unsigned long data) 795 { 796 struct net_bridge_port *port = (void *)data; 797 struct net_bridge *br = port->br; 798 799 spin_lock(&br->multicast_lock); 800 if (port->multicast_router != 1 || 801 timer_pending(&port->multicast_router_timer) || 802 hlist_unhashed(&port->rlist)) 803 goto out; 804 805 hlist_del_init_rcu(&port->rlist); 806 807 out: 808 spin_unlock(&br->multicast_lock); 809 } 810 811 static void br_multicast_local_router_expired(unsigned long data) 812 { 813 } 814 815 static void __br_multicast_send_query(struct net_bridge *br, 816 struct net_bridge_port *port, 817 struct br_ip *ip) 818 { 819 struct sk_buff *skb; 820 821 skb = br_multicast_alloc_query(br, ip); 822 if (!skb) 823 return; 824 825 if (port) { 826 __skb_push(skb, sizeof(struct ethhdr)); 827 skb->dev = port->dev; 828 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 829 dev_queue_xmit); 830 } else 831 netif_rx(skb); 832 } 833 834 static void br_multicast_send_query(struct net_bridge *br, 835 struct net_bridge_port *port, u32 sent) 836 { 837 unsigned long time; 838 struct br_ip br_group; 839 840 if (!netif_running(br->dev) || br->multicast_disabled || 841 timer_pending(&br->multicast_querier_timer)) 842 return; 843 844 memset(&br_group.u, 0, sizeof(br_group.u)); 845 846 br_group.proto = htons(ETH_P_IP); 847 __br_multicast_send_query(br, port, &br_group); 848 849 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 850 br_group.proto = htons(ETH_P_IPV6); 851 __br_multicast_send_query(br, port, &br_group); 852 #endif 853 854 time = jiffies; 855 time += sent < br->multicast_startup_query_count ? 856 br->multicast_startup_query_interval : 857 br->multicast_query_interval; 858 mod_timer(port ? &port->multicast_query_timer : 859 &br->multicast_query_timer, time); 860 } 861 862 static void br_multicast_port_query_expired(unsigned long data) 863 { 864 struct net_bridge_port *port = (void *)data; 865 struct net_bridge *br = port->br; 866 867 spin_lock(&br->multicast_lock); 868 if (port->state == BR_STATE_DISABLED || 869 port->state == BR_STATE_BLOCKING) 870 goto out; 871 872 if (port->multicast_startup_queries_sent < 873 br->multicast_startup_query_count) 874 port->multicast_startup_queries_sent++; 875 876 br_multicast_send_query(port->br, port, 877 port->multicast_startup_queries_sent); 878 879 out: 880 spin_unlock(&br->multicast_lock); 881 } 882 883 void br_multicast_add_port(struct net_bridge_port *port) 884 { 885 port->multicast_router = 1; 886 887 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 888 (unsigned long)port); 889 setup_timer(&port->multicast_query_timer, 890 br_multicast_port_query_expired, (unsigned long)port); 891 } 892 893 void br_multicast_del_port(struct net_bridge_port *port) 894 { 895 del_timer_sync(&port->multicast_router_timer); 896 } 897 898 static void __br_multicast_enable_port(struct net_bridge_port *port) 899 { 900 port->multicast_startup_queries_sent = 0; 901 902 if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || 903 del_timer(&port->multicast_query_timer)) 904 mod_timer(&port->multicast_query_timer, jiffies); 905 } 906 907 void br_multicast_enable_port(struct net_bridge_port *port) 908 { 909 struct net_bridge *br = port->br; 910 911 spin_lock(&br->multicast_lock); 912 if (br->multicast_disabled || !netif_running(br->dev)) 913 goto out; 914 915 __br_multicast_enable_port(port); 916 917 out: 918 spin_unlock(&br->multicast_lock); 919 } 920 921 void br_multicast_disable_port(struct net_bridge_port *port) 922 { 923 struct net_bridge *br = port->br; 924 struct net_bridge_port_group *pg; 925 struct hlist_node *p, *n; 926 927 spin_lock(&br->multicast_lock); 928 hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist) 929 br_multicast_del_pg(br, pg); 930 931 if (!hlist_unhashed(&port->rlist)) 932 hlist_del_init_rcu(&port->rlist); 933 del_timer(&port->multicast_router_timer); 934 del_timer(&port->multicast_query_timer); 935 spin_unlock(&br->multicast_lock); 936 } 937 938 static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 939 struct net_bridge_port *port, 940 struct sk_buff *skb) 941 { 942 struct igmpv3_report *ih; 943 struct igmpv3_grec *grec; 944 int i; 945 int len; 946 int num; 947 int type; 948 int err = 0; 949 __be32 group; 950 951 if (!pskb_may_pull(skb, sizeof(*ih))) 952 return -EINVAL; 953 954 ih = igmpv3_report_hdr(skb); 955 num = ntohs(ih->ngrec); 956 len = sizeof(*ih); 957 958 for (i = 0; i < num; i++) { 959 len += sizeof(*grec); 960 if (!pskb_may_pull(skb, len)) 961 return -EINVAL; 962 963 grec = (void *)(skb->data + len - sizeof(*grec)); 964 group = grec->grec_mca; 965 type = grec->grec_type; 966 967 len += ntohs(grec->grec_nsrcs) * 4; 968 if (!pskb_may_pull(skb, len)) 969 return -EINVAL; 970 971 /* We treat this as an IGMPv2 report for now. */ 972 switch (type) { 973 case IGMPV3_MODE_IS_INCLUDE: 974 case IGMPV3_MODE_IS_EXCLUDE: 975 case IGMPV3_CHANGE_TO_INCLUDE: 976 case IGMPV3_CHANGE_TO_EXCLUDE: 977 case IGMPV3_ALLOW_NEW_SOURCES: 978 case IGMPV3_BLOCK_OLD_SOURCES: 979 break; 980 981 default: 982 continue; 983 } 984 985 err = br_ip4_multicast_add_group(br, port, group); 986 if (err) 987 break; 988 } 989 990 return err; 991 } 992 993 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 994 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 995 struct net_bridge_port *port, 996 struct sk_buff *skb) 997 { 998 struct icmp6hdr *icmp6h; 999 struct mld2_grec *grec; 1000 int i; 1001 int len; 1002 int num; 1003 int err = 0; 1004 1005 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1006 return -EINVAL; 1007 1008 icmp6h = icmp6_hdr(skb); 1009 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1010 len = sizeof(*icmp6h); 1011 1012 for (i = 0; i < num; i++) { 1013 __be16 *nsrcs, _nsrcs; 1014 1015 nsrcs = skb_header_pointer(skb, 1016 len + offsetof(struct mld2_grec, 1017 grec_mca), 1018 sizeof(_nsrcs), &_nsrcs); 1019 if (!nsrcs) 1020 return -EINVAL; 1021 1022 if (!pskb_may_pull(skb, 1023 len + sizeof(*grec) + 1024 sizeof(struct in6_addr) * (*nsrcs))) 1025 return -EINVAL; 1026 1027 grec = (struct mld2_grec *)(skb->data + len); 1028 len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); 1029 1030 /* We treat these as MLDv1 reports for now. */ 1031 switch (grec->grec_type) { 1032 case MLD2_MODE_IS_INCLUDE: 1033 case MLD2_MODE_IS_EXCLUDE: 1034 case MLD2_CHANGE_TO_INCLUDE: 1035 case MLD2_CHANGE_TO_EXCLUDE: 1036 case MLD2_ALLOW_NEW_SOURCES: 1037 case MLD2_BLOCK_OLD_SOURCES: 1038 break; 1039 1040 default: 1041 continue; 1042 } 1043 1044 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); 1045 if (!err) 1046 break; 1047 } 1048 1049 return err; 1050 } 1051 #endif 1052 1053 /* 1054 * Add port to rotuer_list 1055 * list is maintained ordered by pointer value 1056 * and locked by br->multicast_lock and RCU 1057 */ 1058 static void br_multicast_add_router(struct net_bridge *br, 1059 struct net_bridge_port *port) 1060 { 1061 struct net_bridge_port *p; 1062 struct hlist_node *n, *slot = NULL; 1063 1064 hlist_for_each_entry(p, n, &br->router_list, rlist) { 1065 if ((unsigned long) port >= (unsigned long) p) 1066 break; 1067 slot = n; 1068 } 1069 1070 if (slot) 1071 hlist_add_after_rcu(slot, &port->rlist); 1072 else 1073 hlist_add_head_rcu(&port->rlist, &br->router_list); 1074 } 1075 1076 static void br_multicast_mark_router(struct net_bridge *br, 1077 struct net_bridge_port *port) 1078 { 1079 unsigned long now = jiffies; 1080 1081 if (!port) { 1082 if (br->multicast_router == 1) 1083 mod_timer(&br->multicast_router_timer, 1084 now + br->multicast_querier_interval); 1085 return; 1086 } 1087 1088 if (port->multicast_router != 1) 1089 return; 1090 1091 if (!hlist_unhashed(&port->rlist)) 1092 goto timer; 1093 1094 br_multicast_add_router(br, port); 1095 1096 timer: 1097 mod_timer(&port->multicast_router_timer, 1098 now + br->multicast_querier_interval); 1099 } 1100 1101 static void br_multicast_query_received(struct net_bridge *br, 1102 struct net_bridge_port *port, 1103 int saddr) 1104 { 1105 if (saddr) 1106 mod_timer(&br->multicast_querier_timer, 1107 jiffies + br->multicast_querier_interval); 1108 else if (timer_pending(&br->multicast_querier_timer)) 1109 return; 1110 1111 br_multicast_mark_router(br, port); 1112 } 1113 1114 static int br_ip4_multicast_query(struct net_bridge *br, 1115 struct net_bridge_port *port, 1116 struct sk_buff *skb) 1117 { 1118 struct iphdr *iph = ip_hdr(skb); 1119 struct igmphdr *ih = igmp_hdr(skb); 1120 struct net_bridge_mdb_entry *mp; 1121 struct igmpv3_query *ih3; 1122 struct net_bridge_port_group *p; 1123 struct net_bridge_port_group __rcu **pp; 1124 unsigned long max_delay; 1125 unsigned long now = jiffies; 1126 __be32 group; 1127 int err = 0; 1128 1129 spin_lock(&br->multicast_lock); 1130 if (!netif_running(br->dev) || 1131 (port && port->state == BR_STATE_DISABLED)) 1132 goto out; 1133 1134 br_multicast_query_received(br, port, !!iph->saddr); 1135 1136 group = ih->group; 1137 1138 if (skb->len == sizeof(*ih)) { 1139 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); 1140 1141 if (!max_delay) { 1142 max_delay = 10 * HZ; 1143 group = 0; 1144 } 1145 } else { 1146 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) { 1147 err = -EINVAL; 1148 goto out; 1149 } 1150 1151 ih3 = igmpv3_query_hdr(skb); 1152 if (ih3->nsrcs) 1153 goto out; 1154 1155 max_delay = ih3->code ? 1156 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1157 } 1158 1159 if (!group) 1160 goto out; 1161 1162 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group); 1163 if (!mp) 1164 goto out; 1165 1166 max_delay *= br->multicast_last_member_count; 1167 1168 if (!hlist_unhashed(&mp->mglist) && 1169 (timer_pending(&mp->timer) ? 1170 time_after(mp->timer.expires, now + max_delay) : 1171 try_to_del_timer_sync(&mp->timer) >= 0)) 1172 mod_timer(&mp->timer, now + max_delay); 1173 1174 for (pp = &mp->ports; 1175 (p = mlock_dereference(*pp, br)) != NULL; 1176 pp = &p->next) { 1177 if (timer_pending(&p->timer) ? 1178 time_after(p->timer.expires, now + max_delay) : 1179 try_to_del_timer_sync(&p->timer) >= 0) 1180 mod_timer(&mp->timer, now + max_delay); 1181 } 1182 1183 out: 1184 spin_unlock(&br->multicast_lock); 1185 return err; 1186 } 1187 1188 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1189 static int br_ip6_multicast_query(struct net_bridge *br, 1190 struct net_bridge_port *port, 1191 struct sk_buff *skb) 1192 { 1193 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1195 struct net_bridge_mdb_entry *mp; 1196 struct mld2_query *mld2q; 1197 struct net_bridge_port_group *p; 1198 struct net_bridge_port_group __rcu **pp; 1199 unsigned long max_delay; 1200 unsigned long now = jiffies; 1201 struct in6_addr *group = NULL; 1202 int err = 0; 1203 1204 spin_lock(&br->multicast_lock); 1205 if (!netif_running(br->dev) || 1206 (port && port->state == BR_STATE_DISABLED)) 1207 goto out; 1208 1209 br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr)); 1210 1211 if (skb->len == sizeof(*mld)) { 1212 if (!pskb_may_pull(skb, sizeof(*mld))) { 1213 err = -EINVAL; 1214 goto out; 1215 } 1216 mld = (struct mld_msg *) icmp6_hdr(skb); 1217 max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay)); 1218 if (max_delay) 1219 group = &mld->mld_mca; 1220 } else if (skb->len >= sizeof(*mld2q)) { 1221 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1222 err = -EINVAL; 1223 goto out; 1224 } 1225 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1226 if (!mld2q->mld2q_nsrcs) 1227 group = &mld2q->mld2q_mca; 1228 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1; 1229 } 1230 1231 if (!group) 1232 goto out; 1233 1234 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group); 1235 if (!mp) 1236 goto out; 1237 1238 max_delay *= br->multicast_last_member_count; 1239 if (!hlist_unhashed(&mp->mglist) && 1240 (timer_pending(&mp->timer) ? 1241 time_after(mp->timer.expires, now + max_delay) : 1242 try_to_del_timer_sync(&mp->timer) >= 0)) 1243 mod_timer(&mp->timer, now + max_delay); 1244 1245 for (pp = &mp->ports; 1246 (p = mlock_dereference(*pp, br)) != NULL; 1247 pp = &p->next) { 1248 if (timer_pending(&p->timer) ? 1249 time_after(p->timer.expires, now + max_delay) : 1250 try_to_del_timer_sync(&p->timer) >= 0) 1251 mod_timer(&mp->timer, now + max_delay); 1252 } 1253 1254 out: 1255 spin_unlock(&br->multicast_lock); 1256 return err; 1257 } 1258 #endif 1259 1260 static void br_multicast_leave_group(struct net_bridge *br, 1261 struct net_bridge_port *port, 1262 struct br_ip *group) 1263 { 1264 struct net_bridge_mdb_htable *mdb; 1265 struct net_bridge_mdb_entry *mp; 1266 struct net_bridge_port_group *p; 1267 unsigned long now; 1268 unsigned long time; 1269 1270 spin_lock(&br->multicast_lock); 1271 if (!netif_running(br->dev) || 1272 (port && port->state == BR_STATE_DISABLED) || 1273 timer_pending(&br->multicast_querier_timer)) 1274 goto out; 1275 1276 mdb = mlock_dereference(br->mdb, br); 1277 mp = br_mdb_ip_get(mdb, group); 1278 if (!mp) 1279 goto out; 1280 1281 now = jiffies; 1282 time = now + br->multicast_last_member_count * 1283 br->multicast_last_member_interval; 1284 1285 if (!port) { 1286 if (!hlist_unhashed(&mp->mglist) && 1287 (timer_pending(&mp->timer) ? 1288 time_after(mp->timer.expires, time) : 1289 try_to_del_timer_sync(&mp->timer) >= 0)) { 1290 mod_timer(&mp->timer, time); 1291 1292 mp->queries_sent = 0; 1293 mod_timer(&mp->query_timer, now); 1294 } 1295 1296 goto out; 1297 } 1298 1299 for (p = mlock_dereference(mp->ports, br); 1300 p != NULL; 1301 p = mlock_dereference(p->next, br)) { 1302 if (p->port != port) 1303 continue; 1304 1305 if (!hlist_unhashed(&p->mglist) && 1306 (timer_pending(&p->timer) ? 1307 time_after(p->timer.expires, time) : 1308 try_to_del_timer_sync(&p->timer) >= 0)) { 1309 mod_timer(&p->timer, time); 1310 1311 p->queries_sent = 0; 1312 mod_timer(&p->query_timer, now); 1313 } 1314 1315 break; 1316 } 1317 1318 out: 1319 spin_unlock(&br->multicast_lock); 1320 } 1321 1322 static void br_ip4_multicast_leave_group(struct net_bridge *br, 1323 struct net_bridge_port *port, 1324 __be32 group) 1325 { 1326 struct br_ip br_group; 1327 1328 if (ipv4_is_local_multicast(group)) 1329 return; 1330 1331 br_group.u.ip4 = group; 1332 br_group.proto = htons(ETH_P_IP); 1333 1334 br_multicast_leave_group(br, port, &br_group); 1335 } 1336 1337 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1338 static void br_ip6_multicast_leave_group(struct net_bridge *br, 1339 struct net_bridge_port *port, 1340 const struct in6_addr *group) 1341 { 1342 struct br_ip br_group; 1343 1344 if (ipv6_is_local_multicast(group)) 1345 return; 1346 1347 ipv6_addr_copy(&br_group.u.ip6, group); 1348 br_group.proto = htons(ETH_P_IPV6); 1349 1350 br_multicast_leave_group(br, port, &br_group); 1351 } 1352 #endif 1353 1354 static int br_multicast_ipv4_rcv(struct net_bridge *br, 1355 struct net_bridge_port *port, 1356 struct sk_buff *skb) 1357 { 1358 struct sk_buff *skb2 = skb; 1359 struct iphdr *iph; 1360 struct igmphdr *ih; 1361 unsigned len; 1362 unsigned offset; 1363 int err; 1364 1365 /* We treat OOM as packet loss for now. */ 1366 if (!pskb_may_pull(skb, sizeof(*iph))) 1367 return -EINVAL; 1368 1369 iph = ip_hdr(skb); 1370 1371 if (iph->ihl < 5 || iph->version != 4) 1372 return -EINVAL; 1373 1374 if (!pskb_may_pull(skb, ip_hdrlen(skb))) 1375 return -EINVAL; 1376 1377 iph = ip_hdr(skb); 1378 1379 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1380 return -EINVAL; 1381 1382 if (iph->protocol != IPPROTO_IGMP) 1383 return 0; 1384 1385 len = ntohs(iph->tot_len); 1386 if (skb->len < len || len < ip_hdrlen(skb)) 1387 return -EINVAL; 1388 1389 if (skb->len > len) { 1390 skb2 = skb_clone(skb, GFP_ATOMIC); 1391 if (!skb2) 1392 return -ENOMEM; 1393 1394 err = pskb_trim_rcsum(skb2, len); 1395 if (err) 1396 goto err_out; 1397 } 1398 1399 len -= ip_hdrlen(skb2); 1400 offset = skb_network_offset(skb2) + ip_hdrlen(skb2); 1401 __skb_pull(skb2, offset); 1402 skb_reset_transport_header(skb2); 1403 1404 err = -EINVAL; 1405 if (!pskb_may_pull(skb2, sizeof(*ih))) 1406 goto out; 1407 1408 switch (skb2->ip_summed) { 1409 case CHECKSUM_COMPLETE: 1410 if (!csum_fold(skb2->csum)) 1411 break; 1412 /* fall through */ 1413 case CHECKSUM_NONE: 1414 skb2->csum = 0; 1415 if (skb_checksum_complete(skb2)) 1416 goto out; 1417 } 1418 1419 err = 0; 1420 1421 BR_INPUT_SKB_CB(skb)->igmp = 1; 1422 ih = igmp_hdr(skb2); 1423 1424 switch (ih->type) { 1425 case IGMP_HOST_MEMBERSHIP_REPORT: 1426 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1427 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1428 err = br_ip4_multicast_add_group(br, port, ih->group); 1429 break; 1430 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1431 err = br_ip4_multicast_igmp3_report(br, port, skb2); 1432 break; 1433 case IGMP_HOST_MEMBERSHIP_QUERY: 1434 err = br_ip4_multicast_query(br, port, skb2); 1435 break; 1436 case IGMP_HOST_LEAVE_MESSAGE: 1437 br_ip4_multicast_leave_group(br, port, ih->group); 1438 break; 1439 } 1440 1441 out: 1442 __skb_push(skb2, offset); 1443 err_out: 1444 if (skb2 != skb) 1445 kfree_skb(skb2); 1446 return err; 1447 } 1448 1449 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1450 static int br_multicast_ipv6_rcv(struct net_bridge *br, 1451 struct net_bridge_port *port, 1452 struct sk_buff *skb) 1453 { 1454 struct sk_buff *skb2; 1455 struct ipv6hdr *ip6h; 1456 struct icmp6hdr *icmp6h; 1457 u8 nexthdr; 1458 unsigned len; 1459 int offset; 1460 int err; 1461 1462 if (!pskb_may_pull(skb, sizeof(*ip6h))) 1463 return -EINVAL; 1464 1465 ip6h = ipv6_hdr(skb); 1466 1467 /* 1468 * We're interested in MLD messages only. 1469 * - Version is 6 1470 * - MLD has always Router Alert hop-by-hop option 1471 * - But we do not support jumbrograms. 1472 */ 1473 if (ip6h->version != 6 || 1474 ip6h->nexthdr != IPPROTO_HOPOPTS || 1475 ip6h->payload_len == 0) 1476 return 0; 1477 1478 len = ntohs(ip6h->payload_len); 1479 if (skb->len < len) 1480 return -EINVAL; 1481 1482 nexthdr = ip6h->nexthdr; 1483 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr); 1484 1485 if (offset < 0 || nexthdr != IPPROTO_ICMPV6) 1486 return 0; 1487 1488 /* Okay, we found ICMPv6 header */ 1489 skb2 = skb_clone(skb, GFP_ATOMIC); 1490 if (!skb2) 1491 return -ENOMEM; 1492 1493 err = -EINVAL; 1494 if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr))) 1495 goto out; 1496 1497 len -= offset - skb_network_offset(skb2); 1498 1499 __skb_pull(skb2, offset); 1500 skb_reset_transport_header(skb2); 1501 1502 icmp6h = icmp6_hdr(skb2); 1503 1504 switch (icmp6h->icmp6_type) { 1505 case ICMPV6_MGM_QUERY: 1506 case ICMPV6_MGM_REPORT: 1507 case ICMPV6_MGM_REDUCTION: 1508 case ICMPV6_MLD2_REPORT: 1509 break; 1510 default: 1511 err = 0; 1512 goto out; 1513 } 1514 1515 /* Okay, we found MLD message. Check further. */ 1516 if (skb2->len > len) { 1517 err = pskb_trim_rcsum(skb2, len); 1518 if (err) 1519 goto out; 1520 } 1521 1522 switch (skb2->ip_summed) { 1523 case CHECKSUM_COMPLETE: 1524 if (!csum_fold(skb2->csum)) 1525 break; 1526 /*FALLTHROUGH*/ 1527 case CHECKSUM_NONE: 1528 skb2->csum = 0; 1529 if (skb_checksum_complete(skb2)) 1530 goto out; 1531 } 1532 1533 err = 0; 1534 1535 BR_INPUT_SKB_CB(skb)->igmp = 1; 1536 1537 switch (icmp6h->icmp6_type) { 1538 case ICMPV6_MGM_REPORT: 1539 { 1540 struct mld_msg *mld; 1541 if (!pskb_may_pull(skb2, sizeof(*mld))) { 1542 err = -EINVAL; 1543 goto out; 1544 } 1545 mld = (struct mld_msg *)skb_transport_header(skb2); 1546 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1547 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1548 break; 1549 } 1550 case ICMPV6_MLD2_REPORT: 1551 err = br_ip6_multicast_mld2_report(br, port, skb2); 1552 break; 1553 case ICMPV6_MGM_QUERY: 1554 err = br_ip6_multicast_query(br, port, skb2); 1555 break; 1556 case ICMPV6_MGM_REDUCTION: 1557 { 1558 struct mld_msg *mld; 1559 if (!pskb_may_pull(skb2, sizeof(*mld))) { 1560 err = -EINVAL; 1561 goto out; 1562 } 1563 mld = (struct mld_msg *)skb_transport_header(skb2); 1564 br_ip6_multicast_leave_group(br, port, &mld->mld_mca); 1565 } 1566 } 1567 1568 out: 1569 kfree_skb(skb2); 1570 return err; 1571 } 1572 #endif 1573 1574 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1575 struct sk_buff *skb) 1576 { 1577 BR_INPUT_SKB_CB(skb)->igmp = 0; 1578 BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 1579 1580 if (br->multicast_disabled) 1581 return 0; 1582 1583 switch (skb->protocol) { 1584 case htons(ETH_P_IP): 1585 return br_multicast_ipv4_rcv(br, port, skb); 1586 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1587 case htons(ETH_P_IPV6): 1588 return br_multicast_ipv6_rcv(br, port, skb); 1589 #endif 1590 } 1591 1592 return 0; 1593 } 1594 1595 static void br_multicast_query_expired(unsigned long data) 1596 { 1597 struct net_bridge *br = (void *)data; 1598 1599 spin_lock(&br->multicast_lock); 1600 if (br->multicast_startup_queries_sent < 1601 br->multicast_startup_query_count) 1602 br->multicast_startup_queries_sent++; 1603 1604 br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent); 1605 1606 spin_unlock(&br->multicast_lock); 1607 } 1608 1609 void br_multicast_init(struct net_bridge *br) 1610 { 1611 br->hash_elasticity = 4; 1612 br->hash_max = 512; 1613 1614 br->multicast_router = 1; 1615 br->multicast_last_member_count = 2; 1616 br->multicast_startup_query_count = 2; 1617 1618 br->multicast_last_member_interval = HZ; 1619 br->multicast_query_response_interval = 10 * HZ; 1620 br->multicast_startup_query_interval = 125 * HZ / 4; 1621 br->multicast_query_interval = 125 * HZ; 1622 br->multicast_querier_interval = 255 * HZ; 1623 br->multicast_membership_interval = 260 * HZ; 1624 1625 spin_lock_init(&br->multicast_lock); 1626 setup_timer(&br->multicast_router_timer, 1627 br_multicast_local_router_expired, 0); 1628 setup_timer(&br->multicast_querier_timer, 1629 br_multicast_local_router_expired, 0); 1630 setup_timer(&br->multicast_query_timer, br_multicast_query_expired, 1631 (unsigned long)br); 1632 } 1633 1634 void br_multicast_open(struct net_bridge *br) 1635 { 1636 br->multicast_startup_queries_sent = 0; 1637 1638 if (br->multicast_disabled) 1639 return; 1640 1641 mod_timer(&br->multicast_query_timer, jiffies); 1642 } 1643 1644 void br_multicast_stop(struct net_bridge *br) 1645 { 1646 struct net_bridge_mdb_htable *mdb; 1647 struct net_bridge_mdb_entry *mp; 1648 struct hlist_node *p, *n; 1649 u32 ver; 1650 int i; 1651 1652 del_timer_sync(&br->multicast_router_timer); 1653 del_timer_sync(&br->multicast_querier_timer); 1654 del_timer_sync(&br->multicast_query_timer); 1655 1656 spin_lock_bh(&br->multicast_lock); 1657 mdb = mlock_dereference(br->mdb, br); 1658 if (!mdb) 1659 goto out; 1660 1661 br->mdb = NULL; 1662 1663 ver = mdb->ver; 1664 for (i = 0; i < mdb->max; i++) { 1665 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], 1666 hlist[ver]) { 1667 del_timer(&mp->timer); 1668 del_timer(&mp->query_timer); 1669 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1670 } 1671 } 1672 1673 if (mdb->old) { 1674 spin_unlock_bh(&br->multicast_lock); 1675 rcu_barrier_bh(); 1676 spin_lock_bh(&br->multicast_lock); 1677 WARN_ON(mdb->old); 1678 } 1679 1680 mdb->old = mdb; 1681 call_rcu_bh(&mdb->rcu, br_mdb_free); 1682 1683 out: 1684 spin_unlock_bh(&br->multicast_lock); 1685 } 1686 1687 int br_multicast_set_router(struct net_bridge *br, unsigned long val) 1688 { 1689 int err = -ENOENT; 1690 1691 spin_lock_bh(&br->multicast_lock); 1692 if (!netif_running(br->dev)) 1693 goto unlock; 1694 1695 switch (val) { 1696 case 0: 1697 case 2: 1698 del_timer(&br->multicast_router_timer); 1699 /* fall through */ 1700 case 1: 1701 br->multicast_router = val; 1702 err = 0; 1703 break; 1704 1705 default: 1706 err = -EINVAL; 1707 break; 1708 } 1709 1710 unlock: 1711 spin_unlock_bh(&br->multicast_lock); 1712 1713 return err; 1714 } 1715 1716 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) 1717 { 1718 struct net_bridge *br = p->br; 1719 int err = -ENOENT; 1720 1721 spin_lock(&br->multicast_lock); 1722 if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED) 1723 goto unlock; 1724 1725 switch (val) { 1726 case 0: 1727 case 1: 1728 case 2: 1729 p->multicast_router = val; 1730 err = 0; 1731 1732 if (val < 2 && !hlist_unhashed(&p->rlist)) 1733 hlist_del_init_rcu(&p->rlist); 1734 1735 if (val == 1) 1736 break; 1737 1738 del_timer(&p->multicast_router_timer); 1739 1740 if (val == 0) 1741 break; 1742 1743 br_multicast_add_router(br, p); 1744 break; 1745 1746 default: 1747 err = -EINVAL; 1748 break; 1749 } 1750 1751 unlock: 1752 spin_unlock(&br->multicast_lock); 1753 1754 return err; 1755 } 1756 1757 int br_multicast_toggle(struct net_bridge *br, unsigned long val) 1758 { 1759 struct net_bridge_port *port; 1760 int err = 0; 1761 struct net_bridge_mdb_htable *mdb; 1762 1763 spin_lock(&br->multicast_lock); 1764 if (br->multicast_disabled == !val) 1765 goto unlock; 1766 1767 br->multicast_disabled = !val; 1768 if (br->multicast_disabled) 1769 goto unlock; 1770 1771 if (!netif_running(br->dev)) 1772 goto unlock; 1773 1774 mdb = mlock_dereference(br->mdb, br); 1775 if (mdb) { 1776 if (mdb->old) { 1777 err = -EEXIST; 1778 rollback: 1779 br->multicast_disabled = !!val; 1780 goto unlock; 1781 } 1782 1783 err = br_mdb_rehash(&br->mdb, mdb->max, 1784 br->hash_elasticity); 1785 if (err) 1786 goto rollback; 1787 } 1788 1789 br_multicast_open(br); 1790 list_for_each_entry(port, &br->port_list, list) { 1791 if (port->state == BR_STATE_DISABLED || 1792 port->state == BR_STATE_BLOCKING) 1793 continue; 1794 1795 __br_multicast_enable_port(port); 1796 } 1797 1798 unlock: 1799 spin_unlock(&br->multicast_lock); 1800 1801 return err; 1802 } 1803 1804 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) 1805 { 1806 int err = -ENOENT; 1807 u32 old; 1808 struct net_bridge_mdb_htable *mdb; 1809 1810 spin_lock(&br->multicast_lock); 1811 if (!netif_running(br->dev)) 1812 goto unlock; 1813 1814 err = -EINVAL; 1815 if (!is_power_of_2(val)) 1816 goto unlock; 1817 1818 mdb = mlock_dereference(br->mdb, br); 1819 if (mdb && val < mdb->size) 1820 goto unlock; 1821 1822 err = 0; 1823 1824 old = br->hash_max; 1825 br->hash_max = val; 1826 1827 if (mdb) { 1828 if (mdb->old) { 1829 err = -EEXIST; 1830 rollback: 1831 br->hash_max = old; 1832 goto unlock; 1833 } 1834 1835 err = br_mdb_rehash(&br->mdb, br->hash_max, 1836 br->hash_elasticity); 1837 if (err) 1838 goto rollback; 1839 } 1840 1841 unlock: 1842 spin_unlock(&br->multicast_lock); 1843 1844 return err; 1845 } 1846