1 /* 2 * Bridge multicast support. 3 * 4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the Free 8 * Software Foundation; either version 2 of the License, or (at your option) 9 * any later version. 10 * 11 */ 12 13 #include <linux/err.h> 14 #include <linux/export.h> 15 #include <linux/if_ether.h> 16 #include <linux/igmp.h> 17 #include <linux/jhash.h> 18 #include <linux/kernel.h> 19 #include <linux/log2.h> 20 #include <linux/netdevice.h> 21 #include <linux/netfilter_bridge.h> 22 #include <linux/random.h> 23 #include <linux/rculist.h> 24 #include <linux/skbuff.h> 25 #include <linux/slab.h> 26 #include <linux/timer.h> 27 #include <linux/inetdevice.h> 28 #include <linux/mroute.h> 29 #include <net/ip.h> 30 #if IS_ENABLED(CONFIG_IPV6) 31 #include <net/ipv6.h> 32 #include <net/mld.h> 33 #include <net/ip6_checksum.h> 34 #include <net/addrconf.h> 35 #endif 36 37 #include "br_private.h" 38 39 static void br_multicast_start_querier(struct net_bridge *br, 40 struct bridge_mcast_own_query *query); 41 static void br_multicast_add_router(struct net_bridge *br, 42 struct net_bridge_port *port); 43 static void br_ip4_multicast_leave_group(struct net_bridge *br, 44 struct net_bridge_port *port, 45 __be32 group, 46 __u16 vid); 47 #if IS_ENABLED(CONFIG_IPV6) 48 static void br_ip6_multicast_leave_group(struct net_bridge *br, 49 struct net_bridge_port *port, 50 const struct in6_addr *group, 51 __u16 vid); 52 #endif 53 unsigned int br_mdb_rehash_seq; 54 55 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 56 { 57 if (a->proto != b->proto) 58 return 0; 59 if (a->vid != b->vid) 60 return 0; 61 switch (a->proto) { 62 case htons(ETH_P_IP): 63 return a->u.ip4 == b->u.ip4; 64 #if IS_ENABLED(CONFIG_IPV6) 65 case htons(ETH_P_IPV6): 66 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); 67 #endif 68 } 69 return 0; 
70 } 71 72 static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip, 73 __u16 vid) 74 { 75 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1); 76 } 77 78 #if IS_ENABLED(CONFIG_IPV6) 79 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 80 const struct in6_addr *ip, 81 __u16 vid) 82 { 83 return jhash_2words(ipv6_addr_hash(ip), vid, 84 mdb->secret) & (mdb->max - 1); 85 } 86 #endif 87 88 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, 89 struct br_ip *ip) 90 { 91 switch (ip->proto) { 92 case htons(ETH_P_IP): 93 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid); 94 #if IS_ENABLED(CONFIG_IPV6) 95 case htons(ETH_P_IPV6): 96 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid); 97 #endif 98 } 99 return 0; 100 } 101 102 static struct net_bridge_mdb_entry *__br_mdb_ip_get( 103 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) 104 { 105 struct net_bridge_mdb_entry *mp; 106 107 hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 108 if (br_ip_equal(&mp->addr, dst)) 109 return mp; 110 } 111 112 return NULL; 113 } 114 115 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, 116 struct br_ip *dst) 117 { 118 if (!mdb) 119 return NULL; 120 121 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 122 } 123 124 static struct net_bridge_mdb_entry *br_mdb_ip4_get( 125 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid) 126 { 127 struct br_ip br_dst; 128 129 br_dst.u.ip4 = dst; 130 br_dst.proto = htons(ETH_P_IP); 131 br_dst.vid = vid; 132 133 return br_mdb_ip_get(mdb, &br_dst); 134 } 135 136 #if IS_ENABLED(CONFIG_IPV6) 137 static struct net_bridge_mdb_entry *br_mdb_ip6_get( 138 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst, 139 __u16 vid) 140 { 141 struct br_ip br_dst; 142 143 br_dst.u.ip6 = *dst; 144 br_dst.proto = htons(ETH_P_IPV6); 145 br_dst.vid = vid; 146 147 return br_mdb_ip_get(mdb, &br_dst); 148 } 149 #endif 150 151 struct 
net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 152 struct sk_buff *skb, u16 vid) 153 { 154 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); 155 struct br_ip ip; 156 157 if (br->multicast_disabled) 158 return NULL; 159 160 if (BR_INPUT_SKB_CB(skb)->igmp) 161 return NULL; 162 163 ip.proto = skb->protocol; 164 ip.vid = vid; 165 166 switch (skb->protocol) { 167 case htons(ETH_P_IP): 168 ip.u.ip4 = ip_hdr(skb)->daddr; 169 break; 170 #if IS_ENABLED(CONFIG_IPV6) 171 case htons(ETH_P_IPV6): 172 ip.u.ip6 = ipv6_hdr(skb)->daddr; 173 break; 174 #endif 175 default: 176 return NULL; 177 } 178 179 return br_mdb_ip_get(mdb, &ip); 180 } 181 182 static void br_mdb_free(struct rcu_head *head) 183 { 184 struct net_bridge_mdb_htable *mdb = 185 container_of(head, struct net_bridge_mdb_htable, rcu); 186 struct net_bridge_mdb_htable *old = mdb->old; 187 188 mdb->old = NULL; 189 kfree(old->mhash); 190 kfree(old); 191 } 192 193 static int br_mdb_copy(struct net_bridge_mdb_htable *new, 194 struct net_bridge_mdb_htable *old, 195 int elasticity) 196 { 197 struct net_bridge_mdb_entry *mp; 198 int maxlen; 199 int len; 200 int i; 201 202 for (i = 0; i < old->max; i++) 203 hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver]) 204 hlist_add_head(&mp->hlist[new->ver], 205 &new->mhash[br_ip_hash(new, &mp->addr)]); 206 207 if (!elasticity) 208 return 0; 209 210 maxlen = 0; 211 for (i = 0; i < new->max; i++) { 212 len = 0; 213 hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver]) 214 len++; 215 if (len > maxlen) 216 maxlen = len; 217 } 218 219 return maxlen > elasticity ? 
-EINVAL : 0; 220 } 221 222 void br_multicast_free_pg(struct rcu_head *head) 223 { 224 struct net_bridge_port_group *p = 225 container_of(head, struct net_bridge_port_group, rcu); 226 227 kfree(p); 228 } 229 230 static void br_multicast_free_group(struct rcu_head *head) 231 { 232 struct net_bridge_mdb_entry *mp = 233 container_of(head, struct net_bridge_mdb_entry, rcu); 234 235 kfree(mp); 236 } 237 238 static void br_multicast_group_expired(unsigned long data) 239 { 240 struct net_bridge_mdb_entry *mp = (void *)data; 241 struct net_bridge *br = mp->br; 242 struct net_bridge_mdb_htable *mdb; 243 244 spin_lock(&br->multicast_lock); 245 if (!netif_running(br->dev) || timer_pending(&mp->timer)) 246 goto out; 247 248 mp->mglist = false; 249 250 if (mp->ports) 251 goto out; 252 253 mdb = mlock_dereference(br->mdb, br); 254 255 hlist_del_rcu(&mp->hlist[mdb->ver]); 256 mdb->size--; 257 258 call_rcu_bh(&mp->rcu, br_multicast_free_group); 259 260 out: 261 spin_unlock(&br->multicast_lock); 262 } 263 264 static void br_multicast_del_pg(struct net_bridge *br, 265 struct net_bridge_port_group *pg) 266 { 267 struct net_bridge_mdb_htable *mdb; 268 struct net_bridge_mdb_entry *mp; 269 struct net_bridge_port_group *p; 270 struct net_bridge_port_group __rcu **pp; 271 272 mdb = mlock_dereference(br->mdb, br); 273 274 mp = br_mdb_ip_get(mdb, &pg->addr); 275 if (WARN_ON(!mp)) 276 return; 277 278 for (pp = &mp->ports; 279 (p = mlock_dereference(*pp, br)) != NULL; 280 pp = &p->next) { 281 if (p != pg) 282 continue; 283 284 rcu_assign_pointer(*pp, p->next); 285 hlist_del_init(&p->mglist); 286 del_timer(&p->timer); 287 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, 288 p->flags); 289 call_rcu_bh(&p->rcu, br_multicast_free_pg); 290 291 if (!mp->ports && !mp->mglist && 292 netif_running(br->dev)) 293 mod_timer(&mp->timer, jiffies); 294 295 return; 296 } 297 298 WARN_ON(1); 299 } 300 301 static void br_multicast_port_group_expired(unsigned long data) 302 { 303 struct 
net_bridge_port_group *pg = (void *)data; 304 struct net_bridge *br = pg->port->br; 305 306 spin_lock(&br->multicast_lock); 307 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 308 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) 309 goto out; 310 311 br_multicast_del_pg(br, pg); 312 313 out: 314 spin_unlock(&br->multicast_lock); 315 } 316 317 static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, 318 int elasticity) 319 { 320 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1); 321 struct net_bridge_mdb_htable *mdb; 322 int err; 323 324 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC); 325 if (!mdb) 326 return -ENOMEM; 327 328 mdb->max = max; 329 mdb->old = old; 330 331 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC); 332 if (!mdb->mhash) { 333 kfree(mdb); 334 return -ENOMEM; 335 } 336 337 mdb->size = old ? old->size : 0; 338 mdb->ver = old ? old->ver ^ 1 : 0; 339 340 if (!old || elasticity) 341 get_random_bytes(&mdb->secret, sizeof(mdb->secret)); 342 else 343 mdb->secret = old->secret; 344 345 if (!old) 346 goto out; 347 348 err = br_mdb_copy(mdb, old, elasticity); 349 if (err) { 350 kfree(mdb->mhash); 351 kfree(mdb); 352 return err; 353 } 354 355 br_mdb_rehash_seq++; 356 call_rcu_bh(&mdb->rcu, br_mdb_free); 357 358 out: 359 rcu_assign_pointer(*mdbp, mdb); 360 361 return 0; 362 } 363 364 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 365 __be32 group, 366 u8 *igmp_type) 367 { 368 struct igmpv3_query *ihv3; 369 size_t igmp_hdr_size; 370 struct sk_buff *skb; 371 struct igmphdr *ih; 372 struct ethhdr *eth; 373 struct iphdr *iph; 374 375 igmp_hdr_size = sizeof(*ih); 376 if (br->multicast_igmp_version == 3) 377 igmp_hdr_size = sizeof(*ihv3); 378 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + 379 igmp_hdr_size + 4); 380 if (!skb) 381 goto out; 382 383 skb->protocol = htons(ETH_P_IP); 384 385 skb_reset_mac_header(skb); 386 eth = eth_hdr(skb); 387 
388 ether_addr_copy(eth->h_source, br->dev->dev_addr); 389 eth->h_dest[0] = 1; 390 eth->h_dest[1] = 0; 391 eth->h_dest[2] = 0x5e; 392 eth->h_dest[3] = 0; 393 eth->h_dest[4] = 0; 394 eth->h_dest[5] = 1; 395 eth->h_proto = htons(ETH_P_IP); 396 skb_put(skb, sizeof(*eth)); 397 398 skb_set_network_header(skb, skb->len); 399 iph = ip_hdr(skb); 400 401 iph->version = 4; 402 iph->ihl = 6; 403 iph->tos = 0xc0; 404 iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4); 405 iph->id = 0; 406 iph->frag_off = htons(IP_DF); 407 iph->ttl = 1; 408 iph->protocol = IPPROTO_IGMP; 409 iph->saddr = br->multicast_query_use_ifaddr ? 410 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0; 411 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 412 ((u8 *)&iph[1])[0] = IPOPT_RA; 413 ((u8 *)&iph[1])[1] = 4; 414 ((u8 *)&iph[1])[2] = 0; 415 ((u8 *)&iph[1])[3] = 0; 416 ip_send_check(iph); 417 skb_put(skb, 24); 418 419 skb_set_transport_header(skb, skb->len); 420 *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY; 421 422 switch (br->multicast_igmp_version) { 423 case 2: 424 ih = igmp_hdr(skb); 425 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 426 ih->code = (group ? br->multicast_last_member_interval : 427 br->multicast_query_response_interval) / 428 (HZ / IGMP_TIMER_SCALE); 429 ih->group = group; 430 ih->csum = 0; 431 ih->csum = ip_compute_csum((void *)ih, sizeof(*ih)); 432 break; 433 case 3: 434 ihv3 = igmpv3_query_hdr(skb); 435 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY; 436 ihv3->code = (group ? 
br->multicast_last_member_interval : 437 br->multicast_query_response_interval) / 438 (HZ / IGMP_TIMER_SCALE); 439 ihv3->group = group; 440 ihv3->qqic = br->multicast_query_interval / HZ; 441 ihv3->nsrcs = 0; 442 ihv3->resv = 0; 443 ihv3->suppress = 0; 444 ihv3->qrv = 2; 445 ihv3->csum = 0; 446 ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3)); 447 break; 448 } 449 450 skb_put(skb, igmp_hdr_size); 451 __skb_pull(skb, sizeof(*eth)); 452 453 out: 454 return skb; 455 } 456 457 #if IS_ENABLED(CONFIG_IPV6) 458 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 459 const struct in6_addr *grp, 460 u8 *igmp_type) 461 { 462 struct mld2_query *mld2q; 463 unsigned long interval; 464 struct ipv6hdr *ip6h; 465 struct mld_msg *mldq; 466 size_t mld_hdr_size; 467 struct sk_buff *skb; 468 struct ethhdr *eth; 469 u8 *hopopt; 470 471 mld_hdr_size = sizeof(*mldq); 472 if (br->multicast_mld_version == 2) 473 mld_hdr_size = sizeof(*mld2q); 474 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + 475 8 + mld_hdr_size); 476 if (!skb) 477 goto out; 478 479 skb->protocol = htons(ETH_P_IPV6); 480 481 /* Ethernet header */ 482 skb_reset_mac_header(skb); 483 eth = eth_hdr(skb); 484 485 ether_addr_copy(eth->h_source, br->dev->dev_addr); 486 eth->h_proto = htons(ETH_P_IPV6); 487 skb_put(skb, sizeof(*eth)); 488 489 /* IPv6 header + HbH option */ 490 skb_set_network_header(skb, skb->len); 491 ip6h = ipv6_hdr(skb); 492 493 *(__force __be32 *)ip6h = htonl(0x60000000); 494 ip6h->payload_len = htons(8 + mld_hdr_size); 495 ip6h->nexthdr = IPPROTO_HOPOPTS; 496 ip6h->hop_limit = 1; 497 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 498 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 499 &ip6h->saddr)) { 500 kfree_skb(skb); 501 br->has_ipv6_addr = 0; 502 return NULL; 503 } 504 505 br->has_ipv6_addr = 1; 506 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 507 508 hopopt = (u8 *)(ip6h + 1); 509 hopopt[0] = IPPROTO_ICMPV6; /* 
next hdr */ 510 hopopt[1] = 0; /* length of HbH */ 511 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 512 hopopt[3] = 2; /* Length of RA Option */ 513 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 514 hopopt[5] = 0; 515 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ 516 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ 517 518 skb_put(skb, sizeof(*ip6h) + 8); 519 520 /* ICMPv6 */ 521 skb_set_transport_header(skb, skb->len); 522 interval = ipv6_addr_any(grp) ? 523 br->multicast_query_response_interval : 524 br->multicast_last_member_interval; 525 *igmp_type = ICMPV6_MGM_QUERY; 526 switch (br->multicast_mld_version) { 527 case 1: 528 mldq = (struct mld_msg *)icmp6_hdr(skb); 529 mldq->mld_type = ICMPV6_MGM_QUERY; 530 mldq->mld_code = 0; 531 mldq->mld_cksum = 0; 532 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 533 mldq->mld_reserved = 0; 534 mldq->mld_mca = *grp; 535 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 536 sizeof(*mldq), IPPROTO_ICMPV6, 537 csum_partial(mldq, 538 sizeof(*mldq), 539 0)); 540 break; 541 case 2: 542 mld2q = (struct mld2_query *)icmp6_hdr(skb); 543 mld2q->mld2q_mrc = ntohs((u16)jiffies_to_msecs(interval)); 544 mld2q->mld2q_type = ICMPV6_MGM_QUERY; 545 mld2q->mld2q_code = 0; 546 mld2q->mld2q_cksum = 0; 547 mld2q->mld2q_resv1 = 0; 548 mld2q->mld2q_resv2 = 0; 549 mld2q->mld2q_suppress = 0; 550 mld2q->mld2q_qrv = 2; 551 mld2q->mld2q_nsrcs = 0; 552 mld2q->mld2q_qqic = br->multicast_query_interval / HZ; 553 mld2q->mld2q_mca = *grp; 554 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 555 sizeof(*mld2q), 556 IPPROTO_ICMPV6, 557 csum_partial(mld2q, 558 sizeof(*mld2q), 559 0)); 560 break; 561 } 562 skb_put(skb, mld_hdr_size); 563 564 __skb_pull(skb, sizeof(*eth)); 565 566 out: 567 return skb; 568 } 569 #endif 570 571 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 572 struct br_ip *addr, 573 u8 *igmp_type) 574 { 575 switch (addr->proto) { 576 case htons(ETH_P_IP): 577 return 
br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type); 578 #if IS_ENABLED(CONFIG_IPV6) 579 case htons(ETH_P_IPV6): 580 return br_ip6_multicast_alloc_query(br, &addr->u.ip6, 581 igmp_type); 582 #endif 583 } 584 return NULL; 585 } 586 587 static struct net_bridge_mdb_entry *br_multicast_get_group( 588 struct net_bridge *br, struct net_bridge_port *port, 589 struct br_ip *group, int hash) 590 { 591 struct net_bridge_mdb_htable *mdb; 592 struct net_bridge_mdb_entry *mp; 593 unsigned int count = 0; 594 unsigned int max; 595 int elasticity; 596 int err; 597 598 mdb = rcu_dereference_protected(br->mdb, 1); 599 hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 600 count++; 601 if (unlikely(br_ip_equal(group, &mp->addr))) 602 return mp; 603 } 604 605 elasticity = 0; 606 max = mdb->max; 607 608 if (unlikely(count > br->hash_elasticity && count)) { 609 if (net_ratelimit()) 610 br_info(br, "Multicast hash table " 611 "chain limit reached: %s\n", 612 port ? port->dev->name : br->dev->name); 613 614 elasticity = br->hash_elasticity; 615 } 616 617 if (mdb->size >= max) { 618 max *= 2; 619 if (unlikely(max > br->hash_max)) { 620 br_warn(br, "Multicast hash table maximum of %d " 621 "reached, disabling snooping: %s\n", 622 br->hash_max, 623 port ? port->dev->name : br->dev->name); 624 err = -E2BIG; 625 disable: 626 br->multicast_disabled = 1; 627 goto err; 628 } 629 } 630 631 if (max > mdb->max || elasticity) { 632 if (mdb->old) { 633 if (net_ratelimit()) 634 br_info(br, "Multicast hash table " 635 "on fire: %s\n", 636 port ? port->dev->name : br->dev->name); 637 err = -EEXIST; 638 goto err; 639 } 640 641 err = br_mdb_rehash(&br->mdb, max, elasticity); 642 if (err) { 643 br_warn(br, "Cannot rehash multicast " 644 "hash table, disabling snooping: %s, %d, %d\n", 645 port ? 
port->dev->name : br->dev->name, 646 mdb->size, err); 647 goto disable; 648 } 649 650 err = -EAGAIN; 651 goto err; 652 } 653 654 return NULL; 655 656 err: 657 mp = ERR_PTR(err); 658 return mp; 659 } 660 661 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br, 662 struct net_bridge_port *p, 663 struct br_ip *group) 664 { 665 struct net_bridge_mdb_htable *mdb; 666 struct net_bridge_mdb_entry *mp; 667 int hash; 668 int err; 669 670 mdb = rcu_dereference_protected(br->mdb, 1); 671 if (!mdb) { 672 err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0); 673 if (err) 674 return ERR_PTR(err); 675 goto rehash; 676 } 677 678 hash = br_ip_hash(mdb, group); 679 mp = br_multicast_get_group(br, p, group, hash); 680 switch (PTR_ERR(mp)) { 681 case 0: 682 break; 683 684 case -EAGAIN: 685 rehash: 686 mdb = rcu_dereference_protected(br->mdb, 1); 687 hash = br_ip_hash(mdb, group); 688 break; 689 690 default: 691 goto out; 692 } 693 694 mp = kzalloc(sizeof(*mp), GFP_ATOMIC); 695 if (unlikely(!mp)) 696 return ERR_PTR(-ENOMEM); 697 698 mp->br = br; 699 mp->addr = *group; 700 setup_timer(&mp->timer, br_multicast_group_expired, 701 (unsigned long)mp); 702 703 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 704 mdb->size++; 705 706 out: 707 return mp; 708 } 709 710 struct net_bridge_port_group *br_multicast_new_port_group( 711 struct net_bridge_port *port, 712 struct br_ip *group, 713 struct net_bridge_port_group __rcu *next, 714 unsigned char flags) 715 { 716 struct net_bridge_port_group *p; 717 718 p = kzalloc(sizeof(*p), GFP_ATOMIC); 719 if (unlikely(!p)) 720 return NULL; 721 722 p->addr = *group; 723 p->port = port; 724 p->flags = flags; 725 rcu_assign_pointer(p->next, next); 726 hlist_add_head(&p->mglist, &port->mglist); 727 setup_timer(&p->timer, br_multicast_port_group_expired, 728 (unsigned long)p); 729 return p; 730 } 731 732 static int br_multicast_add_group(struct net_bridge *br, 733 struct net_bridge_port *port, 734 struct br_ip *group) 735 { 736 
struct net_bridge_port_group __rcu **pp; 737 struct net_bridge_port_group *p; 738 struct net_bridge_mdb_entry *mp; 739 unsigned long now = jiffies; 740 int err; 741 742 spin_lock(&br->multicast_lock); 743 if (!netif_running(br->dev) || 744 (port && port->state == BR_STATE_DISABLED)) 745 goto out; 746 747 mp = br_multicast_new_group(br, port, group); 748 err = PTR_ERR(mp); 749 if (IS_ERR(mp)) 750 goto err; 751 752 if (!port) { 753 mp->mglist = true; 754 mod_timer(&mp->timer, now + br->multicast_membership_interval); 755 goto out; 756 } 757 758 for (pp = &mp->ports; 759 (p = mlock_dereference(*pp, br)) != NULL; 760 pp = &p->next) { 761 if (p->port == port) 762 goto found; 763 if ((unsigned long)p->port < (unsigned long)port) 764 break; 765 } 766 767 p = br_multicast_new_port_group(port, group, *pp, 0); 768 if (unlikely(!p)) 769 goto err; 770 rcu_assign_pointer(*pp, p); 771 br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0); 772 773 found: 774 mod_timer(&p->timer, now + br->multicast_membership_interval); 775 out: 776 err = 0; 777 778 err: 779 spin_unlock(&br->multicast_lock); 780 return err; 781 } 782 783 static int br_ip4_multicast_add_group(struct net_bridge *br, 784 struct net_bridge_port *port, 785 __be32 group, 786 __u16 vid) 787 { 788 struct br_ip br_group; 789 790 if (ipv4_is_local_multicast(group)) 791 return 0; 792 793 br_group.u.ip4 = group; 794 br_group.proto = htons(ETH_P_IP); 795 br_group.vid = vid; 796 797 return br_multicast_add_group(br, port, &br_group); 798 } 799 800 #if IS_ENABLED(CONFIG_IPV6) 801 static int br_ip6_multicast_add_group(struct net_bridge *br, 802 struct net_bridge_port *port, 803 const struct in6_addr *group, 804 __u16 vid) 805 { 806 struct br_ip br_group; 807 808 if (ipv6_addr_is_ll_all_nodes(group)) 809 return 0; 810 811 br_group.u.ip6 = *group; 812 br_group.proto = htons(ETH_P_IPV6); 813 br_group.vid = vid; 814 815 return br_multicast_add_group(br, port, &br_group); 816 } 817 #endif 818 819 static void 
br_multicast_router_expired(unsigned long data) 820 { 821 struct net_bridge_port *port = (void *)data; 822 struct net_bridge *br = port->br; 823 824 spin_lock(&br->multicast_lock); 825 if (port->multicast_router == MDB_RTR_TYPE_DISABLED || 826 port->multicast_router == MDB_RTR_TYPE_PERM || 827 timer_pending(&port->multicast_router_timer) || 828 hlist_unhashed(&port->rlist)) 829 goto out; 830 831 hlist_del_init_rcu(&port->rlist); 832 br_rtr_notify(br->dev, port, RTM_DELMDB); 833 /* Don't allow timer refresh if the router expired */ 834 if (port->multicast_router == MDB_RTR_TYPE_TEMP) 835 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 836 837 out: 838 spin_unlock(&br->multicast_lock); 839 } 840 841 static void br_multicast_local_router_expired(unsigned long data) 842 { 843 } 844 845 static void br_multicast_querier_expired(struct net_bridge *br, 846 struct bridge_mcast_own_query *query) 847 { 848 spin_lock(&br->multicast_lock); 849 if (!netif_running(br->dev) || br->multicast_disabled) 850 goto out; 851 852 br_multicast_start_querier(br, query); 853 854 out: 855 spin_unlock(&br->multicast_lock); 856 } 857 858 static void br_ip4_multicast_querier_expired(unsigned long data) 859 { 860 struct net_bridge *br = (void *)data; 861 862 br_multicast_querier_expired(br, &br->ip4_own_query); 863 } 864 865 #if IS_ENABLED(CONFIG_IPV6) 866 static void br_ip6_multicast_querier_expired(unsigned long data) 867 { 868 struct net_bridge *br = (void *)data; 869 870 br_multicast_querier_expired(br, &br->ip6_own_query); 871 } 872 #endif 873 874 static void br_multicast_select_own_querier(struct net_bridge *br, 875 struct br_ip *ip, 876 struct sk_buff *skb) 877 { 878 if (ip->proto == htons(ETH_P_IP)) 879 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr; 880 #if IS_ENABLED(CONFIG_IPV6) 881 else 882 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr; 883 #endif 884 } 885 886 static void __br_multicast_send_query(struct net_bridge *br, 887 struct net_bridge_port *port, 888 struct br_ip *ip) 
889 { 890 struct sk_buff *skb; 891 u8 igmp_type; 892 893 skb = br_multicast_alloc_query(br, ip, &igmp_type); 894 if (!skb) 895 return; 896 897 if (port) { 898 skb->dev = port->dev; 899 br_multicast_count(br, port, skb, igmp_type, 900 BR_MCAST_DIR_TX); 901 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, 902 dev_net(port->dev), NULL, skb, NULL, skb->dev, 903 br_dev_queue_push_xmit); 904 } else { 905 br_multicast_select_own_querier(br, ip, skb); 906 br_multicast_count(br, port, skb, igmp_type, 907 BR_MCAST_DIR_RX); 908 netif_rx(skb); 909 } 910 } 911 912 static void br_multicast_send_query(struct net_bridge *br, 913 struct net_bridge_port *port, 914 struct bridge_mcast_own_query *own_query) 915 { 916 struct bridge_mcast_other_query *other_query = NULL; 917 struct br_ip br_group; 918 unsigned long time; 919 920 if (!netif_running(br->dev) || br->multicast_disabled || 921 !br->multicast_querier) 922 return; 923 924 memset(&br_group.u, 0, sizeof(br_group.u)); 925 926 if (port ? (own_query == &port->ip4_own_query) : 927 (own_query == &br->ip4_own_query)) { 928 other_query = &br->ip4_other_query; 929 br_group.proto = htons(ETH_P_IP); 930 #if IS_ENABLED(CONFIG_IPV6) 931 } else { 932 other_query = &br->ip6_other_query; 933 br_group.proto = htons(ETH_P_IPV6); 934 #endif 935 } 936 937 if (!other_query || timer_pending(&other_query->timer)) 938 return; 939 940 __br_multicast_send_query(br, port, &br_group); 941 942 time = jiffies; 943 time += own_query->startup_sent < br->multicast_startup_query_count ? 
944 br->multicast_startup_query_interval : 945 br->multicast_query_interval; 946 mod_timer(&own_query->timer, time); 947 } 948 949 static void 950 br_multicast_port_query_expired(struct net_bridge_port *port, 951 struct bridge_mcast_own_query *query) 952 { 953 struct net_bridge *br = port->br; 954 955 spin_lock(&br->multicast_lock); 956 if (port->state == BR_STATE_DISABLED || 957 port->state == BR_STATE_BLOCKING) 958 goto out; 959 960 if (query->startup_sent < br->multicast_startup_query_count) 961 query->startup_sent++; 962 963 br_multicast_send_query(port->br, port, query); 964 965 out: 966 spin_unlock(&br->multicast_lock); 967 } 968 969 static void br_ip4_multicast_port_query_expired(unsigned long data) 970 { 971 struct net_bridge_port *port = (void *)data; 972 973 br_multicast_port_query_expired(port, &port->ip4_own_query); 974 } 975 976 #if IS_ENABLED(CONFIG_IPV6) 977 static void br_ip6_multicast_port_query_expired(unsigned long data) 978 { 979 struct net_bridge_port *port = (void *)data; 980 981 br_multicast_port_query_expired(port, &port->ip6_own_query); 982 } 983 #endif 984 985 int br_multicast_add_port(struct net_bridge_port *port) 986 { 987 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 988 989 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 990 (unsigned long)port); 991 setup_timer(&port->ip4_own_query.timer, 992 br_ip4_multicast_port_query_expired, (unsigned long)port); 993 #if IS_ENABLED(CONFIG_IPV6) 994 setup_timer(&port->ip6_own_query.timer, 995 br_ip6_multicast_port_query_expired, (unsigned long)port); 996 #endif 997 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 998 if (!port->mcast_stats) 999 return -ENOMEM; 1000 1001 return 0; 1002 } 1003 1004 void br_multicast_del_port(struct net_bridge_port *port) 1005 { 1006 struct net_bridge *br = port->br; 1007 struct net_bridge_port_group *pg; 1008 struct hlist_node *n; 1009 1010 /* Take care of the remaining groups, only perm ones should be left */ 1011 
spin_lock_bh(&br->multicast_lock); 1012 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 1013 br_multicast_del_pg(br, pg); 1014 spin_unlock_bh(&br->multicast_lock); 1015 del_timer_sync(&port->multicast_router_timer); 1016 free_percpu(port->mcast_stats); 1017 } 1018 1019 static void br_multicast_enable(struct bridge_mcast_own_query *query) 1020 { 1021 query->startup_sent = 0; 1022 1023 if (try_to_del_timer_sync(&query->timer) >= 0 || 1024 del_timer(&query->timer)) 1025 mod_timer(&query->timer, jiffies); 1026 } 1027 1028 static void __br_multicast_enable_port(struct net_bridge_port *port) 1029 { 1030 struct net_bridge *br = port->br; 1031 1032 if (br->multicast_disabled || !netif_running(br->dev)) 1033 return; 1034 1035 br_multicast_enable(&port->ip4_own_query); 1036 #if IS_ENABLED(CONFIG_IPV6) 1037 br_multicast_enable(&port->ip6_own_query); 1038 #endif 1039 if (port->multicast_router == MDB_RTR_TYPE_PERM && 1040 hlist_unhashed(&port->rlist)) 1041 br_multicast_add_router(br, port); 1042 } 1043 1044 void br_multicast_enable_port(struct net_bridge_port *port) 1045 { 1046 struct net_bridge *br = port->br; 1047 1048 spin_lock(&br->multicast_lock); 1049 __br_multicast_enable_port(port); 1050 spin_unlock(&br->multicast_lock); 1051 } 1052 1053 void br_multicast_disable_port(struct net_bridge_port *port) 1054 { 1055 struct net_bridge *br = port->br; 1056 struct net_bridge_port_group *pg; 1057 struct hlist_node *n; 1058 1059 spin_lock(&br->multicast_lock); 1060 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 1061 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT)) 1062 br_multicast_del_pg(br, pg); 1063 1064 if (!hlist_unhashed(&port->rlist)) { 1065 hlist_del_init_rcu(&port->rlist); 1066 br_rtr_notify(br->dev, port, RTM_DELMDB); 1067 /* Don't allow timer refresh if disabling */ 1068 if (port->multicast_router == MDB_RTR_TYPE_TEMP) 1069 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 1070 } 1071 del_timer(&port->multicast_router_timer); 1072 
del_timer(&port->ip4_own_query.timer); 1073 #if IS_ENABLED(CONFIG_IPV6) 1074 del_timer(&port->ip6_own_query.timer); 1075 #endif 1076 spin_unlock(&br->multicast_lock); 1077 } 1078 1079 static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 1080 struct net_bridge_port *port, 1081 struct sk_buff *skb, 1082 u16 vid) 1083 { 1084 struct igmpv3_report *ih; 1085 struct igmpv3_grec *grec; 1086 int i; 1087 int len; 1088 int num; 1089 int type; 1090 int err = 0; 1091 __be32 group; 1092 1093 ih = igmpv3_report_hdr(skb); 1094 num = ntohs(ih->ngrec); 1095 len = skb_transport_offset(skb) + sizeof(*ih); 1096 1097 for (i = 0; i < num; i++) { 1098 len += sizeof(*grec); 1099 if (!pskb_may_pull(skb, len)) 1100 return -EINVAL; 1101 1102 grec = (void *)(skb->data + len - sizeof(*grec)); 1103 group = grec->grec_mca; 1104 type = grec->grec_type; 1105 1106 len += ntohs(grec->grec_nsrcs) * 4; 1107 if (!pskb_may_pull(skb, len)) 1108 return -EINVAL; 1109 1110 /* We treat this as an IGMPv2 report for now. */ 1111 switch (type) { 1112 case IGMPV3_MODE_IS_INCLUDE: 1113 case IGMPV3_MODE_IS_EXCLUDE: 1114 case IGMPV3_CHANGE_TO_INCLUDE: 1115 case IGMPV3_CHANGE_TO_EXCLUDE: 1116 case IGMPV3_ALLOW_NEW_SOURCES: 1117 case IGMPV3_BLOCK_OLD_SOURCES: 1118 break; 1119 1120 default: 1121 continue; 1122 } 1123 1124 if ((type == IGMPV3_CHANGE_TO_INCLUDE || 1125 type == IGMPV3_MODE_IS_INCLUDE) && 1126 ntohs(grec->grec_nsrcs) == 0) { 1127 br_ip4_multicast_leave_group(br, port, group, vid); 1128 } else { 1129 err = br_ip4_multicast_add_group(br, port, group, vid); 1130 if (err) 1131 break; 1132 } 1133 } 1134 1135 return err; 1136 } 1137 1138 #if IS_ENABLED(CONFIG_IPV6) 1139 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 1140 struct net_bridge_port *port, 1141 struct sk_buff *skb, 1142 u16 vid) 1143 { 1144 struct icmp6hdr *icmp6h; 1145 struct mld2_grec *grec; 1146 int i; 1147 int len; 1148 int num; 1149 int err = 0; 1150 1151 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1152 return -EINVAL; 
1153 1154 icmp6h = icmp6_hdr(skb); 1155 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1156 len = skb_transport_offset(skb) + sizeof(*icmp6h); 1157 1158 for (i = 0; i < num; i++) { 1159 __be16 *nsrcs, _nsrcs; 1160 1161 nsrcs = skb_header_pointer(skb, 1162 len + offsetof(struct mld2_grec, 1163 grec_nsrcs), 1164 sizeof(_nsrcs), &_nsrcs); 1165 if (!nsrcs) 1166 return -EINVAL; 1167 1168 if (!pskb_may_pull(skb, 1169 len + sizeof(*grec) + 1170 sizeof(struct in6_addr) * ntohs(*nsrcs))) 1171 return -EINVAL; 1172 1173 grec = (struct mld2_grec *)(skb->data + len); 1174 len += sizeof(*grec) + 1175 sizeof(struct in6_addr) * ntohs(*nsrcs); 1176 1177 /* We treat these as MLDv1 reports for now. */ 1178 switch (grec->grec_type) { 1179 case MLD2_MODE_IS_INCLUDE: 1180 case MLD2_MODE_IS_EXCLUDE: 1181 case MLD2_CHANGE_TO_INCLUDE: 1182 case MLD2_CHANGE_TO_EXCLUDE: 1183 case MLD2_ALLOW_NEW_SOURCES: 1184 case MLD2_BLOCK_OLD_SOURCES: 1185 break; 1186 1187 default: 1188 continue; 1189 } 1190 1191 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 1192 grec->grec_type == MLD2_MODE_IS_INCLUDE) && 1193 ntohs(*nsrcs) == 0) { 1194 br_ip6_multicast_leave_group(br, port, &grec->grec_mca, 1195 vid); 1196 } else { 1197 err = br_ip6_multicast_add_group(br, port, 1198 &grec->grec_mca, vid); 1199 if (err) 1200 break; 1201 } 1202 } 1203 1204 return err; 1205 } 1206 #endif 1207 1208 static bool br_ip4_multicast_select_querier(struct net_bridge *br, 1209 struct net_bridge_port *port, 1210 __be32 saddr) 1211 { 1212 if (!timer_pending(&br->ip4_own_query.timer) && 1213 !timer_pending(&br->ip4_other_query.timer)) 1214 goto update; 1215 1216 if (!br->ip4_querier.addr.u.ip4) 1217 goto update; 1218 1219 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4)) 1220 goto update; 1221 1222 return false; 1223 1224 update: 1225 br->ip4_querier.addr.u.ip4 = saddr; 1226 1227 /* update protected by general multicast_lock by caller */ 1228 rcu_assign_pointer(br->ip4_querier.port, port); 1229 1230 return true; 1231 } 1232 
#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election (RFC 2710): lowest source address wins.  Returns
 * true and records @saddr/@port as the selected querier when it wins.
 * Caller holds br->multicast_lock.
 * NOTE(review): unlike the IPv4 variant there is no explicit
 * "no querier recorded yet" check; a zeroed ip6_querier.addr compares
 * lower than any real address, so a first querier is only accepted via
 * the timers-not-pending path — confirm this is intended.
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	/* no querier seen recently: accept this one unconditionally */
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

/* Dispatch querier election to the IGMP or MLD variant based on the
 * address family carried in @saddr.
 */
static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

/* Re-arm the "other querier present" timer.  delay_time (the earliest
 * moment we may take over as querier ourselves) is only refreshed when
 * the timer was not already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* already on the router list */
	if (!hlist_unhashed(&port->rlist))
		return;

	/* find the insertion point that keeps descending pointer order */
	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
hlist_add_behind_rcu(&port->rlist, slot); 1306 else 1307 hlist_add_head_rcu(&port->rlist, &br->router_list); 1308 br_rtr_notify(br->dev, port, RTM_NEWMDB); 1309 } 1310 1311 static void br_multicast_mark_router(struct net_bridge *br, 1312 struct net_bridge_port *port) 1313 { 1314 unsigned long now = jiffies; 1315 1316 if (!port) { 1317 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) 1318 mod_timer(&br->multicast_router_timer, 1319 now + br->multicast_querier_interval); 1320 return; 1321 } 1322 1323 if (port->multicast_router == MDB_RTR_TYPE_DISABLED || 1324 port->multicast_router == MDB_RTR_TYPE_PERM) 1325 return; 1326 1327 br_multicast_add_router(br, port); 1328 1329 mod_timer(&port->multicast_router_timer, 1330 now + br->multicast_querier_interval); 1331 } 1332 1333 static void br_multicast_query_received(struct net_bridge *br, 1334 struct net_bridge_port *port, 1335 struct bridge_mcast_other_query *query, 1336 struct br_ip *saddr, 1337 unsigned long max_delay) 1338 { 1339 if (!br_multicast_select_querier(br, port, saddr)) 1340 return; 1341 1342 br_multicast_update_query_timer(br, query, max_delay); 1343 br_multicast_mark_router(br, port); 1344 } 1345 1346 static int br_ip4_multicast_query(struct net_bridge *br, 1347 struct net_bridge_port *port, 1348 struct sk_buff *skb, 1349 u16 vid) 1350 { 1351 const struct iphdr *iph = ip_hdr(skb); 1352 struct igmphdr *ih = igmp_hdr(skb); 1353 struct net_bridge_mdb_entry *mp; 1354 struct igmpv3_query *ih3; 1355 struct net_bridge_port_group *p; 1356 struct net_bridge_port_group __rcu **pp; 1357 struct br_ip saddr; 1358 unsigned long max_delay; 1359 unsigned long now = jiffies; 1360 unsigned int offset = skb_transport_offset(skb); 1361 __be32 group; 1362 int err = 0; 1363 1364 spin_lock(&br->multicast_lock); 1365 if (!netif_running(br->dev) || 1366 (port && port->state == BR_STATE_DISABLED)) 1367 goto out; 1368 1369 group = ih->group; 1370 1371 if (skb->len == offset + sizeof(*ih)) { 1372 max_delay = ih->code * (HZ / 
IGMP_TIMER_SCALE); 1373 1374 if (!max_delay) { 1375 max_delay = 10 * HZ; 1376 group = 0; 1377 } 1378 } else if (skb->len >= offset + sizeof(*ih3)) { 1379 ih3 = igmpv3_query_hdr(skb); 1380 if (ih3->nsrcs) 1381 goto out; 1382 1383 max_delay = ih3->code ? 1384 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1385 } else { 1386 goto out; 1387 } 1388 1389 if (!group) { 1390 saddr.proto = htons(ETH_P_IP); 1391 saddr.u.ip4 = iph->saddr; 1392 1393 br_multicast_query_received(br, port, &br->ip4_other_query, 1394 &saddr, max_delay); 1395 goto out; 1396 } 1397 1398 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid); 1399 if (!mp) 1400 goto out; 1401 1402 max_delay *= br->multicast_last_member_count; 1403 1404 if (mp->mglist && 1405 (timer_pending(&mp->timer) ? 1406 time_after(mp->timer.expires, now + max_delay) : 1407 try_to_del_timer_sync(&mp->timer) >= 0)) 1408 mod_timer(&mp->timer, now + max_delay); 1409 1410 for (pp = &mp->ports; 1411 (p = mlock_dereference(*pp, br)) != NULL; 1412 pp = &p->next) { 1413 if (timer_pending(&p->timer) ? 
1414 time_after(p->timer.expires, now + max_delay) : 1415 try_to_del_timer_sync(&p->timer) >= 0) 1416 mod_timer(&p->timer, now + max_delay); 1417 } 1418 1419 out: 1420 spin_unlock(&br->multicast_lock); 1421 return err; 1422 } 1423 1424 #if IS_ENABLED(CONFIG_IPV6) 1425 static int br_ip6_multicast_query(struct net_bridge *br, 1426 struct net_bridge_port *port, 1427 struct sk_buff *skb, 1428 u16 vid) 1429 { 1430 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 1431 struct mld_msg *mld; 1432 struct net_bridge_mdb_entry *mp; 1433 struct mld2_query *mld2q; 1434 struct net_bridge_port_group *p; 1435 struct net_bridge_port_group __rcu **pp; 1436 struct br_ip saddr; 1437 unsigned long max_delay; 1438 unsigned long now = jiffies; 1439 unsigned int offset = skb_transport_offset(skb); 1440 const struct in6_addr *group = NULL; 1441 bool is_general_query; 1442 int err = 0; 1443 1444 spin_lock(&br->multicast_lock); 1445 if (!netif_running(br->dev) || 1446 (port && port->state == BR_STATE_DISABLED)) 1447 goto out; 1448 1449 if (skb->len == offset + sizeof(*mld)) { 1450 if (!pskb_may_pull(skb, offset + sizeof(*mld))) { 1451 err = -EINVAL; 1452 goto out; 1453 } 1454 mld = (struct mld_msg *) icmp6_hdr(skb); 1455 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); 1456 if (max_delay) 1457 group = &mld->mld_mca; 1458 } else { 1459 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) { 1460 err = -EINVAL; 1461 goto out; 1462 } 1463 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1464 if (!mld2q->mld2q_nsrcs) 1465 group = &mld2q->mld2q_mca; 1466 1467 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); 1468 } 1469 1470 is_general_query = group && ipv6_addr_any(group); 1471 1472 if (is_general_query) { 1473 saddr.proto = htons(ETH_P_IPV6); 1474 saddr.u.ip6 = ip6h->saddr; 1475 1476 br_multicast_query_received(br, port, &br->ip6_other_query, 1477 &saddr, max_delay); 1478 goto out; 1479 } else if (!group) { 1480 goto out; 1481 } 1482 1483 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), 
			    group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* Lower the group timer towards now + max_delay if it would
	 * otherwise fire later; a host answering the query re-arms it.
	 */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Family-independent leave handling.  With fast-leave the port's group
 * membership is removed immediately; otherwise the membership timers are
 * shortened so the entry expires unless another host reports, and (when
 * we are the querier) a group-specific query is sent first.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* fast leave: unlink this port's group entry right away */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			/* last member gone: let the mdb entry expire now */
			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	/* another querier will handle the group-specific query */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* leave on the bridge device itself: shorten the group
		 * timer only
		 */
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* leave on a port: shorten that port's group timer */
	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* IPv4 wrapper around br_multicast_leave_group(); 224.0.0.x is never
 * snooped, so leaves for it are ignored.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	/* port leaves use the per-port own-query timer */
	own_query = port ?
		    &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 wrapper around br_multicast_leave_group(); link-local all-nodes
 * (ff02::1) is never snooped, so leaves for it are ignored.
 */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	/* port leaves use the per-port own-query timer */
	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query);
}
#endif

/* Account one IGMP/MLD parse error in the per-cpu multicast stats of
 * the port (or of the bridge when @p is NULL).  No-op unless stats
 * collection is enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIMv2 hello on a port reveals a multicast router behind it; mark
 * the port as a router port.  Anything else is ignored.
 */
static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

/* Validate and dispatch an IPv4 multicast control packet (IGMP/PIM).
 * ip_mc_check_igmp() returns -ENOMSG for valid non-IGMP multicast; such
 * traffic is marked mrouters_only unless it is link-local (224.0.0.x).
 * skb_trimmed, when allocated by the check helper, is freed here.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		}
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_multicast_ipv4_rcv() for MLD. */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);
	if (err == -ENOMSG) {
		/* valid non-MLD multicast: restrict to router ports unless
		 * it is link-local all-nodes traffic
		 */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	/* free the trimmed clone the MLD check helper may have made */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Entry point from the bridge input path: snoop IGMP/MLD on @skb.
 * Returns 0 on success or when snooping is disabled; a negative errno
 * from the family handlers otherwise.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer fired: we act as querier, so drop the foreign querier
 * port reference and emit a (startup or periodic) query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

/* Initialize the bridge-level multicast snooping state: defaults,
 * protocol timer intervals and the query/querier timers.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* default protocol intervals (RFC 2236/3810 derived) */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired,
		    (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

/* Reset the startup-query counter and, when snooping is enabled, fire
 * the own-query timer immediately.
 */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

/* Called when the bridge device is brought up. */
void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

/* Called when the bridge device is brought down: stop all timers. */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

/* Tear down the mdb hash table and stats on bridge device removal.
 * Entries are freed via RCU; a pending rehash (mdb->old) is waited out
 * with rcu_barrier_bh() before the table itself is queued for freeing.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		/* must drop the lock to let the rehash's RCU callback run */
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);

	free_percpu(br->mcast_stats);
}

/* sysfs/netlink: set the bridge device's own multicast_router mode.
 * Only disabled/permanent/temp-query are valid for the bridge itself.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case MDB_RTR_TYPE_TEMP_QUERY:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Remove @p from router_list (if present) and notify userspace. */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
}

/* sysfs/netlink: set a port's multicast_router mode, adjusting its
 * router-list membership and router timer accordingly.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

/* Start querying on the bridge and on all forwarding ports. */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

/* sysfs/netlink: enable (val != 0) or disable multicast snooping.  On
 * enable, the mdb is rehashed (old entries may predate the current
 * sizing) and querying is restarted on all ports.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_port *port;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		/* a rehash is already in flight: undo the toggle */
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* sysfs/netlink: enable or disable acting as IGMP/MLD querier.  When
 * enabling, give a foreign querier one response interval to show up
 * before we take over.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

/* sysfs/netlink: resize the mdb hash table.  @val must be a power of
 * two and not smaller than the current table size; the table is rehashed
 * when one exists.
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		/* a rehash is already in flight: restore the old max */
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* sysfs/netlink: select the IGMP version used for own queries. */
int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_igmp_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* sysfs/netlink: select the MLD version used for own queries. */
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_mld_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
#endif

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* on allocation failure return what we have so far */
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* build a minimal ethernet header so the generic
	 * br_multicast_querier_exists() helper can key off h_proto
	 */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		/* no foreign querier, or the selected one is us/this port */
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/* Account one IGMP/MLD packet of @type in @stats for direction @dir
 * (RX/TX).  IGMP and MLD query versions are told apart by payload
 * length, and for short IGMP queries by the max-response code.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* v1 queries carry max-response code 0 */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

/* Public counting entry point: account @skb in the per-port (or bridge)
 * per-cpu stats when stats collection is enabled.
 */
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

/* Allocate the bridge-level per-cpu multicast stats. */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Add a per-direction counter pair from @src into @dst. */
static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

/* Sum the per-cpu multicast stats of @p (or of the bridge when @p is
 * NULL) into @dest, using the u64_stats seqcount to get a consistent
 * per-cpu snapshot.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry the copy if a writer raced with us */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}