/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);

static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
unsigned int br_mdb_rehash_seq;

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}
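/* Per-family lookup helpers: build a struct br_ip key on the stack and
 * defer to br_mdb_ip_get().
 */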
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}
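/* RCU callbacks: mdb entries and port groups are unlinked under
 * br->multicast_lock and freed only after a grace period, so lockless
 * readers walking the hash chains never see freed memory.
 */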
void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}
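/* The mdb hash table keeps two generations of hlist pointers (hlist[ver]).
 * br_mdb_rehash() links every entry into a new table under the flipped
 * version and publishes it with rcu_assign_pointer(); the old table is
 * freed via br_mdb_free() after an RCU grace period.
 */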
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}
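/* Build an IGMPv2/v3 query addressed to 224.0.0.1 (all-hosts) with the
 * Router Alert IP option; group == 0 produces a general query.
 */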
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct igmpv3_query *ihv3;
	size_t igmp_hdr_size;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3)
		igmp_hdr_size = sizeof(*ihv3);
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 igmp_hdr_size + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = 0;
		ihv3->resv = 0;
		ihv3->suppress = 0;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
		break;
	}

	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
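/* IPv6 counterpart: build an MLDv1/v2 query to ff02::1 (all-nodes) with a
 * hop-by-hop Router Alert option. If no usable link-local source address
 * exists, the query is dropped and has_ipv6_addr is cleared.
 */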
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct mld2_query *mld2q;
	unsigned long interval;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	size_t mld_hdr_size;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2)
		mld_hdr_size = sizeof(*mld2q);
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + mld_hdr_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *grp;
		mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  sizeof(*mldq), IPPROTO_ICMPV6,
						  csum_partial(mldq,
							       sizeof(*mldq),
							       0));
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = 0;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = 0;
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *grp;
		mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						     sizeof(*mld2q),
						     IPPROTO_ICMPV6,
						     csum_partial(mld2q,
								  sizeof(*mld2q),
								  0));
		break;
	}
	skb_put(skb, mld_hdr_size);

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr,
						u8 *igmp_type)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
						    igmp_type);
#endif
	}
	return NULL;
}
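/* Bucket lookup with chain-length policing: overly long chains trigger a
 * rehash with a fresh hash secret, a full table is doubled, and once
 * hash_max would be exceeded snooping is disabled entirely.
 */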
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct net_bridge_port *p,
						    struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, p, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		memset(p->eth_addr, 0xff, ETH_ALEN);

	return p;
}

static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}
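/* Record a membership. A host join (port == NULL) only marks mp->mglist;
 * a port join inserts or refreshes a port group in the list, which is
 * kept sorted by descending port pointer value.
 */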
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group, src);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
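/* Emit a query from our own querier state machine, unless a foreign
 * querier is known to be active (its other_query timer is still running).
 */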
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

static void br_mc_disabled_update(struct net_device *dev, bool value)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = value,
	};

	switchdev_port_attr_set(dev, &attr);
}

int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
	br_mc_disabled_update(port->dev, port->br->multicast_disabled);

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}
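/* Restart an own-query timer so that the startup query series begins
 * again immediately.
 */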
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (br->multicast_disabled || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
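/* Walk the group records of an IGMPv3 report, validating each record with
 * pskb_may_pull() before touching it.
 */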
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    ntohs(grec->grec_nsrcs) == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    ntohs(*nsrcs) == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid, src);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
#endif

static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}
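/* IPv6 querier election, mirroring the IPv4 version above: the querier
 * with the numerically lowest source address wins.
 */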
#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}
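/* Handle a snooped IGMP query: a general query (re)elects the other
 * querier and marks the ingress port as a router port; a group-specific
 * query shortens the matching membership timers.
 */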
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
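/* MLD counterpart of br_ip4_multicast_query(). */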
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
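/* Leave processing: with fast-leave the port group is deleted right away;
 * otherwise, if we are the querier, last-member queries are sent and the
 * relevant timers shortened so the entry ages out quickly.
 */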
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif

static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}
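/* Snooping entry point for IPv4: ip_mc_check_igmp() validates the frame
 * (possibly returning a trimmed copy); -ENOMSG means "not IGMP", in which
 * case only the mrouters_only hint and PIM hello detection apply.
 * Otherwise dispatch on the IGMP type.
 */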
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		}
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}
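/* Per-family timer callbacks fed to setup_timer() for the bridge's own
 * query timers; see br_multicast_query_expired() above.
 */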
static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query,
				   &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}
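/* Netlink/sysfs knob for the bridge-wide multicast_router mode; DISABLED
 * and PERM stop the router timer before the mode is stored.
 */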
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case MDB_RTR_TYPE_TEMP_QUERY:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}
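/* Runtime enable/disable of snooping: enabling rehashes the mdb (picking
 * up any hash_elasticity change) and restarts the queriers on all active
 * ports.
 */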
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_igmp_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_mld_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
#endif
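/*
 * Illustrative sketch only: br_multicast_set_hash_max() above rejects any
 * value that is not a power of two or that is smaller than the current
 * table size.  A hypothetical caller could round a requested size up
 * front with roundup_pow_of_two() (linux/log2.h is already included) so
 * the -EINVAL path is never hit for non-power-of-two sizes; the helper
 * name br_example_resize_mdb() is made up.
 */
static int __maybe_unused br_example_resize_mdb(struct net_bridge *br,
						unsigned long requested)
{
	/* e.g. requested = 1000 becomes 1024 */
	return br_multicast_set_hash_max(br, roundup_pow_of_two(requested));
}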
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
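/*
 * Illustrative sketch only: the kernel-doc above obliges the caller of
 * br_multicast_list_adjacent() to initialize the list head and to free
 * every entry afterwards (duplicates included).  A hypothetical consumer
 * might look like this; br_example_count_adjacent() is not a real API.
 */
static int __maybe_unused br_example_count_adjacent(struct net_device *dev)
{
	struct br_ip_list *entry, *tmp;
	LIST_HEAD(mcast_list);
	int count;

	count = br_multicast_list_adjacent(dev, &mcast_list);

	/* ... inspect entry->addr for each snooped group here ... */

	list_for_each_entry_safe(entry, tmp, &mcast_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return count;
}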
/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
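/*
 * Illustrative sketch only: a hypothetical transmit-side call site for
 * br_multicast_count() above.  It assumes the snooped IGMP/MLD type was
 * stashed in BR_INPUT_SKB_CB(skb)->igmp on ingress, which is how this
 * file records it; BR_MCAST_DIR_TX selects the transmit counters.  The
 * helper name br_example_count_tx() is made up.
 */
static void __maybe_unused br_example_count_tx(struct net_bridge *br,
					       struct net_bridge_port *p,
					       struct sk_buff *skb)
{
	u8 igmp_type = BR_INPUT_SKB_CB(skb)->igmp;

	br_multicast_count(br, p, skb, igmp_type, BR_MCAST_DIR_TX);
}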
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
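/*
 * Illustrative sketch only: a hypothetical reader of the counters
 * aggregated by br_multicast_get_stats() above.  struct br_mcast_stats
 * and the BR_MCAST_DIR_* indices come from the UAPI header
 * linux/if_bridge.h; the helper name br_example_total_queries_rx() is
 * made up.
 */
static u64 __maybe_unused br_example_total_queries_rx(const struct net_bridge *br)
{
	struct br_mcast_stats stats;

	/* A NULL port means bridge-wide counters, as in br_multicast_count(). */
	br_multicast_get_stats(br, NULL, &stats);

	return stats.igmp_v1queries[BR_MCAST_DIR_RX] +
	       stats.igmp_v2queries[BR_MCAST_DIR_RX] +
	       stats.igmp_v3queries[BR_MCAST_DIR_RX];
}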