1 /* 2 * Bridge multicast support. 3 * 4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the Free 8 * Software Foundation; either version 2 of the License, or (at your option) 9 * any later version. 10 * 11 */ 12 13 #include <linux/err.h> 14 #include <linux/export.h> 15 #include <linux/if_ether.h> 16 #include <linux/igmp.h> 17 #include <linux/jhash.h> 18 #include <linux/kernel.h> 19 #include <linux/log2.h> 20 #include <linux/netdevice.h> 21 #include <linux/netfilter_bridge.h> 22 #include <linux/random.h> 23 #include <linux/rculist.h> 24 #include <linux/skbuff.h> 25 #include <linux/slab.h> 26 #include <linux/timer.h> 27 #include <linux/inetdevice.h> 28 #include <linux/mroute.h> 29 #include <net/ip.h> 30 #include <net/switchdev.h> 31 #if IS_ENABLED(CONFIG_IPV6) 32 #include <net/ipv6.h> 33 #include <net/mld.h> 34 #include <net/ip6_checksum.h> 35 #include <net/addrconf.h> 36 #endif 37 38 #include "br_private.h" 39 40 static void br_multicast_start_querier(struct net_bridge *br, 41 struct bridge_mcast_own_query *query); 42 static void br_multicast_add_router(struct net_bridge *br, 43 struct net_bridge_port *port); 44 static void br_ip4_multicast_leave_group(struct net_bridge *br, 45 struct net_bridge_port *port, 46 __be32 group, 47 __u16 vid, 48 const unsigned char *src); 49 50 static void __del_port_router(struct net_bridge_port *p); 51 #if IS_ENABLED(CONFIG_IPV6) 52 static void br_ip6_multicast_leave_group(struct net_bridge *br, 53 struct net_bridge_port *port, 54 const struct in6_addr *group, 55 __u16 vid, const unsigned char *src); 56 #endif 57 unsigned int br_mdb_rehash_seq; 58 59 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 60 { 61 if (a->proto != b->proto) 62 return 0; 63 if (a->vid != b->vid) 64 return 0; 65 switch (a->proto) { 66 case htons(ETH_P_IP): 67 return a->u.ip4 
== b->u.ip4; 68 #if IS_ENABLED(CONFIG_IPV6) 69 case htons(ETH_P_IPV6): 70 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); 71 #endif 72 } 73 return 0; 74 } 75 76 static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip, 77 __u16 vid) 78 { 79 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1); 80 } 81 82 #if IS_ENABLED(CONFIG_IPV6) 83 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 84 const struct in6_addr *ip, 85 __u16 vid) 86 { 87 return jhash_2words(ipv6_addr_hash(ip), vid, 88 mdb->secret) & (mdb->max - 1); 89 } 90 #endif 91 92 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, 93 struct br_ip *ip) 94 { 95 switch (ip->proto) { 96 case htons(ETH_P_IP): 97 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid); 98 #if IS_ENABLED(CONFIG_IPV6) 99 case htons(ETH_P_IPV6): 100 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid); 101 #endif 102 } 103 return 0; 104 } 105 106 static struct net_bridge_mdb_entry *__br_mdb_ip_get( 107 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) 108 { 109 struct net_bridge_mdb_entry *mp; 110 111 hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 112 if (br_ip_equal(&mp->addr, dst)) 113 return mp; 114 } 115 116 return NULL; 117 } 118 119 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, 120 struct br_ip *dst) 121 { 122 if (!mdb) 123 return NULL; 124 125 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 126 } 127 128 static struct net_bridge_mdb_entry *br_mdb_ip4_get( 129 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid) 130 { 131 struct br_ip br_dst; 132 133 br_dst.u.ip4 = dst; 134 br_dst.proto = htons(ETH_P_IP); 135 br_dst.vid = vid; 136 137 return br_mdb_ip_get(mdb, &br_dst); 138 } 139 140 #if IS_ENABLED(CONFIG_IPV6) 141 static struct net_bridge_mdb_entry *br_mdb_ip6_get( 142 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst, 143 __u16 vid) 144 { 145 struct br_ip br_dst; 146 147 br_dst.u.ip6 = *dst; 
148 br_dst.proto = htons(ETH_P_IPV6); 149 br_dst.vid = vid; 150 151 return br_mdb_ip_get(mdb, &br_dst); 152 } 153 #endif 154 155 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 156 struct sk_buff *skb, u16 vid) 157 { 158 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); 159 struct br_ip ip; 160 161 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) 162 return NULL; 163 164 if (BR_INPUT_SKB_CB(skb)->igmp) 165 return NULL; 166 167 ip.proto = skb->protocol; 168 ip.vid = vid; 169 170 switch (skb->protocol) { 171 case htons(ETH_P_IP): 172 ip.u.ip4 = ip_hdr(skb)->daddr; 173 break; 174 #if IS_ENABLED(CONFIG_IPV6) 175 case htons(ETH_P_IPV6): 176 ip.u.ip6 = ipv6_hdr(skb)->daddr; 177 break; 178 #endif 179 default: 180 return NULL; 181 } 182 183 return br_mdb_ip_get(mdb, &ip); 184 } 185 186 static void br_mdb_free(struct rcu_head *head) 187 { 188 struct net_bridge_mdb_htable *mdb = 189 container_of(head, struct net_bridge_mdb_htable, rcu); 190 struct net_bridge_mdb_htable *old = mdb->old; 191 192 mdb->old = NULL; 193 kfree(old->mhash); 194 kfree(old); 195 } 196 197 static int br_mdb_copy(struct net_bridge_mdb_htable *new, 198 struct net_bridge_mdb_htable *old, 199 int elasticity) 200 { 201 struct net_bridge_mdb_entry *mp; 202 int maxlen; 203 int len; 204 int i; 205 206 for (i = 0; i < old->max; i++) 207 hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver]) 208 hlist_add_head(&mp->hlist[new->ver], 209 &new->mhash[br_ip_hash(new, &mp->addr)]); 210 211 if (!elasticity) 212 return 0; 213 214 maxlen = 0; 215 for (i = 0; i < new->max; i++) { 216 len = 0; 217 hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver]) 218 len++; 219 if (len > maxlen) 220 maxlen = len; 221 } 222 223 return maxlen > elasticity ? 
-EINVAL : 0; 224 } 225 226 void br_multicast_free_pg(struct rcu_head *head) 227 { 228 struct net_bridge_port_group *p = 229 container_of(head, struct net_bridge_port_group, rcu); 230 231 kfree(p); 232 } 233 234 static void br_multicast_free_group(struct rcu_head *head) 235 { 236 struct net_bridge_mdb_entry *mp = 237 container_of(head, struct net_bridge_mdb_entry, rcu); 238 239 kfree(mp); 240 } 241 242 static void br_multicast_group_expired(struct timer_list *t) 243 { 244 struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer); 245 struct net_bridge *br = mp->br; 246 struct net_bridge_mdb_htable *mdb; 247 248 spin_lock(&br->multicast_lock); 249 if (!netif_running(br->dev) || timer_pending(&mp->timer)) 250 goto out; 251 252 mp->host_joined = false; 253 br_mdb_notify(br->dev, NULL, &mp->addr, RTM_DELMDB, 0); 254 255 if (mp->ports) 256 goto out; 257 258 mdb = mlock_dereference(br->mdb, br); 259 260 hlist_del_rcu(&mp->hlist[mdb->ver]); 261 mdb->size--; 262 263 call_rcu_bh(&mp->rcu, br_multicast_free_group); 264 265 out: 266 spin_unlock(&br->multicast_lock); 267 } 268 269 static void br_multicast_del_pg(struct net_bridge *br, 270 struct net_bridge_port_group *pg) 271 { 272 struct net_bridge_mdb_htable *mdb; 273 struct net_bridge_mdb_entry *mp; 274 struct net_bridge_port_group *p; 275 struct net_bridge_port_group __rcu **pp; 276 277 mdb = mlock_dereference(br->mdb, br); 278 279 mp = br_mdb_ip_get(mdb, &pg->addr); 280 if (WARN_ON(!mp)) 281 return; 282 283 for (pp = &mp->ports; 284 (p = mlock_dereference(*pp, br)) != NULL; 285 pp = &p->next) { 286 if (p != pg) 287 continue; 288 289 rcu_assign_pointer(*pp, p->next); 290 hlist_del_init(&p->mglist); 291 del_timer(&p->timer); 292 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, 293 p->flags); 294 call_rcu_bh(&p->rcu, br_multicast_free_pg); 295 296 if (!mp->ports && !mp->host_joined && 297 netif_running(br->dev)) 298 mod_timer(&mp->timer, jiffies); 299 300 return; 301 } 302 303 WARN_ON(1); 304 } 305 306 static void 
br_multicast_port_group_expired(struct timer_list *t) 307 { 308 struct net_bridge_port_group *pg = from_timer(pg, t, timer); 309 struct net_bridge *br = pg->port->br; 310 311 spin_lock(&br->multicast_lock); 312 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 313 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) 314 goto out; 315 316 br_multicast_del_pg(br, pg); 317 318 out: 319 spin_unlock(&br->multicast_lock); 320 } 321 322 static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, 323 int elasticity) 324 { 325 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1); 326 struct net_bridge_mdb_htable *mdb; 327 int err; 328 329 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC); 330 if (!mdb) 331 return -ENOMEM; 332 333 mdb->max = max; 334 mdb->old = old; 335 336 mdb->mhash = kcalloc(max, sizeof(*mdb->mhash), GFP_ATOMIC); 337 if (!mdb->mhash) { 338 kfree(mdb); 339 return -ENOMEM; 340 } 341 342 mdb->size = old ? old->size : 0; 343 mdb->ver = old ? 
old->ver ^ 1 : 0; 344 345 if (!old || elasticity) 346 get_random_bytes(&mdb->secret, sizeof(mdb->secret)); 347 else 348 mdb->secret = old->secret; 349 350 if (!old) 351 goto out; 352 353 err = br_mdb_copy(mdb, old, elasticity); 354 if (err) { 355 kfree(mdb->mhash); 356 kfree(mdb); 357 return err; 358 } 359 360 br_mdb_rehash_seq++; 361 call_rcu_bh(&mdb->rcu, br_mdb_free); 362 363 out: 364 rcu_assign_pointer(*mdbp, mdb); 365 366 return 0; 367 } 368 369 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 370 __be32 group, 371 u8 *igmp_type) 372 { 373 struct igmpv3_query *ihv3; 374 size_t igmp_hdr_size; 375 struct sk_buff *skb; 376 struct igmphdr *ih; 377 struct ethhdr *eth; 378 struct iphdr *iph; 379 380 igmp_hdr_size = sizeof(*ih); 381 if (br->multicast_igmp_version == 3) 382 igmp_hdr_size = sizeof(*ihv3); 383 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + 384 igmp_hdr_size + 4); 385 if (!skb) 386 goto out; 387 388 skb->protocol = htons(ETH_P_IP); 389 390 skb_reset_mac_header(skb); 391 eth = eth_hdr(skb); 392 393 ether_addr_copy(eth->h_source, br->dev->dev_addr); 394 eth->h_dest[0] = 1; 395 eth->h_dest[1] = 0; 396 eth->h_dest[2] = 0x5e; 397 eth->h_dest[3] = 0; 398 eth->h_dest[4] = 0; 399 eth->h_dest[5] = 1; 400 eth->h_proto = htons(ETH_P_IP); 401 skb_put(skb, sizeof(*eth)); 402 403 skb_set_network_header(skb, skb->len); 404 iph = ip_hdr(skb); 405 406 iph->version = 4; 407 iph->ihl = 6; 408 iph->tos = 0xc0; 409 iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4); 410 iph->id = 0; 411 iph->frag_off = htons(IP_DF); 412 iph->ttl = 1; 413 iph->protocol = IPPROTO_IGMP; 414 iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ? 
415 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0; 416 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 417 ((u8 *)&iph[1])[0] = IPOPT_RA; 418 ((u8 *)&iph[1])[1] = 4; 419 ((u8 *)&iph[1])[2] = 0; 420 ((u8 *)&iph[1])[3] = 0; 421 ip_send_check(iph); 422 skb_put(skb, 24); 423 424 skb_set_transport_header(skb, skb->len); 425 *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY; 426 427 switch (br->multicast_igmp_version) { 428 case 2: 429 ih = igmp_hdr(skb); 430 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 431 ih->code = (group ? br->multicast_last_member_interval : 432 br->multicast_query_response_interval) / 433 (HZ / IGMP_TIMER_SCALE); 434 ih->group = group; 435 ih->csum = 0; 436 ih->csum = ip_compute_csum((void *)ih, sizeof(*ih)); 437 break; 438 case 3: 439 ihv3 = igmpv3_query_hdr(skb); 440 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY; 441 ihv3->code = (group ? br->multicast_last_member_interval : 442 br->multicast_query_response_interval) / 443 (HZ / IGMP_TIMER_SCALE); 444 ihv3->group = group; 445 ihv3->qqic = br->multicast_query_interval / HZ; 446 ihv3->nsrcs = 0; 447 ihv3->resv = 0; 448 ihv3->suppress = 0; 449 ihv3->qrv = 2; 450 ihv3->csum = 0; 451 ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3)); 452 break; 453 } 454 455 skb_put(skb, igmp_hdr_size); 456 __skb_pull(skb, sizeof(*eth)); 457 458 out: 459 return skb; 460 } 461 462 #if IS_ENABLED(CONFIG_IPV6) 463 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 464 const struct in6_addr *grp, 465 u8 *igmp_type) 466 { 467 struct mld2_query *mld2q; 468 unsigned long interval; 469 struct ipv6hdr *ip6h; 470 struct mld_msg *mldq; 471 size_t mld_hdr_size; 472 struct sk_buff *skb; 473 struct ethhdr *eth; 474 u8 *hopopt; 475 476 mld_hdr_size = sizeof(*mldq); 477 if (br->multicast_mld_version == 2) 478 mld_hdr_size = sizeof(*mld2q); 479 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + 480 8 + mld_hdr_size); 481 if (!skb) 482 goto out; 483 484 skb->protocol = htons(ETH_P_IPV6); 485 486 /* Ethernet 
header */ 487 skb_reset_mac_header(skb); 488 eth = eth_hdr(skb); 489 490 ether_addr_copy(eth->h_source, br->dev->dev_addr); 491 eth->h_proto = htons(ETH_P_IPV6); 492 skb_put(skb, sizeof(*eth)); 493 494 /* IPv6 header + HbH option */ 495 skb_set_network_header(skb, skb->len); 496 ip6h = ipv6_hdr(skb); 497 498 *(__force __be32 *)ip6h = htonl(0x60000000); 499 ip6h->payload_len = htons(8 + mld_hdr_size); 500 ip6h->nexthdr = IPPROTO_HOPOPTS; 501 ip6h->hop_limit = 1; 502 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 503 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 504 &ip6h->saddr)) { 505 kfree_skb(skb); 506 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false); 507 return NULL; 508 } 509 510 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true); 511 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 512 513 hopopt = (u8 *)(ip6h + 1); 514 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 515 hopopt[1] = 0; /* length of HbH */ 516 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 517 hopopt[3] = 2; /* Length of RA Option */ 518 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 519 hopopt[5] = 0; 520 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ 521 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ 522 523 skb_put(skb, sizeof(*ip6h) + 8); 524 525 /* ICMPv6 */ 526 skb_set_transport_header(skb, skb->len); 527 interval = ipv6_addr_any(grp) ? 
528 br->multicast_query_response_interval : 529 br->multicast_last_member_interval; 530 *igmp_type = ICMPV6_MGM_QUERY; 531 switch (br->multicast_mld_version) { 532 case 1: 533 mldq = (struct mld_msg *)icmp6_hdr(skb); 534 mldq->mld_type = ICMPV6_MGM_QUERY; 535 mldq->mld_code = 0; 536 mldq->mld_cksum = 0; 537 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 538 mldq->mld_reserved = 0; 539 mldq->mld_mca = *grp; 540 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 541 sizeof(*mldq), IPPROTO_ICMPV6, 542 csum_partial(mldq, 543 sizeof(*mldq), 544 0)); 545 break; 546 case 2: 547 mld2q = (struct mld2_query *)icmp6_hdr(skb); 548 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval)); 549 mld2q->mld2q_type = ICMPV6_MGM_QUERY; 550 mld2q->mld2q_code = 0; 551 mld2q->mld2q_cksum = 0; 552 mld2q->mld2q_resv1 = 0; 553 mld2q->mld2q_resv2 = 0; 554 mld2q->mld2q_suppress = 0; 555 mld2q->mld2q_qrv = 2; 556 mld2q->mld2q_nsrcs = 0; 557 mld2q->mld2q_qqic = br->multicast_query_interval / HZ; 558 mld2q->mld2q_mca = *grp; 559 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 560 sizeof(*mld2q), 561 IPPROTO_ICMPV6, 562 csum_partial(mld2q, 563 sizeof(*mld2q), 564 0)); 565 break; 566 } 567 skb_put(skb, mld_hdr_size); 568 569 __skb_pull(skb, sizeof(*eth)); 570 571 out: 572 return skb; 573 } 574 #endif 575 576 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 577 struct br_ip *addr, 578 u8 *igmp_type) 579 { 580 switch (addr->proto) { 581 case htons(ETH_P_IP): 582 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type); 583 #if IS_ENABLED(CONFIG_IPV6) 584 case htons(ETH_P_IPV6): 585 return br_ip6_multicast_alloc_query(br, &addr->u.ip6, 586 igmp_type); 587 #endif 588 } 589 return NULL; 590 } 591 592 static struct net_bridge_mdb_entry *br_multicast_get_group( 593 struct net_bridge *br, struct net_bridge_port *port, 594 struct br_ip *group, int hash) 595 { 596 struct net_bridge_mdb_htable *mdb; 597 struct net_bridge_mdb_entry *mp; 
598 unsigned int count = 0; 599 unsigned int max; 600 int elasticity; 601 int err; 602 603 mdb = rcu_dereference_protected(br->mdb, 1); 604 hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 605 count++; 606 if (unlikely(br_ip_equal(group, &mp->addr))) 607 return mp; 608 } 609 610 elasticity = 0; 611 max = mdb->max; 612 613 if (unlikely(count > br->hash_elasticity && count)) { 614 if (net_ratelimit()) 615 br_info(br, "Multicast hash table " 616 "chain limit reached: %s\n", 617 port ? port->dev->name : br->dev->name); 618 619 elasticity = br->hash_elasticity; 620 } 621 622 if (mdb->size >= max) { 623 max *= 2; 624 if (unlikely(max > br->hash_max)) { 625 br_warn(br, "Multicast hash table maximum of %d " 626 "reached, disabling snooping: %s\n", 627 br->hash_max, 628 port ? port->dev->name : br->dev->name); 629 err = -E2BIG; 630 disable: 631 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false); 632 goto err; 633 } 634 } 635 636 if (max > mdb->max || elasticity) { 637 if (mdb->old) { 638 if (net_ratelimit()) 639 br_info(br, "Multicast hash table " 640 "on fire: %s\n", 641 port ? port->dev->name : br->dev->name); 642 err = -EEXIST; 643 goto err; 644 } 645 646 err = br_mdb_rehash(&br->mdb, max, elasticity); 647 if (err) { 648 br_warn(br, "Cannot rehash multicast " 649 "hash table, disabling snooping: %s, %d, %d\n", 650 port ? 
port->dev->name : br->dev->name, 651 mdb->size, err); 652 goto disable; 653 } 654 655 err = -EAGAIN; 656 goto err; 657 } 658 659 return NULL; 660 661 err: 662 mp = ERR_PTR(err); 663 return mp; 664 } 665 666 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br, 667 struct net_bridge_port *p, 668 struct br_ip *group) 669 { 670 struct net_bridge_mdb_htable *mdb; 671 struct net_bridge_mdb_entry *mp; 672 int hash; 673 int err; 674 675 mdb = rcu_dereference_protected(br->mdb, 1); 676 if (!mdb) { 677 err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0); 678 if (err) 679 return ERR_PTR(err); 680 goto rehash; 681 } 682 683 hash = br_ip_hash(mdb, group); 684 mp = br_multicast_get_group(br, p, group, hash); 685 switch (PTR_ERR(mp)) { 686 case 0: 687 break; 688 689 case -EAGAIN: 690 rehash: 691 mdb = rcu_dereference_protected(br->mdb, 1); 692 hash = br_ip_hash(mdb, group); 693 break; 694 695 default: 696 goto out; 697 } 698 699 mp = kzalloc(sizeof(*mp), GFP_ATOMIC); 700 if (unlikely(!mp)) 701 return ERR_PTR(-ENOMEM); 702 703 mp->br = br; 704 mp->addr = *group; 705 timer_setup(&mp->timer, br_multicast_group_expired, 0); 706 707 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 708 mdb->size++; 709 710 out: 711 return mp; 712 } 713 714 struct net_bridge_port_group *br_multicast_new_port_group( 715 struct net_bridge_port *port, 716 struct br_ip *group, 717 struct net_bridge_port_group __rcu *next, 718 unsigned char flags, 719 const unsigned char *src) 720 { 721 struct net_bridge_port_group *p; 722 723 p = kzalloc(sizeof(*p), GFP_ATOMIC); 724 if (unlikely(!p)) 725 return NULL; 726 727 p->addr = *group; 728 p->port = port; 729 p->flags = flags; 730 rcu_assign_pointer(p->next, next); 731 hlist_add_head(&p->mglist, &port->mglist); 732 timer_setup(&p->timer, br_multicast_port_group_expired, 0); 733 734 if (src) 735 memcpy(p->eth_addr, src, ETH_ALEN); 736 else 737 memset(p->eth_addr, 0xff, ETH_ALEN); 738 739 return p; 740 } 741 742 static bool 
br_port_group_equal(struct net_bridge_port_group *p, 743 struct net_bridge_port *port, 744 const unsigned char *src) 745 { 746 if (p->port != port) 747 return false; 748 749 if (!(port->flags & BR_MULTICAST_TO_UNICAST)) 750 return true; 751 752 return ether_addr_equal(src, p->eth_addr); 753 } 754 755 static int br_multicast_add_group(struct net_bridge *br, 756 struct net_bridge_port *port, 757 struct br_ip *group, 758 const unsigned char *src) 759 { 760 struct net_bridge_port_group __rcu **pp; 761 struct net_bridge_port_group *p; 762 struct net_bridge_mdb_entry *mp; 763 unsigned long now = jiffies; 764 int err; 765 766 spin_lock(&br->multicast_lock); 767 if (!netif_running(br->dev) || 768 (port && port->state == BR_STATE_DISABLED)) 769 goto out; 770 771 mp = br_multicast_new_group(br, port, group); 772 err = PTR_ERR(mp); 773 if (IS_ERR(mp)) 774 goto err; 775 776 if (!port) { 777 if (!mp->host_joined) { 778 mp->host_joined = true; 779 br_mdb_notify(br->dev, NULL, &mp->addr, RTM_NEWMDB, 0); 780 } 781 mod_timer(&mp->timer, now + br->multicast_membership_interval); 782 goto out; 783 } 784 785 for (pp = &mp->ports; 786 (p = mlock_dereference(*pp, br)) != NULL; 787 pp = &p->next) { 788 if (br_port_group_equal(p, port, src)) 789 goto found; 790 if ((unsigned long)p->port < (unsigned long)port) 791 break; 792 } 793 794 p = br_multicast_new_port_group(port, group, *pp, 0, src); 795 if (unlikely(!p)) 796 goto err; 797 rcu_assign_pointer(*pp, p); 798 br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0); 799 800 found: 801 mod_timer(&p->timer, now + br->multicast_membership_interval); 802 out: 803 err = 0; 804 805 err: 806 spin_unlock(&br->multicast_lock); 807 return err; 808 } 809 810 static int br_ip4_multicast_add_group(struct net_bridge *br, 811 struct net_bridge_port *port, 812 __be32 group, 813 __u16 vid, 814 const unsigned char *src) 815 { 816 struct br_ip br_group; 817 818 if (ipv4_is_local_multicast(group)) 819 return 0; 820 821 br_group.u.ip4 = group; 822 
br_group.proto = htons(ETH_P_IP); 823 br_group.vid = vid; 824 825 return br_multicast_add_group(br, port, &br_group, src); 826 } 827 828 #if IS_ENABLED(CONFIG_IPV6) 829 static int br_ip6_multicast_add_group(struct net_bridge *br, 830 struct net_bridge_port *port, 831 const struct in6_addr *group, 832 __u16 vid, 833 const unsigned char *src) 834 { 835 struct br_ip br_group; 836 837 if (ipv6_addr_is_ll_all_nodes(group)) 838 return 0; 839 840 br_group.u.ip6 = *group; 841 br_group.proto = htons(ETH_P_IPV6); 842 br_group.vid = vid; 843 844 return br_multicast_add_group(br, port, &br_group, src); 845 } 846 #endif 847 848 static void br_multicast_router_expired(struct timer_list *t) 849 { 850 struct net_bridge_port *port = 851 from_timer(port, t, multicast_router_timer); 852 struct net_bridge *br = port->br; 853 854 spin_lock(&br->multicast_lock); 855 if (port->multicast_router == MDB_RTR_TYPE_DISABLED || 856 port->multicast_router == MDB_RTR_TYPE_PERM || 857 timer_pending(&port->multicast_router_timer)) 858 goto out; 859 860 __del_port_router(port); 861 out: 862 spin_unlock(&br->multicast_lock); 863 } 864 865 static void br_mc_router_state_change(struct net_bridge *p, 866 bool is_mc_router) 867 { 868 struct switchdev_attr attr = { 869 .orig_dev = p->dev, 870 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER, 871 .flags = SWITCHDEV_F_DEFER, 872 .u.mrouter = is_mc_router, 873 }; 874 875 switchdev_port_attr_set(p->dev, &attr); 876 } 877 878 static void br_multicast_local_router_expired(struct timer_list *t) 879 { 880 struct net_bridge *br = from_timer(br, t, multicast_router_timer); 881 882 spin_lock(&br->multicast_lock); 883 if (br->multicast_router == MDB_RTR_TYPE_DISABLED || 884 br->multicast_router == MDB_RTR_TYPE_PERM || 885 timer_pending(&br->multicast_router_timer)) 886 goto out; 887 888 br_mc_router_state_change(br, false); 889 out: 890 spin_unlock(&br->multicast_lock); 891 } 892 893 static void br_multicast_querier_expired(struct net_bridge *br, 894 struct 
bridge_mcast_own_query *query) 895 { 896 spin_lock(&br->multicast_lock); 897 if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED)) 898 goto out; 899 900 br_multicast_start_querier(br, query); 901 902 out: 903 spin_unlock(&br->multicast_lock); 904 } 905 906 static void br_ip4_multicast_querier_expired(struct timer_list *t) 907 { 908 struct net_bridge *br = from_timer(br, t, ip4_other_query.timer); 909 910 br_multicast_querier_expired(br, &br->ip4_own_query); 911 } 912 913 #if IS_ENABLED(CONFIG_IPV6) 914 static void br_ip6_multicast_querier_expired(struct timer_list *t) 915 { 916 struct net_bridge *br = from_timer(br, t, ip6_other_query.timer); 917 918 br_multicast_querier_expired(br, &br->ip6_own_query); 919 } 920 #endif 921 922 static void br_multicast_select_own_querier(struct net_bridge *br, 923 struct br_ip *ip, 924 struct sk_buff *skb) 925 { 926 if (ip->proto == htons(ETH_P_IP)) 927 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr; 928 #if IS_ENABLED(CONFIG_IPV6) 929 else 930 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr; 931 #endif 932 } 933 934 static void __br_multicast_send_query(struct net_bridge *br, 935 struct net_bridge_port *port, 936 struct br_ip *ip) 937 { 938 struct sk_buff *skb; 939 u8 igmp_type; 940 941 skb = br_multicast_alloc_query(br, ip, &igmp_type); 942 if (!skb) 943 return; 944 945 if (port) { 946 skb->dev = port->dev; 947 br_multicast_count(br, port, skb, igmp_type, 948 BR_MCAST_DIR_TX); 949 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, 950 dev_net(port->dev), NULL, skb, NULL, skb->dev, 951 br_dev_queue_push_xmit); 952 } else { 953 br_multicast_select_own_querier(br, ip, skb); 954 br_multicast_count(br, port, skb, igmp_type, 955 BR_MCAST_DIR_RX); 956 netif_rx(skb); 957 } 958 } 959 960 static void br_multicast_send_query(struct net_bridge *br, 961 struct net_bridge_port *port, 962 struct bridge_mcast_own_query *own_query) 963 { 964 struct bridge_mcast_other_query *other_query = NULL; 965 struct br_ip br_group; 966 unsigned 
long time; 967 968 if (!netif_running(br->dev) || 969 !br_opt_get(br, BROPT_MULTICAST_ENABLED) || 970 !br_opt_get(br, BROPT_MULTICAST_QUERIER)) 971 return; 972 973 memset(&br_group.u, 0, sizeof(br_group.u)); 974 975 if (port ? (own_query == &port->ip4_own_query) : 976 (own_query == &br->ip4_own_query)) { 977 other_query = &br->ip4_other_query; 978 br_group.proto = htons(ETH_P_IP); 979 #if IS_ENABLED(CONFIG_IPV6) 980 } else { 981 other_query = &br->ip6_other_query; 982 br_group.proto = htons(ETH_P_IPV6); 983 #endif 984 } 985 986 if (!other_query || timer_pending(&other_query->timer)) 987 return; 988 989 __br_multicast_send_query(br, port, &br_group); 990 991 time = jiffies; 992 time += own_query->startup_sent < br->multicast_startup_query_count ? 993 br->multicast_startup_query_interval : 994 br->multicast_query_interval; 995 mod_timer(&own_query->timer, time); 996 } 997 998 static void 999 br_multicast_port_query_expired(struct net_bridge_port *port, 1000 struct bridge_mcast_own_query *query) 1001 { 1002 struct net_bridge *br = port->br; 1003 1004 spin_lock(&br->multicast_lock); 1005 if (port->state == BR_STATE_DISABLED || 1006 port->state == BR_STATE_BLOCKING) 1007 goto out; 1008 1009 if (query->startup_sent < br->multicast_startup_query_count) 1010 query->startup_sent++; 1011 1012 br_multicast_send_query(port->br, port, query); 1013 1014 out: 1015 spin_unlock(&br->multicast_lock); 1016 } 1017 1018 static void br_ip4_multicast_port_query_expired(struct timer_list *t) 1019 { 1020 struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer); 1021 1022 br_multicast_port_query_expired(port, &port->ip4_own_query); 1023 } 1024 1025 #if IS_ENABLED(CONFIG_IPV6) 1026 static void br_ip6_multicast_port_query_expired(struct timer_list *t) 1027 { 1028 struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer); 1029 1030 br_multicast_port_query_expired(port, &port->ip6_own_query); 1031 } 1032 #endif 1033 1034 static void br_mc_disabled_update(struct 
net_device *dev, bool value) 1035 { 1036 struct switchdev_attr attr = { 1037 .orig_dev = dev, 1038 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, 1039 .flags = SWITCHDEV_F_DEFER, 1040 .u.mc_disabled = !value, 1041 }; 1042 1043 switchdev_port_attr_set(dev, &attr); 1044 } 1045 1046 int br_multicast_add_port(struct net_bridge_port *port) 1047 { 1048 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 1049 1050 timer_setup(&port->multicast_router_timer, 1051 br_multicast_router_expired, 0); 1052 timer_setup(&port->ip4_own_query.timer, 1053 br_ip4_multicast_port_query_expired, 0); 1054 #if IS_ENABLED(CONFIG_IPV6) 1055 timer_setup(&port->ip6_own_query.timer, 1056 br_ip6_multicast_port_query_expired, 0); 1057 #endif 1058 br_mc_disabled_update(port->dev, 1059 br_opt_get(port->br, BROPT_MULTICAST_ENABLED)); 1060 1061 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 1062 if (!port->mcast_stats) 1063 return -ENOMEM; 1064 1065 return 0; 1066 } 1067 1068 void br_multicast_del_port(struct net_bridge_port *port) 1069 { 1070 struct net_bridge *br = port->br; 1071 struct net_bridge_port_group *pg; 1072 struct hlist_node *n; 1073 1074 /* Take care of the remaining groups, only perm ones should be left */ 1075 spin_lock_bh(&br->multicast_lock); 1076 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 1077 br_multicast_del_pg(br, pg); 1078 spin_unlock_bh(&br->multicast_lock); 1079 del_timer_sync(&port->multicast_router_timer); 1080 free_percpu(port->mcast_stats); 1081 } 1082 1083 static void br_multicast_enable(struct bridge_mcast_own_query *query) 1084 { 1085 query->startup_sent = 0; 1086 1087 if (try_to_del_timer_sync(&query->timer) >= 0 || 1088 del_timer(&query->timer)) 1089 mod_timer(&query->timer, jiffies); 1090 } 1091 1092 static void __br_multicast_enable_port(struct net_bridge_port *port) 1093 { 1094 struct net_bridge *br = port->br; 1095 1096 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev)) 1097 return; 1098 1099 
br_multicast_enable(&port->ip4_own_query); 1100 #if IS_ENABLED(CONFIG_IPV6) 1101 br_multicast_enable(&port->ip6_own_query); 1102 #endif 1103 if (port->multicast_router == MDB_RTR_TYPE_PERM && 1104 hlist_unhashed(&port->rlist)) 1105 br_multicast_add_router(br, port); 1106 } 1107 1108 void br_multicast_enable_port(struct net_bridge_port *port) 1109 { 1110 struct net_bridge *br = port->br; 1111 1112 spin_lock(&br->multicast_lock); 1113 __br_multicast_enable_port(port); 1114 spin_unlock(&br->multicast_lock); 1115 } 1116 1117 void br_multicast_disable_port(struct net_bridge_port *port) 1118 { 1119 struct net_bridge *br = port->br; 1120 struct net_bridge_port_group *pg; 1121 struct hlist_node *n; 1122 1123 spin_lock(&br->multicast_lock); 1124 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 1125 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT)) 1126 br_multicast_del_pg(br, pg); 1127 1128 __del_port_router(port); 1129 1130 del_timer(&port->multicast_router_timer); 1131 del_timer(&port->ip4_own_query.timer); 1132 #if IS_ENABLED(CONFIG_IPV6) 1133 del_timer(&port->ip6_own_query.timer); 1134 #endif 1135 spin_unlock(&br->multicast_lock); 1136 } 1137 1138 static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 1139 struct net_bridge_port *port, 1140 struct sk_buff *skb, 1141 u16 vid) 1142 { 1143 const unsigned char *src; 1144 struct igmpv3_report *ih; 1145 struct igmpv3_grec *grec; 1146 int i; 1147 int len; 1148 int num; 1149 int type; 1150 int err = 0; 1151 __be32 group; 1152 1153 ih = igmpv3_report_hdr(skb); 1154 num = ntohs(ih->ngrec); 1155 len = skb_transport_offset(skb) + sizeof(*ih); 1156 1157 for (i = 0; i < num; i++) { 1158 len += sizeof(*grec); 1159 if (!pskb_may_pull(skb, len)) 1160 return -EINVAL; 1161 1162 grec = (void *)(skb->data + len - sizeof(*grec)); 1163 group = grec->grec_mca; 1164 type = grec->grec_type; 1165 1166 len += ntohs(grec->grec_nsrcs) * 4; 1167 if (!pskb_may_pull(skb, len)) 1168 return -EINVAL; 1169 1170 /* We treat this as an IGMPv2 report 
for now. */ 1171 switch (type) { 1172 case IGMPV3_MODE_IS_INCLUDE: 1173 case IGMPV3_MODE_IS_EXCLUDE: 1174 case IGMPV3_CHANGE_TO_INCLUDE: 1175 case IGMPV3_CHANGE_TO_EXCLUDE: 1176 case IGMPV3_ALLOW_NEW_SOURCES: 1177 case IGMPV3_BLOCK_OLD_SOURCES: 1178 break; 1179 1180 default: 1181 continue; 1182 } 1183 1184 src = eth_hdr(skb)->h_source; 1185 if ((type == IGMPV3_CHANGE_TO_INCLUDE || 1186 type == IGMPV3_MODE_IS_INCLUDE) && 1187 ntohs(grec->grec_nsrcs) == 0) { 1188 br_ip4_multicast_leave_group(br, port, group, vid, src); 1189 } else { 1190 err = br_ip4_multicast_add_group(br, port, group, vid, 1191 src); 1192 if (err) 1193 break; 1194 } 1195 } 1196 1197 return err; 1198 } 1199 1200 #if IS_ENABLED(CONFIG_IPV6) 1201 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 1202 struct net_bridge_port *port, 1203 struct sk_buff *skb, 1204 u16 vid) 1205 { 1206 const unsigned char *src; 1207 struct icmp6hdr *icmp6h; 1208 struct mld2_grec *grec; 1209 int i; 1210 int len; 1211 int num; 1212 int err = 0; 1213 1214 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1215 return -EINVAL; 1216 1217 icmp6h = icmp6_hdr(skb); 1218 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1219 len = skb_transport_offset(skb) + sizeof(*icmp6h); 1220 1221 for (i = 0; i < num; i++) { 1222 __be16 *nsrcs, _nsrcs; 1223 1224 nsrcs = skb_header_pointer(skb, 1225 len + offsetof(struct mld2_grec, 1226 grec_nsrcs), 1227 sizeof(_nsrcs), &_nsrcs); 1228 if (!nsrcs) 1229 return -EINVAL; 1230 1231 if (!pskb_may_pull(skb, 1232 len + sizeof(*grec) + 1233 sizeof(struct in6_addr) * ntohs(*nsrcs))) 1234 return -EINVAL; 1235 1236 grec = (struct mld2_grec *)(skb->data + len); 1237 len += sizeof(*grec) + 1238 sizeof(struct in6_addr) * ntohs(*nsrcs); 1239 1240 /* We treat these as MLDv1 reports for now. 
*/ 1241 switch (grec->grec_type) { 1242 case MLD2_MODE_IS_INCLUDE: 1243 case MLD2_MODE_IS_EXCLUDE: 1244 case MLD2_CHANGE_TO_INCLUDE: 1245 case MLD2_CHANGE_TO_EXCLUDE: 1246 case MLD2_ALLOW_NEW_SOURCES: 1247 case MLD2_BLOCK_OLD_SOURCES: 1248 break; 1249 1250 default: 1251 continue; 1252 } 1253 1254 src = eth_hdr(skb)->h_source; 1255 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 1256 grec->grec_type == MLD2_MODE_IS_INCLUDE) && 1257 ntohs(*nsrcs) == 0) { 1258 br_ip6_multicast_leave_group(br, port, &grec->grec_mca, 1259 vid, src); 1260 } else { 1261 err = br_ip6_multicast_add_group(br, port, 1262 &grec->grec_mca, vid, 1263 src); 1264 if (err) 1265 break; 1266 } 1267 } 1268 1269 return err; 1270 } 1271 #endif 1272 1273 static bool br_ip4_multicast_select_querier(struct net_bridge *br, 1274 struct net_bridge_port *port, 1275 __be32 saddr) 1276 { 1277 if (!timer_pending(&br->ip4_own_query.timer) && 1278 !timer_pending(&br->ip4_other_query.timer)) 1279 goto update; 1280 1281 if (!br->ip4_querier.addr.u.ip4) 1282 goto update; 1283 1284 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4)) 1285 goto update; 1286 1287 return false; 1288 1289 update: 1290 br->ip4_querier.addr.u.ip4 = saddr; 1291 1292 /* update protected by general multicast_lock by caller */ 1293 rcu_assign_pointer(br->ip4_querier.port, port); 1294 1295 return true; 1296 } 1297 1298 #if IS_ENABLED(CONFIG_IPV6) 1299 static bool br_ip6_multicast_select_querier(struct net_bridge *br, 1300 struct net_bridge_port *port, 1301 struct in6_addr *saddr) 1302 { 1303 if (!timer_pending(&br->ip6_own_query.timer) && 1304 !timer_pending(&br->ip6_other_query.timer)) 1305 goto update; 1306 1307 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0) 1308 goto update; 1309 1310 return false; 1311 1312 update: 1313 br->ip6_querier.addr.u.ip6 = *saddr; 1314 1315 /* update protected by general multicast_lock by caller */ 1316 rcu_assign_pointer(br->ip6_querier.port, port); 1317 1318 return true; 1319 } 1320 #endif 1321 
1322 static bool br_multicast_select_querier(struct net_bridge *br, 1323 struct net_bridge_port *port, 1324 struct br_ip *saddr) 1325 { 1326 switch (saddr->proto) { 1327 case htons(ETH_P_IP): 1328 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4); 1329 #if IS_ENABLED(CONFIG_IPV6) 1330 case htons(ETH_P_IPV6): 1331 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6); 1332 #endif 1333 } 1334 1335 return false; 1336 } 1337 1338 static void 1339 br_multicast_update_query_timer(struct net_bridge *br, 1340 struct bridge_mcast_other_query *query, 1341 unsigned long max_delay) 1342 { 1343 if (!timer_pending(&query->timer)) 1344 query->delay_time = jiffies + max_delay; 1345 1346 mod_timer(&query->timer, jiffies + br->multicast_querier_interval); 1347 } 1348 1349 static void br_port_mc_router_state_change(struct net_bridge_port *p, 1350 bool is_mc_router) 1351 { 1352 struct switchdev_attr attr = { 1353 .orig_dev = p->dev, 1354 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER, 1355 .flags = SWITCHDEV_F_DEFER, 1356 .u.mrouter = is_mc_router, 1357 }; 1358 1359 switchdev_port_attr_set(p->dev, &attr); 1360 } 1361 1362 /* 1363 * Add port to router_list 1364 * list is maintained ordered by pointer value 1365 * and locked by br->multicast_lock and RCU 1366 */ 1367 static void br_multicast_add_router(struct net_bridge *br, 1368 struct net_bridge_port *port) 1369 { 1370 struct net_bridge_port *p; 1371 struct hlist_node *slot = NULL; 1372 1373 if (!hlist_unhashed(&port->rlist)) 1374 return; 1375 1376 hlist_for_each_entry(p, &br->router_list, rlist) { 1377 if ((unsigned long) port >= (unsigned long) p) 1378 break; 1379 slot = &p->rlist; 1380 } 1381 1382 if (slot) 1383 hlist_add_behind_rcu(&port->rlist, slot); 1384 else 1385 hlist_add_head_rcu(&port->rlist, &br->router_list); 1386 br_rtr_notify(br->dev, port, RTM_NEWMDB); 1387 br_port_mc_router_state_change(port, true); 1388 } 1389 1390 static void br_multicast_mark_router(struct net_bridge *br, 1391 struct net_bridge_port 
*port) 1392 { 1393 unsigned long now = jiffies; 1394 1395 if (!port) { 1396 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) { 1397 if (!timer_pending(&br->multicast_router_timer)) 1398 br_mc_router_state_change(br, true); 1399 mod_timer(&br->multicast_router_timer, 1400 now + br->multicast_querier_interval); 1401 } 1402 return; 1403 } 1404 1405 if (port->multicast_router == MDB_RTR_TYPE_DISABLED || 1406 port->multicast_router == MDB_RTR_TYPE_PERM) 1407 return; 1408 1409 br_multicast_add_router(br, port); 1410 1411 mod_timer(&port->multicast_router_timer, 1412 now + br->multicast_querier_interval); 1413 } 1414 1415 static void br_multicast_query_received(struct net_bridge *br, 1416 struct net_bridge_port *port, 1417 struct bridge_mcast_other_query *query, 1418 struct br_ip *saddr, 1419 unsigned long max_delay) 1420 { 1421 if (!br_multicast_select_querier(br, port, saddr)) 1422 return; 1423 1424 br_multicast_update_query_timer(br, query, max_delay); 1425 1426 /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules, 1427 * the arrival port for IGMP Queries where the source address 1428 * is 0.0.0.0 should not be added to router port list. 
1429 */ 1430 if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) || 1431 saddr->proto == htons(ETH_P_IPV6)) 1432 br_multicast_mark_router(br, port); 1433 } 1434 1435 static void br_ip4_multicast_query(struct net_bridge *br, 1436 struct net_bridge_port *port, 1437 struct sk_buff *skb, 1438 u16 vid) 1439 { 1440 const struct iphdr *iph = ip_hdr(skb); 1441 struct igmphdr *ih = igmp_hdr(skb); 1442 struct net_bridge_mdb_entry *mp; 1443 struct igmpv3_query *ih3; 1444 struct net_bridge_port_group *p; 1445 struct net_bridge_port_group __rcu **pp; 1446 struct br_ip saddr; 1447 unsigned long max_delay; 1448 unsigned long now = jiffies; 1449 unsigned int offset = skb_transport_offset(skb); 1450 __be32 group; 1451 1452 spin_lock(&br->multicast_lock); 1453 if (!netif_running(br->dev) || 1454 (port && port->state == BR_STATE_DISABLED)) 1455 goto out; 1456 1457 group = ih->group; 1458 1459 if (skb->len == offset + sizeof(*ih)) { 1460 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); 1461 1462 if (!max_delay) { 1463 max_delay = 10 * HZ; 1464 group = 0; 1465 } 1466 } else if (skb->len >= offset + sizeof(*ih3)) { 1467 ih3 = igmpv3_query_hdr(skb); 1468 if (ih3->nsrcs) 1469 goto out; 1470 1471 max_delay = ih3->code ? 1472 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1473 } else { 1474 goto out; 1475 } 1476 1477 if (!group) { 1478 saddr.proto = htons(ETH_P_IP); 1479 saddr.u.ip4 = iph->saddr; 1480 1481 br_multicast_query_received(br, port, &br->ip4_other_query, 1482 &saddr, max_delay); 1483 goto out; 1484 } 1485 1486 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid); 1487 if (!mp) 1488 goto out; 1489 1490 max_delay *= br->multicast_last_member_count; 1491 1492 if (mp->host_joined && 1493 (timer_pending(&mp->timer) ? 
1494 time_after(mp->timer.expires, now + max_delay) : 1495 try_to_del_timer_sync(&mp->timer) >= 0)) 1496 mod_timer(&mp->timer, now + max_delay); 1497 1498 for (pp = &mp->ports; 1499 (p = mlock_dereference(*pp, br)) != NULL; 1500 pp = &p->next) { 1501 if (timer_pending(&p->timer) ? 1502 time_after(p->timer.expires, now + max_delay) : 1503 try_to_del_timer_sync(&p->timer) >= 0) 1504 mod_timer(&p->timer, now + max_delay); 1505 } 1506 1507 out: 1508 spin_unlock(&br->multicast_lock); 1509 } 1510 1511 #if IS_ENABLED(CONFIG_IPV6) 1512 static int br_ip6_multicast_query(struct net_bridge *br, 1513 struct net_bridge_port *port, 1514 struct sk_buff *skb, 1515 u16 vid) 1516 { 1517 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 1518 struct mld_msg *mld; 1519 struct net_bridge_mdb_entry *mp; 1520 struct mld2_query *mld2q; 1521 struct net_bridge_port_group *p; 1522 struct net_bridge_port_group __rcu **pp; 1523 struct br_ip saddr; 1524 unsigned long max_delay; 1525 unsigned long now = jiffies; 1526 unsigned int offset = skb_transport_offset(skb); 1527 const struct in6_addr *group = NULL; 1528 bool is_general_query; 1529 int err = 0; 1530 1531 spin_lock(&br->multicast_lock); 1532 if (!netif_running(br->dev) || 1533 (port && port->state == BR_STATE_DISABLED)) 1534 goto out; 1535 1536 if (skb->len == offset + sizeof(*mld)) { 1537 if (!pskb_may_pull(skb, offset + sizeof(*mld))) { 1538 err = -EINVAL; 1539 goto out; 1540 } 1541 mld = (struct mld_msg *) icmp6_hdr(skb); 1542 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); 1543 if (max_delay) 1544 group = &mld->mld_mca; 1545 } else { 1546 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) { 1547 err = -EINVAL; 1548 goto out; 1549 } 1550 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1551 if (!mld2q->mld2q_nsrcs) 1552 group = &mld2q->mld2q_mca; 1553 1554 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); 1555 } 1556 1557 is_general_query = group && ipv6_addr_any(group); 1558 1559 if (is_general_query) { 1560 saddr.proto = 
htons(ETH_P_IPV6); 1561 saddr.u.ip6 = ip6h->saddr; 1562 1563 br_multicast_query_received(br, port, &br->ip6_other_query, 1564 &saddr, max_delay); 1565 goto out; 1566 } else if (!group) { 1567 goto out; 1568 } 1569 1570 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid); 1571 if (!mp) 1572 goto out; 1573 1574 max_delay *= br->multicast_last_member_count; 1575 if (mp->host_joined && 1576 (timer_pending(&mp->timer) ? 1577 time_after(mp->timer.expires, now + max_delay) : 1578 try_to_del_timer_sync(&mp->timer) >= 0)) 1579 mod_timer(&mp->timer, now + max_delay); 1580 1581 for (pp = &mp->ports; 1582 (p = mlock_dereference(*pp, br)) != NULL; 1583 pp = &p->next) { 1584 if (timer_pending(&p->timer) ? 1585 time_after(p->timer.expires, now + max_delay) : 1586 try_to_del_timer_sync(&p->timer) >= 0) 1587 mod_timer(&p->timer, now + max_delay); 1588 } 1589 1590 out: 1591 spin_unlock(&br->multicast_lock); 1592 return err; 1593 } 1594 #endif 1595 1596 static void 1597 br_multicast_leave_group(struct net_bridge *br, 1598 struct net_bridge_port *port, 1599 struct br_ip *group, 1600 struct bridge_mcast_other_query *other_query, 1601 struct bridge_mcast_own_query *own_query, 1602 const unsigned char *src) 1603 { 1604 struct net_bridge_mdb_htable *mdb; 1605 struct net_bridge_mdb_entry *mp; 1606 struct net_bridge_port_group *p; 1607 unsigned long now; 1608 unsigned long time; 1609 1610 spin_lock(&br->multicast_lock); 1611 if (!netif_running(br->dev) || 1612 (port && port->state == BR_STATE_DISABLED)) 1613 goto out; 1614 1615 mdb = mlock_dereference(br->mdb, br); 1616 mp = br_mdb_ip_get(mdb, group); 1617 if (!mp) 1618 goto out; 1619 1620 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) { 1621 struct net_bridge_port_group __rcu **pp; 1622 1623 for (pp = &mp->ports; 1624 (p = mlock_dereference(*pp, br)) != NULL; 1625 pp = &p->next) { 1626 if (!br_port_group_equal(p, port, src)) 1627 continue; 1628 1629 rcu_assign_pointer(*pp, p->next); 1630 hlist_del_init(&p->mglist); 1631 
del_timer(&p->timer); 1632 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1633 br_mdb_notify(br->dev, port, group, RTM_DELMDB, 1634 p->flags); 1635 1636 if (!mp->ports && !mp->host_joined && 1637 netif_running(br->dev)) 1638 mod_timer(&mp->timer, jiffies); 1639 } 1640 goto out; 1641 } 1642 1643 if (timer_pending(&other_query->timer)) 1644 goto out; 1645 1646 if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) { 1647 __br_multicast_send_query(br, port, &mp->addr); 1648 1649 time = jiffies + br->multicast_last_member_count * 1650 br->multicast_last_member_interval; 1651 1652 mod_timer(&own_query->timer, time); 1653 1654 for (p = mlock_dereference(mp->ports, br); 1655 p != NULL; 1656 p = mlock_dereference(p->next, br)) { 1657 if (!br_port_group_equal(p, port, src)) 1658 continue; 1659 1660 if (!hlist_unhashed(&p->mglist) && 1661 (timer_pending(&p->timer) ? 1662 time_after(p->timer.expires, time) : 1663 try_to_del_timer_sync(&p->timer) >= 0)) { 1664 mod_timer(&p->timer, time); 1665 } 1666 1667 break; 1668 } 1669 } 1670 1671 now = jiffies; 1672 time = now + br->multicast_last_member_count * 1673 br->multicast_last_member_interval; 1674 1675 if (!port) { 1676 if (mp->host_joined && 1677 (timer_pending(&mp->timer) ? 1678 time_after(mp->timer.expires, time) : 1679 try_to_del_timer_sync(&mp->timer) >= 0)) { 1680 mod_timer(&mp->timer, time); 1681 } 1682 1683 goto out; 1684 } 1685 1686 for (p = mlock_dereference(mp->ports, br); 1687 p != NULL; 1688 p = mlock_dereference(p->next, br)) { 1689 if (p->port != port) 1690 continue; 1691 1692 if (!hlist_unhashed(&p->mglist) && 1693 (timer_pending(&p->timer) ? 
1694 time_after(p->timer.expires, time) : 1695 try_to_del_timer_sync(&p->timer) >= 0)) { 1696 mod_timer(&p->timer, time); 1697 } 1698 1699 break; 1700 } 1701 out: 1702 spin_unlock(&br->multicast_lock); 1703 } 1704 1705 static void br_ip4_multicast_leave_group(struct net_bridge *br, 1706 struct net_bridge_port *port, 1707 __be32 group, 1708 __u16 vid, 1709 const unsigned char *src) 1710 { 1711 struct br_ip br_group; 1712 struct bridge_mcast_own_query *own_query; 1713 1714 if (ipv4_is_local_multicast(group)) 1715 return; 1716 1717 own_query = port ? &port->ip4_own_query : &br->ip4_own_query; 1718 1719 br_group.u.ip4 = group; 1720 br_group.proto = htons(ETH_P_IP); 1721 br_group.vid = vid; 1722 1723 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query, 1724 own_query, src); 1725 } 1726 1727 #if IS_ENABLED(CONFIG_IPV6) 1728 static void br_ip6_multicast_leave_group(struct net_bridge *br, 1729 struct net_bridge_port *port, 1730 const struct in6_addr *group, 1731 __u16 vid, 1732 const unsigned char *src) 1733 { 1734 struct br_ip br_group; 1735 struct bridge_mcast_own_query *own_query; 1736 1737 if (ipv6_addr_is_ll_all_nodes(group)) 1738 return; 1739 1740 own_query = port ? 
&port->ip6_own_query : &br->ip6_own_query; 1741 1742 br_group.u.ip6 = *group; 1743 br_group.proto = htons(ETH_P_IPV6); 1744 br_group.vid = vid; 1745 1746 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query, 1747 own_query, src); 1748 } 1749 #endif 1750 1751 static void br_multicast_err_count(const struct net_bridge *br, 1752 const struct net_bridge_port *p, 1753 __be16 proto) 1754 { 1755 struct bridge_mcast_stats __percpu *stats; 1756 struct bridge_mcast_stats *pstats; 1757 1758 if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) 1759 return; 1760 1761 if (p) 1762 stats = p->mcast_stats; 1763 else 1764 stats = br->mcast_stats; 1765 if (WARN_ON(!stats)) 1766 return; 1767 1768 pstats = this_cpu_ptr(stats); 1769 1770 u64_stats_update_begin(&pstats->syncp); 1771 switch (proto) { 1772 case htons(ETH_P_IP): 1773 pstats->mstats.igmp_parse_errors++; 1774 break; 1775 #if IS_ENABLED(CONFIG_IPV6) 1776 case htons(ETH_P_IPV6): 1777 pstats->mstats.mld_parse_errors++; 1778 break; 1779 #endif 1780 } 1781 u64_stats_update_end(&pstats->syncp); 1782 } 1783 1784 static void br_multicast_pim(struct net_bridge *br, 1785 struct net_bridge_port *port, 1786 const struct sk_buff *skb) 1787 { 1788 unsigned int offset = skb_transport_offset(skb); 1789 struct pimhdr *pimhdr, _pimhdr; 1790 1791 pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr); 1792 if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION || 1793 pim_hdr_type(pimhdr) != PIM_TYPE_HELLO) 1794 return; 1795 1796 br_multicast_mark_router(br, port); 1797 } 1798 1799 static int br_multicast_ipv4_rcv(struct net_bridge *br, 1800 struct net_bridge_port *port, 1801 struct sk_buff *skb, 1802 u16 vid) 1803 { 1804 struct sk_buff *skb_trimmed = NULL; 1805 const unsigned char *src; 1806 struct igmphdr *ih; 1807 int err; 1808 1809 err = ip_mc_check_igmp(skb, &skb_trimmed); 1810 1811 if (err == -ENOMSG) { 1812 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) { 1813 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1814 } 
else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) { 1815 if (ip_hdr(skb)->protocol == IPPROTO_PIM) 1816 br_multicast_pim(br, port, skb); 1817 } 1818 return 0; 1819 } else if (err < 0) { 1820 br_multicast_err_count(br, port, skb->protocol); 1821 return err; 1822 } 1823 1824 ih = igmp_hdr(skb); 1825 src = eth_hdr(skb)->h_source; 1826 BR_INPUT_SKB_CB(skb)->igmp = ih->type; 1827 1828 switch (ih->type) { 1829 case IGMP_HOST_MEMBERSHIP_REPORT: 1830 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1831 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1832 err = br_ip4_multicast_add_group(br, port, ih->group, vid, src); 1833 break; 1834 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1835 err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid); 1836 break; 1837 case IGMP_HOST_MEMBERSHIP_QUERY: 1838 br_ip4_multicast_query(br, port, skb_trimmed, vid); 1839 break; 1840 case IGMP_HOST_LEAVE_MESSAGE: 1841 br_ip4_multicast_leave_group(br, port, ih->group, vid, src); 1842 break; 1843 } 1844 1845 if (skb_trimmed && skb_trimmed != skb) 1846 kfree_skb(skb_trimmed); 1847 1848 br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp, 1849 BR_MCAST_DIR_RX); 1850 1851 return err; 1852 } 1853 1854 #if IS_ENABLED(CONFIG_IPV6) 1855 static int br_multicast_ipv6_rcv(struct net_bridge *br, 1856 struct net_bridge_port *port, 1857 struct sk_buff *skb, 1858 u16 vid) 1859 { 1860 struct sk_buff *skb_trimmed = NULL; 1861 const unsigned char *src; 1862 struct mld_msg *mld; 1863 int err; 1864 1865 err = ipv6_mc_check_mld(skb, &skb_trimmed); 1866 1867 if (err == -ENOMSG) { 1868 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr)) 1869 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1870 return 0; 1871 } else if (err < 0) { 1872 br_multicast_err_count(br, port, skb->protocol); 1873 return err; 1874 } 1875 1876 mld = (struct mld_msg *)skb_transport_header(skb); 1877 BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type; 1878 1879 switch (mld->mld_type) { 1880 case ICMPV6_MGM_REPORT: 1881 src = eth_hdr(skb)->h_source; 1882 
BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1883 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid, 1884 src); 1885 break; 1886 case ICMPV6_MLD2_REPORT: 1887 err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid); 1888 break; 1889 case ICMPV6_MGM_QUERY: 1890 err = br_ip6_multicast_query(br, port, skb_trimmed, vid); 1891 break; 1892 case ICMPV6_MGM_REDUCTION: 1893 src = eth_hdr(skb)->h_source; 1894 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src); 1895 break; 1896 } 1897 1898 if (skb_trimmed && skb_trimmed != skb) 1899 kfree_skb(skb_trimmed); 1900 1901 br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp, 1902 BR_MCAST_DIR_RX); 1903 1904 return err; 1905 } 1906 #endif 1907 1908 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1909 struct sk_buff *skb, u16 vid) 1910 { 1911 int ret = 0; 1912 1913 BR_INPUT_SKB_CB(skb)->igmp = 0; 1914 BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 1915 1916 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) 1917 return 0; 1918 1919 switch (skb->protocol) { 1920 case htons(ETH_P_IP): 1921 ret = br_multicast_ipv4_rcv(br, port, skb, vid); 1922 break; 1923 #if IS_ENABLED(CONFIG_IPV6) 1924 case htons(ETH_P_IPV6): 1925 ret = br_multicast_ipv6_rcv(br, port, skb, vid); 1926 break; 1927 #endif 1928 } 1929 1930 return ret; 1931 } 1932 1933 static void br_multicast_query_expired(struct net_bridge *br, 1934 struct bridge_mcast_own_query *query, 1935 struct bridge_mcast_querier *querier) 1936 { 1937 spin_lock(&br->multicast_lock); 1938 if (query->startup_sent < br->multicast_startup_query_count) 1939 query->startup_sent++; 1940 1941 RCU_INIT_POINTER(querier->port, NULL); 1942 br_multicast_send_query(br, NULL, query); 1943 spin_unlock(&br->multicast_lock); 1944 } 1945 1946 static void br_ip4_multicast_query_expired(struct timer_list *t) 1947 { 1948 struct net_bridge *br = from_timer(br, t, ip4_own_query.timer); 1949 1950 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier); 1951 } 

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 own-query timer expiry: delegate to the common handler. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

/* Initialize multicast state for a newly created bridge: defaults for
 * the mdb hash, protocol intervals/counts, and all snooping timers.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

/* Restart a querier's startup sequence and fire its timer immediately
 * (only when multicast snooping is enabled).
 */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

/* Bring multicast snooping up for both address families. */
void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

/* Stop all bridge-level multicast timers synchronously. */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

/* Tear down the mdb on bridge device removal; entries are freed via
 * RCU, and a pending old table (mid-rehash) is drained first.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		/* wait for in-flight RCU frees of the previous table */
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}

/* Set the bridge-level multicast router type; returns -EINVAL for
 * unknown values (MDB_RTR_TYPE_TEMP is not valid at bridge level).
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Remove a port from the router list and notify userspace/hardware. */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

/* Set the per-port multicast router type; re-setting TEMP refreshes
 * its timer, other transitions add/remove the port from router_list.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

/* Kick off our own queries on the bridge and on every active port. */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

/* Enable/disable multicast snooping; on enable, rehash the mdb and
 * restart queriers and per-port query timers.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_port *port;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			/* rehash already in flight; undo the toggle */
			err = -EEXIST;
rollback:
			br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Report whether multicast snooping is enabled on @dev (a bridge). */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

/* Report whether the bridge @dev currently acts as a multicast router. */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(br);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
2238 EXPORT_SYMBOL_GPL(br_multicast_router); 2239 2240 int br_multicast_set_querier(struct net_bridge *br, unsigned long val) 2241 { 2242 unsigned long max_delay; 2243 2244 val = !!val; 2245 2246 spin_lock_bh(&br->multicast_lock); 2247 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val) 2248 goto unlock; 2249 2250 br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val); 2251 if (!val) 2252 goto unlock; 2253 2254 max_delay = br->multicast_query_response_interval; 2255 2256 if (!timer_pending(&br->ip4_other_query.timer)) 2257 br->ip4_other_query.delay_time = jiffies + max_delay; 2258 2259 br_multicast_start_querier(br, &br->ip4_own_query); 2260 2261 #if IS_ENABLED(CONFIG_IPV6) 2262 if (!timer_pending(&br->ip6_other_query.timer)) 2263 br->ip6_other_query.delay_time = jiffies + max_delay; 2264 2265 br_multicast_start_querier(br, &br->ip6_own_query); 2266 #endif 2267 2268 unlock: 2269 spin_unlock_bh(&br->multicast_lock); 2270 2271 return 0; 2272 } 2273 2274 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) 2275 { 2276 int err = -EINVAL; 2277 u32 old; 2278 struct net_bridge_mdb_htable *mdb; 2279 2280 spin_lock_bh(&br->multicast_lock); 2281 if (!is_power_of_2(val)) 2282 goto unlock; 2283 2284 mdb = mlock_dereference(br->mdb, br); 2285 if (mdb && val < mdb->size) 2286 goto unlock; 2287 2288 err = 0; 2289 2290 old = br->hash_max; 2291 br->hash_max = val; 2292 2293 if (mdb) { 2294 if (mdb->old) { 2295 err = -EEXIST; 2296 rollback: 2297 br->hash_max = old; 2298 goto unlock; 2299 } 2300 2301 err = br_mdb_rehash(&br->mdb, br->hash_max, 2302 br->hash_elasticity); 2303 if (err) 2304 goto rollback; 2305 } 2306 2307 unlock: 2308 spin_unlock_bh(&br->multicast_lock); 2309 2310 return err; 2311 } 2312 2313 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val) 2314 { 2315 /* Currently we support only version 2 and 3 */ 2316 switch (val) { 2317 case 2: 2318 case 3: 2319 break; 2320 default: 2321 return -EINVAL; 2322 } 2323 2324 
spin_lock_bh(&br->multicast_lock); 2325 br->multicast_igmp_version = val; 2326 spin_unlock_bh(&br->multicast_lock); 2327 2328 return 0; 2329 } 2330 2331 #if IS_ENABLED(CONFIG_IPV6) 2332 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val) 2333 { 2334 /* Currently we support version 1 and 2 */ 2335 switch (val) { 2336 case 1: 2337 case 2: 2338 break; 2339 default: 2340 return -EINVAL; 2341 } 2342 2343 spin_lock_bh(&br->multicast_lock); 2344 br->multicast_mld_version = val; 2345 spin_unlock_bh(&br->multicast_lock); 2346 2347 return 0; 2348 } 2349 #endif 2350 2351 /** 2352 * br_multicast_list_adjacent - Returns snooped multicast addresses 2353 * @dev: The bridge port adjacent to which to retrieve addresses 2354 * @br_ip_list: The list to store found, snooped multicast IP addresses in 2355 * 2356 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast 2357 * snooping feature on all bridge ports of dev's bridge device, excluding 2358 * the addresses from dev itself. 2359 * 2360 * Returns the number of items added to br_ip_list. 
2361 * 2362 * Notes: 2363 * - br_ip_list needs to be initialized by caller 2364 * - br_ip_list might contain duplicates in the end 2365 * (needs to be taken care of by caller) 2366 * - br_ip_list needs to be freed by caller 2367 */ 2368 int br_multicast_list_adjacent(struct net_device *dev, 2369 struct list_head *br_ip_list) 2370 { 2371 struct net_bridge *br; 2372 struct net_bridge_port *port; 2373 struct net_bridge_port_group *group; 2374 struct br_ip_list *entry; 2375 int count = 0; 2376 2377 rcu_read_lock(); 2378 if (!br_ip_list || !br_port_exists(dev)) 2379 goto unlock; 2380 2381 port = br_port_get_rcu(dev); 2382 if (!port || !port->br) 2383 goto unlock; 2384 2385 br = port->br; 2386 2387 list_for_each_entry_rcu(port, &br->port_list, list) { 2388 if (!port->dev || port->dev == dev) 2389 continue; 2390 2391 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 2392 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 2393 if (!entry) 2394 goto unlock; 2395 2396 entry->addr = group->addr; 2397 list_add(&entry->list, br_ip_list); 2398 count++; 2399 } 2400 } 2401 2402 unlock: 2403 rcu_read_unlock(); 2404 return count; 2405 } 2406 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); 2407 2408 /** 2409 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge 2410 * @dev: The bridge port providing the bridge on which to check for a querier 2411 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 2412 * 2413 * Checks whether the given interface has a bridge on top and if so returns 2414 * true if a valid querier exists anywhere on the bridged link layer. 2415 * Otherwise returns false. 
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* Build a minimal ethernet header carrying only the protocol so
	 * br_multicast_querier_exists() can pick the right address family.
	 */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		/* No other querier seen, or the selected querier sits behind
		 * @dev itself -> not "adjacent".
		 */
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/* Account one IGMP/MLD packet of @type in the per-cpu @stats for direction
 * @dir (BR_MCAST_DIR_RX / BR_MCAST_DIR_TX).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* Transport payload length = total length - IP header length */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* IGMPv3 queries are longer than a plain igmphdr; for
			 * the short form, a zero max-resp code distinguishes
			 * an IGMPv1 query from an IGMPv2 one.
			 */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* Transport payload length, accounting for extension headers */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* An MLDv1 query is exactly sizeof(struct mld_msg);
			 * anything longer is MLDv2.
			 */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	/* Per-port counters when a port is given, else the bridge's own */
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* Add the RX and TX counters of @src into @dst */
static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	/* Sum the per-cpu counters; the fetch/retry loop guarantees a
	 * consistent snapshot of each cpu's block even on 32-bit hosts.
	 */
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}