// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

/* MDB hash table parameters: keyed by the whole struct br_ip (group
 * address + protocol + vid), entries linked via mdb_entry::rhnode.
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif

/* Fast-path MDB lookup; caller must be in an RCU read-side section. */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* MDB lookup under br->multicast_lock.  The rhashtable walk itself still
 * requires an RCU read-side section, hence the rcu_read_lock() pair even
 * though the spinlock pins the entry.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Build an IPv4 group key and look it up under multicast_lock. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an IPv6 group key and look it up under multicast_lock. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Forwarding-path lookup: derive the group key from the skb's network
 * header.  Returns NULL when snooping is disabled, for packets already
 * classified as IGMP/MLD control traffic (BR_INPUT_SKB_CB(skb)->igmp),
 * and for non-IP protocols.
 * NOTE(review): uses the _rcu lookup, so callers are presumably under
 * rcu_read_lock() -- confirm at the call sites.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
/* MDB entry timer: on expiry the host side leaves the group and, when no
 * port groups remain, the entry is removed from the hash and freed via
 * RCU.  A still-pending timer means the entry was refreshed after this
 * callback was scheduled, so nothing is done.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_rcu(&mp->mdb_node);

	kfree_rcu(mp, rcu);

out:
	spin_unlock(&br->multicast_lock);
}

/* Unlink a source entry from its port group and defer the actual free to
 * the bridge's source GC workqueue so concurrent RCU readers can finish.
 */
static void br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->del_node, &br->src_gc_list);
	queue_work(system_long_wq, &br->src_gc_work);
}

/* Unlink port group @pg from its MDB entry through the caller-supplied
 * predecessor pointer @pp, stop its timers, drop all source entries,
 * notify userspace, and free it via RCU.  If the MDB entry is now empty
 * (no ports, host not joined), arm its timer to fire immediately so
 * br_multicast_group_expired() reclaims it.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	del_timer(&pg->timer);
	del_timer(&pg->rexmit_timer);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	kfree_rcu(pg, rcu);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

/* Find @pg's predecessor pointer within its MDB entry and delete it.
 * Failure of either lookup indicates MDB inconsistency, hence the WARNs.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

/* Port group timer: on expiry fall back to INCLUDE filter mode, delete
 * source entries whose own timers have already expired, and remove the
 * whole group when no sources remain.  Permanent entries, refreshed
 * timers and already-unlinked groups are left untouched.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	/* Switching away from EXCLUDE is itself a reportable change. */
	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->addr);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}
/* Build an IGMP query skb (v2 or v3 per br->multicast_igmp_version).
 *
 * @ip_dst:      IP destination of the query packet
 * @group:       queried group (0 for a general query)
 * @with_srcs:   build a group-and-source-specific query (v3 only)
 * @over_lmqt:   select sources whose timers expire after (true) or
 *               before (false) the last-member-query time
 * @sflag:       v3 "suppress router-side processing" flag value
 * @igmp_type:   out: IGMP type for stats accounting
 * @need_rexmit: out: set when some included source still has
 *               retransmissions left
 *
 * Returns NULL when no matching sources exist, the packet would exceed
 * the MTU, or allocation fails.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* First pass: count the sources that match the
			 * over/under-LMQT selection and still have
			 * retransmissions pending.
			 */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 for the Router Alert IP option appended below. */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;			/* 20-byte header + 4-byte RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed IP header. */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* Second pass: fill in the selected sources and consume
		 * one retransmission credit per source.
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.u.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* Both passes ran under multicast_lock, counts must agree. */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb (v1 or v2 per br->multicast_mld_version).
 * Mirrors br_ip4_multicast_alloc_query(); see that function for the
 * meaning of the selection parameters (@with_srcs, @over_llqt, @sflag,
 * @need_rexmit).  Returns NULL when no sources match, the packet would
 * exceed the MTU, no usable IPv6 source address exists, or allocation
 * fails.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* First pass: count matching sources. */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* + 8 for the hop-by-hop extension header built below. */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* No link-local (or other) source address usable on the bridge:
	 * remember that so queries are not attempted again pointlessly.
	 */
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* Second pass: fill the selected sources and consume one
		 * retransmission credit per source.
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.u.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
/* Dispatch query allocation by address family.  When @ip_dst is NULL the
 * all-hosts / all-nodes link-local destination is used (general query).
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->u.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(br, pg,
						    ip4_dst, group->u.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->u.ip6;
		else
			/* ff02::1, the all-nodes link-local address */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(br, pg,
						    &ip6_dst, &group->u.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Look up or create the MDB entry for @group.  When the table is full,
 * multicast snooping is disabled entirely and -E2BIG is returned.
 * Called with multicast_lock held (GFP_ATOMIC allocation).
 * Returns the entry or an ERR_PTR().
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Source entry timer: in INCLUDE mode an expired source is removed, and
 * if it was the last one the whole port group goes with it.  In EXCLUDE
 * mode expired sources are kept (they describe the exclusion list).
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	}
out:
	spin_unlock(&br->multicast_lock);
}
/* Linear search of @pg's source list for @ip; returns NULL if absent. */
static struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->u.ip4 == ent->addr.u.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.u.ip6, &ip->u.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate a new source entry for @pg.  Rejects zeronet/any and multicast
 * source addresses, and enforces the per-group PG_SRC_ENT_LIMIT cap.
 * Called with multicast_lock held (GFP_ATOMIC).  Returns NULL on any
 * rejection or allocation failure.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->u.ip4) ||
		    ipv4_is_multicast(src_ip->u.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->u.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->u.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->port->br;
	grp_src->addr = *src_ip;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
/* Allocate and initialise a port group entry and link it onto the port's
 * mglist.  @next becomes the entry's successor in the MDB entry's port
 * list (the caller links *pp to the new entry).  @src is the host MAC
 * for multicast-to-unicast; a broadcast address means "any host".
 * Called with multicast_lock held (GFP_ATOMIC); returns NULL on OOM.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	INIT_HLIST_HEAD(&p->src_list);
	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}

/* Two port group entries match when they are on the same port and, if
 * multicast-to-unicast is enabled on that port, also target the same
 * host MAC address.
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Mark the bridge device itself as a member of @mp and (re)arm the
 * membership timer; optionally notify userspace on the 0->1 transition.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}
	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}

/* Clear the bridge device's membership in @mp; optionally notify
 * userspace on the 1->0 transition.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
811 goto out; 812 } 813 814 for (pp = &mp->ports; 815 (p = mlock_dereference(*pp, br)) != NULL; 816 pp = &p->next) { 817 if (br_port_group_equal(p, port, src)) 818 goto found; 819 if ((unsigned long)p->port < (unsigned long)port) 820 break; 821 } 822 823 p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode); 824 if (unlikely(!p)) 825 goto err; 826 rcu_assign_pointer(*pp, p); 827 br_mdb_notify(br->dev, mp, p, RTM_NEWMDB); 828 829 found: 830 if (igmpv2_mldv1) 831 mod_timer(&p->timer, now + br->multicast_membership_interval); 832 833 out: 834 err = 0; 835 836 err: 837 spin_unlock(&br->multicast_lock); 838 return err; 839 } 840 841 static int br_ip4_multicast_add_group(struct net_bridge *br, 842 struct net_bridge_port *port, 843 __be32 group, 844 __u16 vid, 845 const unsigned char *src, 846 bool igmpv2) 847 { 848 struct br_ip br_group; 849 u8 filter_mode; 850 851 if (ipv4_is_local_multicast(group)) 852 return 0; 853 854 memset(&br_group, 0, sizeof(br_group)); 855 br_group.u.ip4 = group; 856 br_group.proto = htons(ETH_P_IP); 857 br_group.vid = vid; 858 filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE; 859 860 return br_multicast_add_group(br, port, &br_group, src, filter_mode, 861 igmpv2); 862 } 863 864 #if IS_ENABLED(CONFIG_IPV6) 865 static int br_ip6_multicast_add_group(struct net_bridge *br, 866 struct net_bridge_port *port, 867 const struct in6_addr *group, 868 __u16 vid, 869 const unsigned char *src, 870 bool mldv1) 871 { 872 struct br_ip br_group; 873 u8 filter_mode; 874 875 if (ipv6_addr_is_ll_all_nodes(group)) 876 return 0; 877 878 memset(&br_group, 0, sizeof(br_group)); 879 br_group.u.ip6 = *group; 880 br_group.proto = htons(ETH_P_IPV6); 881 br_group.vid = vid; 882 filter_mode = mldv1 ? 
/* Per-port multicast router timer: remove the port from the router list
 * when the learned router times out.  Static configurations (DISABLED /
 * PERM) and refreshed timers are left alone.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
		from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}

/* Propagate the bridge's own mrouter state to offloading hardware. */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

/* Bridge-local multicast router timer: clear the offloaded mrouter state
 * when the learned role times out (unless statically configured).
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}

/* The other (foreign) querier went silent: take over querying ourselves
 * if the bridge is up and snooping is enabled.
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

/* Timer thunk for the IPv4 other-querier timeout. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Timer thunk for the IPv6 other-querier timeout. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

/* Record the source address of a locally generated query so querier
 * election can compare it against other queriers on the segment.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and emit one query.  With @port set the skb is transmitted out
 * of that port; otherwise it is looped back into the bridge's own input
 * path (the bridge queries itself).
 *
 * When sending a source-specific query with the suppress flag set
 * (@sflag != 0), sources *over* the LMQT get the suppressed query first
 * and then the loop retries once for the sources *under* the LMQT
 * without suppression -- this implements the RFC 3376 6.6.3.2 split of
 * retransmitted source lists.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(br, group, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
/* Send a general query for the family selected by @own_query, unless a
 * foreign querier is active (its timer pending), then schedule the next
 * one: the shorter startup interval until startup_sent reaches
 * multicast_startup_query_count, the regular interval afterwards.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	/* Map the own-query context back to its address family.  Without
	 * CONFIG_IPV6 the else-branch is compiled out and other_query
	 * stays NULL, so non-IPv4 requests are dropped below.
	 */
	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
				  NULL);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
			br->multicast_startup_query_interval :
			br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Per-port own-query timer body: send the next general query unless the
 * port cannot forward (disabled/blocking).
 */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

/* Timer thunk: IPv4 per-port own query. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Timer thunk: IPv6 per-port own query. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
/* Retransmission timer for group/group-and-source specific queries:
 * resend while credits remain (group-level grp_query_rexmit_cnt and the
 * per-source counters consumed inside query allocation) and re-arm for
 * another round if anything is still owed.  Skipped entirely while a
 * foreign querier is active.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->port->br;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	if (pg->addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(br, pg->port, pg, &pg->addr,
					  &pg->addr, false, 1, NULL);
	}
	__br_multicast_send_query(br, pg->port, pg, &pg->addr,
				  &pg->addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     br->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

/* Tell offloading hardware whether multicast snooping is disabled.
 * Note @value is "enabled", hence the negation into u.mc_disabled.
 */
static void br_mc_disabled_update(struct net_device *dev, bool value)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	switchdev_port_attr_set(dev, &attr);
}

/* Per-port multicast init: default router mode, timers, offload state
 * and the per-CPU statistics.  Returns -ENOMEM if the stats allocation
 * fails (timers are harmless to leave initialised).
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev,
			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
/* Per-port multicast teardown: flush remaining port groups (by this
 * point only permanent ones should remain), stop the router timer and
 * free the per-CPU statistics.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

/* Restart a querier: reset the startup counter and fire the own-query
 * timer immediately (only if it can be safely (re)armed right now).
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Enable multicast on a port: kick the own queriers and, for statically
 * configured permanent routers, re-add the port to the router list.
 * Caller holds multicast_lock.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

/* Locked wrapper around __br_multicast_enable_port(). */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}
&port->mglist, mglist) 1219 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT)) 1220 br_multicast_find_del_pg(br, pg); 1221 1222 __del_port_router(port); 1223 1224 del_timer(&port->multicast_router_timer); 1225 del_timer(&port->ip4_own_query.timer); 1226 #if IS_ENABLED(CONFIG_IPV6) 1227 del_timer(&port->ip6_own_query.timer); 1228 #endif 1229 spin_unlock(&br->multicast_lock); 1230 } 1231 1232 static int __grp_src_delete_marked(struct net_bridge_port_group *pg) 1233 { 1234 struct net_bridge_group_src *ent; 1235 struct hlist_node *tmp; 1236 int deleted = 0; 1237 1238 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) 1239 if (ent->flags & BR_SGRP_F_DELETE) { 1240 br_multicast_del_group_src(ent); 1241 deleted++; 1242 } 1243 1244 return deleted; 1245 } 1246 1247 static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg) 1248 { 1249 struct bridge_mcast_other_query *other_query = NULL; 1250 struct net_bridge *br = pg->port->br; 1251 u32 lmqc = br->multicast_last_member_count; 1252 unsigned long lmqt, lmi, now = jiffies; 1253 struct net_bridge_group_src *ent; 1254 1255 if (!netif_running(br->dev) || 1256 !br_opt_get(br, BROPT_MULTICAST_ENABLED)) 1257 return; 1258 1259 if (pg->addr.proto == htons(ETH_P_IP)) 1260 other_query = &br->ip4_other_query; 1261 #if IS_ENABLED(CONFIG_IPV6) 1262 else 1263 other_query = &br->ip6_other_query; 1264 #endif 1265 1266 lmqt = now + br_multicast_lmqt(br); 1267 hlist_for_each_entry(ent, &pg->src_list, node) { 1268 if (ent->flags & BR_SGRP_F_SEND) { 1269 ent->flags &= ~BR_SGRP_F_SEND; 1270 if (ent->timer.expires > lmqt) { 1271 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) && 1272 other_query && 1273 !timer_pending(&other_query->timer)) 1274 ent->src_query_rexmit_cnt = lmqc; 1275 mod_timer(&ent->timer, lmqt); 1276 } 1277 } 1278 } 1279 1280 if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) || 1281 !other_query || timer_pending(&other_query->timer)) 1282 return; 1283 1284 __br_multicast_send_query(br, pg->port, pg, &pg->addr, 1285 
&pg->addr, true, 1, NULL); 1286 1287 lmi = now + br->multicast_last_member_interval; 1288 if (!timer_pending(&pg->rexmit_timer) || 1289 time_after(pg->rexmit_timer.expires, lmi)) 1290 mod_timer(&pg->rexmit_timer, lmi); 1291 } 1292 1293 static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg) 1294 { 1295 struct bridge_mcast_other_query *other_query = NULL; 1296 struct net_bridge *br = pg->port->br; 1297 unsigned long now = jiffies, lmi; 1298 1299 if (!netif_running(br->dev) || 1300 !br_opt_get(br, BROPT_MULTICAST_ENABLED)) 1301 return; 1302 1303 if (pg->addr.proto == htons(ETH_P_IP)) 1304 other_query = &br->ip4_other_query; 1305 #if IS_ENABLED(CONFIG_IPV6) 1306 else 1307 other_query = &br->ip6_other_query; 1308 #endif 1309 1310 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) && 1311 other_query && !timer_pending(&other_query->timer)) { 1312 lmi = now + br->multicast_last_member_interval; 1313 pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1; 1314 __br_multicast_send_query(br, pg->port, pg, &pg->addr, 1315 &pg->addr, false, 0, NULL); 1316 if (!timer_pending(&pg->rexmit_timer) || 1317 time_after(pg->rexmit_timer.expires, lmi)) 1318 mod_timer(&pg->rexmit_timer, lmi); 1319 } 1320 1321 if (pg->filter_mode == MCAST_EXCLUDE && 1322 (!timer_pending(&pg->timer) || 1323 time_after(pg->timer.expires, now + br_multicast_lmqt(br)))) 1324 mod_timer(&pg->timer, now + br_multicast_lmqt(br)); 1325 } 1326 1327 /* State Msg type New state Actions 1328 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI 1329 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI 1330 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI 1331 */ 1332 static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg, 1333 void *srcs, u32 nsrcs, size_t src_size) 1334 { 1335 struct net_bridge *br = pg->port->br; 1336 struct net_bridge_group_src *ent; 1337 unsigned long now = jiffies; 1338 bool changed = false; 1339 struct br_ip src_ip; 1340 u32 src_idx; 1341 1342 memset(&src_ip, 0, 
sizeof(src_ip)); 1343 src_ip.proto = pg->addr.proto; 1344 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 1345 memcpy(&src_ip.u, srcs, src_size); 1346 ent = br_multicast_find_group_src(pg, &src_ip); 1347 if (!ent) { 1348 ent = br_multicast_new_group_src(pg, &src_ip); 1349 if (ent) 1350 changed = true; 1351 } 1352 1353 if (ent) 1354 mod_timer(&ent->timer, now + br_multicast_gmi(br)); 1355 srcs += src_size; 1356 } 1357 1358 return changed; 1359 } 1360 1361 /* State Msg type New state Actions 1362 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 1363 * Delete (A-B) 1364 * Group Timer=GMI 1365 */ 1366 static void __grp_src_isexc_incl(struct net_bridge_port_group *pg, 1367 void *srcs, u32 nsrcs, size_t src_size) 1368 { 1369 struct net_bridge_group_src *ent; 1370 struct br_ip src_ip; 1371 u32 src_idx; 1372 1373 hlist_for_each_entry(ent, &pg->src_list, node) 1374 ent->flags |= BR_SGRP_F_DELETE; 1375 1376 memset(&src_ip, 0, sizeof(src_ip)); 1377 src_ip.proto = pg->addr.proto; 1378 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 1379 memcpy(&src_ip.u, srcs, src_size); 1380 ent = br_multicast_find_group_src(pg, &src_ip); 1381 if (ent) 1382 ent->flags &= ~BR_SGRP_F_DELETE; 1383 else 1384 br_multicast_new_group_src(pg, &src_ip); 1385 srcs += src_size; 1386 } 1387 1388 __grp_src_delete_marked(pg); 1389 } 1390 1391 /* State Msg type New state Actions 1392 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI 1393 * Delete (X-A) 1394 * Delete (Y-A) 1395 * Group Timer=GMI 1396 */ 1397 static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg, 1398 void *srcs, u32 nsrcs, size_t src_size) 1399 { 1400 struct net_bridge *br = pg->port->br; 1401 struct net_bridge_group_src *ent; 1402 unsigned long now = jiffies; 1403 bool changed = false; 1404 struct br_ip src_ip; 1405 u32 src_idx; 1406 1407 hlist_for_each_entry(ent, &pg->src_list, node) 1408 ent->flags |= BR_SGRP_F_DELETE; 1409 1410 memset(&src_ip, 0, sizeof(src_ip)); 1411 src_ip.proto = pg->addr.proto; 1412 for (src_idx = 
0; src_idx < nsrcs; src_idx++) { 1413 memcpy(&src_ip.u, srcs, src_size); 1414 ent = br_multicast_find_group_src(pg, &src_ip); 1415 if (ent) { 1416 ent->flags &= ~BR_SGRP_F_DELETE; 1417 } else { 1418 ent = br_multicast_new_group_src(pg, &src_ip); 1419 if (ent) { 1420 mod_timer(&ent->timer, 1421 now + br_multicast_gmi(br)); 1422 changed = true; 1423 } 1424 } 1425 srcs += src_size; 1426 } 1427 1428 if (__grp_src_delete_marked(pg)) 1429 changed = true; 1430 1431 return changed; 1432 } 1433 1434 static bool br_multicast_isexc(struct net_bridge_port_group *pg, 1435 void *srcs, u32 nsrcs, size_t src_size) 1436 { 1437 struct net_bridge *br = pg->port->br; 1438 bool changed = false; 1439 1440 switch (pg->filter_mode) { 1441 case MCAST_INCLUDE: 1442 __grp_src_isexc_incl(pg, srcs, nsrcs, src_size); 1443 changed = true; 1444 break; 1445 case MCAST_EXCLUDE: 1446 changed = __grp_src_isexc_excl(pg, srcs, nsrcs, src_size); 1447 break; 1448 } 1449 1450 pg->filter_mode = MCAST_EXCLUDE; 1451 mod_timer(&pg->timer, jiffies + br_multicast_gmi(br)); 1452 1453 return changed; 1454 } 1455 1456 /* State Msg type New state Actions 1457 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI 1458 * Send Q(G,A-B) 1459 */ 1460 static bool __grp_src_toin_incl(struct net_bridge_port_group *pg, 1461 void *srcs, u32 nsrcs, size_t src_size) 1462 { 1463 struct net_bridge *br = pg->port->br; 1464 u32 src_idx, to_send = pg->src_ents; 1465 struct net_bridge_group_src *ent; 1466 unsigned long now = jiffies; 1467 bool changed = false; 1468 struct br_ip src_ip; 1469 1470 hlist_for_each_entry(ent, &pg->src_list, node) 1471 ent->flags |= BR_SGRP_F_SEND; 1472 1473 memset(&src_ip, 0, sizeof(src_ip)); 1474 src_ip.proto = pg->addr.proto; 1475 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 1476 memcpy(&src_ip.u, srcs, src_size); 1477 ent = br_multicast_find_group_src(pg, &src_ip); 1478 if (ent) { 1479 ent->flags &= ~BR_SGRP_F_SEND; 1480 to_send--; 1481 } else { 1482 ent = br_multicast_new_group_src(pg, &src_ip); 1483 if 
(ent) 1484 changed = true; 1485 } 1486 if (ent) 1487 mod_timer(&ent->timer, now + br_multicast_gmi(br)); 1488 srcs += src_size; 1489 } 1490 1491 if (to_send) 1492 __grp_src_query_marked_and_rexmit(pg); 1493 1494 return changed; 1495 } 1496 1497 /* State Msg type New state Actions 1498 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI 1499 * Send Q(G,X-A) 1500 * Send Q(G) 1501 */ 1502 static bool __grp_src_toin_excl(struct net_bridge_port_group *pg, 1503 void *srcs, u32 nsrcs, size_t src_size) 1504 { 1505 struct net_bridge *br = pg->port->br; 1506 u32 src_idx, to_send = pg->src_ents; 1507 struct net_bridge_group_src *ent; 1508 unsigned long now = jiffies; 1509 bool changed = false; 1510 struct br_ip src_ip; 1511 1512 hlist_for_each_entry(ent, &pg->src_list, node) 1513 if (timer_pending(&ent->timer)) 1514 ent->flags |= BR_SGRP_F_SEND; 1515 1516 memset(&src_ip, 0, sizeof(src_ip)); 1517 src_ip.proto = pg->addr.proto; 1518 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 1519 memcpy(&src_ip.u, srcs, src_size); 1520 ent = br_multicast_find_group_src(pg, &src_ip); 1521 if (ent) { 1522 if (timer_pending(&ent->timer)) { 1523 ent->flags &= ~BR_SGRP_F_SEND; 1524 to_send--; 1525 } 1526 } else { 1527 ent = br_multicast_new_group_src(pg, &src_ip); 1528 if (ent) 1529 changed = true; 1530 } 1531 if (ent) 1532 mod_timer(&ent->timer, now + br_multicast_gmi(br)); 1533 srcs += src_size; 1534 } 1535 1536 if (to_send) 1537 __grp_src_query_marked_and_rexmit(pg); 1538 1539 __grp_send_query_and_rexmit(pg); 1540 1541 return changed; 1542 } 1543 1544 static bool br_multicast_toin(struct net_bridge_port_group *pg, 1545 void *srcs, u32 nsrcs, size_t src_size) 1546 { 1547 bool changed = false; 1548 1549 switch (pg->filter_mode) { 1550 case MCAST_INCLUDE: 1551 changed = __grp_src_toin_incl(pg, srcs, nsrcs, src_size); 1552 break; 1553 case MCAST_EXCLUDE: 1554 changed = __grp_src_toin_excl(pg, srcs, nsrcs, src_size); 1555 break; 1556 } 1557 1558 return changed; 1559 } 1560 1561 /* State Msg type 
New state Actions 1562 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 1563 * Delete (A-B) 1564 * Send Q(G,A*B) 1565 * Group Timer=GMI 1566 */ 1567 static void __grp_src_toex_incl(struct net_bridge_port_group *pg, 1568 void *srcs, u32 nsrcs, size_t src_size) 1569 { 1570 struct net_bridge_group_src *ent; 1571 u32 src_idx, to_send = 0; 1572 struct br_ip src_ip; 1573 1574 hlist_for_each_entry(ent, &pg->src_list, node) 1575 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE; 1576 1577 memset(&src_ip, 0, sizeof(src_ip)); 1578 src_ip.proto = pg->addr.proto; 1579 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 1580 memcpy(&src_ip.u, srcs, src_size); 1581 ent = br_multicast_find_group_src(pg, &src_ip); 1582 if (ent) { 1583 ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) | 1584 BR_SGRP_F_SEND; 1585 to_send++; 1586 } else { 1587 br_multicast_new_group_src(pg, &src_ip); 1588 } 1589 srcs += src_size; 1590 } 1591 1592 __grp_src_delete_marked(pg); 1593 if (to_send) 1594 __grp_src_query_marked_and_rexmit(pg); 1595 } 1596 1597 /* State Msg type New state Actions 1598 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer 1599 * Delete (X-A) 1600 * Delete (Y-A) 1601 * Send Q(G,A-Y) 1602 * Group Timer=GMI 1603 */ 1604 static bool __grp_src_toex_excl(struct net_bridge_port_group *pg, 1605 void *srcs, u32 nsrcs, size_t src_size) 1606 { 1607 struct net_bridge_group_src *ent; 1608 u32 src_idx, to_send = 0; 1609 bool changed = false; 1610 struct br_ip src_ip; 1611 1612 hlist_for_each_entry(ent, &pg->src_list, node) 1613 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE; 1614 1615 memset(&src_ip, 0, sizeof(src_ip)); 1616 src_ip.proto = pg->addr.proto; 1617 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 1618 memcpy(&src_ip.u, srcs, src_size); 1619 ent = br_multicast_find_group_src(pg, &src_ip); 1620 if (ent) { 1621 ent->flags &= ~BR_SGRP_F_DELETE; 1622 } else { 1623 ent = br_multicast_new_group_src(pg, &src_ip); 1624 if (ent) { 1625 mod_timer(&ent->timer, 
pg->timer.expires); 1626 changed = true; 1627 } 1628 } 1629 if (ent && timer_pending(&ent->timer)) { 1630 ent->flags |= BR_SGRP_F_SEND; 1631 to_send++; 1632 } 1633 srcs += src_size; 1634 } 1635 1636 if (__grp_src_delete_marked(pg)) 1637 changed = true; 1638 if (to_send) 1639 __grp_src_query_marked_and_rexmit(pg); 1640 1641 return changed; 1642 } 1643 1644 static bool br_multicast_toex(struct net_bridge_port_group *pg, 1645 void *srcs, u32 nsrcs, size_t src_size) 1646 { 1647 struct net_bridge *br = pg->port->br; 1648 bool changed = false; 1649 1650 switch (pg->filter_mode) { 1651 case MCAST_INCLUDE: 1652 __grp_src_toex_incl(pg, srcs, nsrcs, src_size); 1653 changed = true; 1654 break; 1655 case MCAST_EXCLUDE: 1656 __grp_src_toex_excl(pg, srcs, nsrcs, src_size); 1657 break; 1658 } 1659 1660 pg->filter_mode = MCAST_EXCLUDE; 1661 mod_timer(&pg->timer, jiffies + br_multicast_gmi(br)); 1662 1663 return changed; 1664 } 1665 1666 /* State Msg type New state Actions 1667 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B) 1668 */ 1669 static void __grp_src_block_incl(struct net_bridge_port_group *pg, 1670 void *srcs, u32 nsrcs, size_t src_size) 1671 { 1672 struct net_bridge_group_src *ent; 1673 u32 src_idx, to_send = 0; 1674 struct br_ip src_ip; 1675 1676 hlist_for_each_entry(ent, &pg->src_list, node) 1677 ent->flags &= ~BR_SGRP_F_SEND; 1678 1679 memset(&src_ip, 0, sizeof(src_ip)); 1680 src_ip.proto = pg->addr.proto; 1681 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 1682 memcpy(&src_ip.u, srcs, src_size); 1683 ent = br_multicast_find_group_src(pg, &src_ip); 1684 if (ent) { 1685 ent->flags |= BR_SGRP_F_SEND; 1686 to_send++; 1687 } 1688 srcs += src_size; 1689 } 1690 1691 if (to_send) 1692 __grp_src_query_marked_and_rexmit(pg); 1693 1694 if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) 1695 br_multicast_find_del_pg(pg->port->br, pg); 1696 } 1697 1698 /* State Msg type New state Actions 1699 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer 
1700 * Send Q(G,A-Y) 1701 */ 1702 static bool __grp_src_block_excl(struct net_bridge_port_group *pg, 1703 void *srcs, u32 nsrcs, size_t src_size) 1704 { 1705 struct net_bridge_group_src *ent; 1706 u32 src_idx, to_send = 0; 1707 bool changed = false; 1708 struct br_ip src_ip; 1709 1710 hlist_for_each_entry(ent, &pg->src_list, node) 1711 ent->flags &= ~BR_SGRP_F_SEND; 1712 1713 memset(&src_ip, 0, sizeof(src_ip)); 1714 src_ip.proto = pg->addr.proto; 1715 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 1716 memcpy(&src_ip.u, srcs, src_size); 1717 ent = br_multicast_find_group_src(pg, &src_ip); 1718 if (!ent) { 1719 ent = br_multicast_new_group_src(pg, &src_ip); 1720 if (ent) { 1721 mod_timer(&ent->timer, pg->timer.expires); 1722 changed = true; 1723 } 1724 } 1725 if (ent && timer_pending(&ent->timer)) { 1726 ent->flags |= BR_SGRP_F_SEND; 1727 to_send++; 1728 } 1729 srcs += src_size; 1730 } 1731 1732 if (to_send) 1733 __grp_src_query_marked_and_rexmit(pg); 1734 1735 return changed; 1736 } 1737 1738 static bool br_multicast_block(struct net_bridge_port_group *pg, 1739 void *srcs, u32 nsrcs, size_t src_size) 1740 { 1741 bool changed = false; 1742 1743 switch (pg->filter_mode) { 1744 case MCAST_INCLUDE: 1745 __grp_src_block_incl(pg, srcs, nsrcs, src_size); 1746 break; 1747 case MCAST_EXCLUDE: 1748 changed = __grp_src_block_excl(pg, srcs, nsrcs, src_size); 1749 break; 1750 } 1751 1752 return changed; 1753 } 1754 1755 static struct net_bridge_port_group * 1756 br_multicast_find_port(struct net_bridge_mdb_entry *mp, 1757 struct net_bridge_port *p, 1758 const unsigned char *src) 1759 { 1760 struct net_bridge_port_group *pg; 1761 struct net_bridge *br = mp->br; 1762 1763 for (pg = mlock_dereference(mp->ports, br); 1764 pg; 1765 pg = mlock_dereference(pg->next, br)) 1766 if (br_port_group_equal(pg, p, src)) 1767 return pg; 1768 1769 return NULL; 1770 } 1771 1772 static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 1773 struct net_bridge_port *port, 1774 struct sk_buff 
*skb, 1775 u16 vid) 1776 { 1777 bool igmpv2 = br->multicast_igmp_version == 2; 1778 struct net_bridge_mdb_entry *mdst; 1779 struct net_bridge_port_group *pg; 1780 const unsigned char *src; 1781 struct igmpv3_report *ih; 1782 struct igmpv3_grec *grec; 1783 int i, len, num, type; 1784 bool changed = false; 1785 __be32 group; 1786 int err = 0; 1787 u16 nsrcs; 1788 1789 ih = igmpv3_report_hdr(skb); 1790 num = ntohs(ih->ngrec); 1791 len = skb_transport_offset(skb) + sizeof(*ih); 1792 1793 for (i = 0; i < num; i++) { 1794 len += sizeof(*grec); 1795 if (!ip_mc_may_pull(skb, len)) 1796 return -EINVAL; 1797 1798 grec = (void *)(skb->data + len - sizeof(*grec)); 1799 group = grec->grec_mca; 1800 type = grec->grec_type; 1801 nsrcs = ntohs(grec->grec_nsrcs); 1802 1803 len += nsrcs * 4; 1804 if (!ip_mc_may_pull(skb, len)) 1805 return -EINVAL; 1806 1807 switch (type) { 1808 case IGMPV3_MODE_IS_INCLUDE: 1809 case IGMPV3_MODE_IS_EXCLUDE: 1810 case IGMPV3_CHANGE_TO_INCLUDE: 1811 case IGMPV3_CHANGE_TO_EXCLUDE: 1812 case IGMPV3_ALLOW_NEW_SOURCES: 1813 case IGMPV3_BLOCK_OLD_SOURCES: 1814 break; 1815 1816 default: 1817 continue; 1818 } 1819 1820 src = eth_hdr(skb)->h_source; 1821 if (nsrcs == 0 && 1822 (type == IGMPV3_CHANGE_TO_INCLUDE || 1823 type == IGMPV3_MODE_IS_INCLUDE)) { 1824 if (!port || igmpv2) { 1825 br_ip4_multicast_leave_group(br, port, group, vid, src); 1826 continue; 1827 } 1828 } else { 1829 err = br_ip4_multicast_add_group(br, port, group, vid, 1830 src, igmpv2); 1831 if (err) 1832 break; 1833 } 1834 1835 if (!port || igmpv2) 1836 continue; 1837 1838 spin_lock_bh(&br->multicast_lock); 1839 mdst = br_mdb_ip4_get(br, group, vid); 1840 if (!mdst) 1841 goto unlock_continue; 1842 pg = br_multicast_find_port(mdst, port, src); 1843 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT)) 1844 goto unlock_continue; 1845 /* reload grec */ 1846 grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4)); 1847 switch (type) { 1848 case IGMPV3_ALLOW_NEW_SOURCES: 1849 changed = 
br_multicast_isinc_allow(pg, grec->grec_src, 1850 nsrcs, sizeof(__be32)); 1851 break; 1852 case IGMPV3_MODE_IS_INCLUDE: 1853 changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs, 1854 sizeof(__be32)); 1855 break; 1856 case IGMPV3_MODE_IS_EXCLUDE: 1857 changed = br_multicast_isexc(pg, grec->grec_src, nsrcs, 1858 sizeof(__be32)); 1859 break; 1860 case IGMPV3_CHANGE_TO_INCLUDE: 1861 changed = br_multicast_toin(pg, grec->grec_src, nsrcs, 1862 sizeof(__be32)); 1863 break; 1864 case IGMPV3_CHANGE_TO_EXCLUDE: 1865 changed = br_multicast_toex(pg, grec->grec_src, nsrcs, 1866 sizeof(__be32)); 1867 break; 1868 case IGMPV3_BLOCK_OLD_SOURCES: 1869 changed = br_multicast_block(pg, grec->grec_src, nsrcs, 1870 sizeof(__be32)); 1871 break; 1872 } 1873 if (changed) 1874 br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB); 1875 unlock_continue: 1876 spin_unlock_bh(&br->multicast_lock); 1877 } 1878 1879 return err; 1880 } 1881 1882 #if IS_ENABLED(CONFIG_IPV6) 1883 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 1884 struct net_bridge_port *port, 1885 struct sk_buff *skb, 1886 u16 vid) 1887 { 1888 bool mldv1 = br->multicast_mld_version == 1; 1889 struct net_bridge_mdb_entry *mdst; 1890 struct net_bridge_port_group *pg; 1891 unsigned int nsrcs_offset; 1892 const unsigned char *src; 1893 struct icmp6hdr *icmp6h; 1894 struct mld2_grec *grec; 1895 unsigned int grec_len; 1896 bool changed = false; 1897 int i, len, num; 1898 int err = 0; 1899 1900 if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h))) 1901 return -EINVAL; 1902 1903 icmp6h = icmp6_hdr(skb); 1904 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1905 len = skb_transport_offset(skb) + sizeof(*icmp6h); 1906 1907 for (i = 0; i < num; i++) { 1908 __be16 *_nsrcs, __nsrcs; 1909 u16 nsrcs; 1910 1911 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs); 1912 1913 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < 1914 nsrcs_offset + sizeof(__nsrcs)) 1915 return -EINVAL; 1916 1917 _nsrcs = skb_header_pointer(skb, 
nsrcs_offset, 1918 sizeof(__nsrcs), &__nsrcs); 1919 if (!_nsrcs) 1920 return -EINVAL; 1921 1922 nsrcs = ntohs(*_nsrcs); 1923 grec_len = struct_size(grec, grec_src, nsrcs); 1924 1925 if (!ipv6_mc_may_pull(skb, len + grec_len)) 1926 return -EINVAL; 1927 1928 grec = (struct mld2_grec *)(skb->data + len); 1929 len += grec_len; 1930 1931 switch (grec->grec_type) { 1932 case MLD2_MODE_IS_INCLUDE: 1933 case MLD2_MODE_IS_EXCLUDE: 1934 case MLD2_CHANGE_TO_INCLUDE: 1935 case MLD2_CHANGE_TO_EXCLUDE: 1936 case MLD2_ALLOW_NEW_SOURCES: 1937 case MLD2_BLOCK_OLD_SOURCES: 1938 break; 1939 1940 default: 1941 continue; 1942 } 1943 1944 src = eth_hdr(skb)->h_source; 1945 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 1946 grec->grec_type == MLD2_MODE_IS_INCLUDE) && 1947 nsrcs == 0) { 1948 if (!port || mldv1) { 1949 br_ip6_multicast_leave_group(br, port, 1950 &grec->grec_mca, 1951 vid, src); 1952 continue; 1953 } 1954 } else { 1955 err = br_ip6_multicast_add_group(br, port, 1956 &grec->grec_mca, vid, 1957 src, mldv1); 1958 if (err) 1959 break; 1960 } 1961 1962 if (!port || mldv1) 1963 continue; 1964 1965 spin_lock_bh(&br->multicast_lock); 1966 mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid); 1967 if (!mdst) 1968 goto unlock_continue; 1969 pg = br_multicast_find_port(mdst, port, src); 1970 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT)) 1971 goto unlock_continue; 1972 switch (grec->grec_type) { 1973 case MLD2_ALLOW_NEW_SOURCES: 1974 changed = br_multicast_isinc_allow(pg, grec->grec_src, 1975 nsrcs, 1976 sizeof(struct in6_addr)); 1977 break; 1978 case MLD2_MODE_IS_INCLUDE: 1979 changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs, 1980 sizeof(struct in6_addr)); 1981 break; 1982 case MLD2_MODE_IS_EXCLUDE: 1983 changed = br_multicast_isexc(pg, grec->grec_src, nsrcs, 1984 sizeof(struct in6_addr)); 1985 break; 1986 case MLD2_CHANGE_TO_INCLUDE: 1987 changed = br_multicast_toin(pg, grec->grec_src, nsrcs, 1988 sizeof(struct in6_addr)); 1989 break; 1990 case MLD2_CHANGE_TO_EXCLUDE: 
1991 changed = br_multicast_toex(pg, grec->grec_src, nsrcs, 1992 sizeof(struct in6_addr)); 1993 break; 1994 case MLD2_BLOCK_OLD_SOURCES: 1995 changed = br_multicast_block(pg, grec->grec_src, nsrcs, 1996 sizeof(struct in6_addr)); 1997 break; 1998 } 1999 if (changed) 2000 br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB); 2001 unlock_continue: 2002 spin_unlock_bh(&br->multicast_lock); 2003 } 2004 2005 return err; 2006 } 2007 #endif 2008 2009 static bool br_ip4_multicast_select_querier(struct net_bridge *br, 2010 struct net_bridge_port *port, 2011 __be32 saddr) 2012 { 2013 if (!timer_pending(&br->ip4_own_query.timer) && 2014 !timer_pending(&br->ip4_other_query.timer)) 2015 goto update; 2016 2017 if (!br->ip4_querier.addr.u.ip4) 2018 goto update; 2019 2020 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4)) 2021 goto update; 2022 2023 return false; 2024 2025 update: 2026 br->ip4_querier.addr.u.ip4 = saddr; 2027 2028 /* update protected by general multicast_lock by caller */ 2029 rcu_assign_pointer(br->ip4_querier.port, port); 2030 2031 return true; 2032 } 2033 2034 #if IS_ENABLED(CONFIG_IPV6) 2035 static bool br_ip6_multicast_select_querier(struct net_bridge *br, 2036 struct net_bridge_port *port, 2037 struct in6_addr *saddr) 2038 { 2039 if (!timer_pending(&br->ip6_own_query.timer) && 2040 !timer_pending(&br->ip6_other_query.timer)) 2041 goto update; 2042 2043 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0) 2044 goto update; 2045 2046 return false; 2047 2048 update: 2049 br->ip6_querier.addr.u.ip6 = *saddr; 2050 2051 /* update protected by general multicast_lock by caller */ 2052 rcu_assign_pointer(br->ip6_querier.port, port); 2053 2054 return true; 2055 } 2056 #endif 2057 2058 static bool br_multicast_select_querier(struct net_bridge *br, 2059 struct net_bridge_port *port, 2060 struct br_ip *saddr) 2061 { 2062 switch (saddr->proto) { 2063 case htons(ETH_P_IP): 2064 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4); 2065 #if 
IS_ENABLED(CONFIG_IPV6) 2066 case htons(ETH_P_IPV6): 2067 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6); 2068 #endif 2069 } 2070 2071 return false; 2072 } 2073 2074 static void 2075 br_multicast_update_query_timer(struct net_bridge *br, 2076 struct bridge_mcast_other_query *query, 2077 unsigned long max_delay) 2078 { 2079 if (!timer_pending(&query->timer)) 2080 query->delay_time = jiffies + max_delay; 2081 2082 mod_timer(&query->timer, jiffies + br->multicast_querier_interval); 2083 } 2084 2085 static void br_port_mc_router_state_change(struct net_bridge_port *p, 2086 bool is_mc_router) 2087 { 2088 struct switchdev_attr attr = { 2089 .orig_dev = p->dev, 2090 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER, 2091 .flags = SWITCHDEV_F_DEFER, 2092 .u.mrouter = is_mc_router, 2093 }; 2094 2095 switchdev_port_attr_set(p->dev, &attr); 2096 } 2097 2098 /* 2099 * Add port to router_list 2100 * list is maintained ordered by pointer value 2101 * and locked by br->multicast_lock and RCU 2102 */ 2103 static void br_multicast_add_router(struct net_bridge *br, 2104 struct net_bridge_port *port) 2105 { 2106 struct net_bridge_port *p; 2107 struct hlist_node *slot = NULL; 2108 2109 if (!hlist_unhashed(&port->rlist)) 2110 return; 2111 2112 hlist_for_each_entry(p, &br->router_list, rlist) { 2113 if ((unsigned long) port >= (unsigned long) p) 2114 break; 2115 slot = &p->rlist; 2116 } 2117 2118 if (slot) 2119 hlist_add_behind_rcu(&port->rlist, slot); 2120 else 2121 hlist_add_head_rcu(&port->rlist, &br->router_list); 2122 br_rtr_notify(br->dev, port, RTM_NEWMDB); 2123 br_port_mc_router_state_change(port, true); 2124 } 2125 2126 static void br_multicast_mark_router(struct net_bridge *br, 2127 struct net_bridge_port *port) 2128 { 2129 unsigned long now = jiffies; 2130 2131 if (!port) { 2132 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) { 2133 if (!timer_pending(&br->multicast_router_timer)) 2134 br_mc_router_state_change(br, true); 2135 mod_timer(&br->multicast_router_timer, 
2136 now + br->multicast_querier_interval); 2137 } 2138 return; 2139 } 2140 2141 if (port->multicast_router == MDB_RTR_TYPE_DISABLED || 2142 port->multicast_router == MDB_RTR_TYPE_PERM) 2143 return; 2144 2145 br_multicast_add_router(br, port); 2146 2147 mod_timer(&port->multicast_router_timer, 2148 now + br->multicast_querier_interval); 2149 } 2150 2151 static void br_multicast_query_received(struct net_bridge *br, 2152 struct net_bridge_port *port, 2153 struct bridge_mcast_other_query *query, 2154 struct br_ip *saddr, 2155 unsigned long max_delay) 2156 { 2157 if (!br_multicast_select_querier(br, port, saddr)) 2158 return; 2159 2160 br_multicast_update_query_timer(br, query, max_delay); 2161 br_multicast_mark_router(br, port); 2162 } 2163 2164 static void br_ip4_multicast_query(struct net_bridge *br, 2165 struct net_bridge_port *port, 2166 struct sk_buff *skb, 2167 u16 vid) 2168 { 2169 unsigned int transport_len = ip_transport_len(skb); 2170 const struct iphdr *iph = ip_hdr(skb); 2171 struct igmphdr *ih = igmp_hdr(skb); 2172 struct net_bridge_mdb_entry *mp; 2173 struct igmpv3_query *ih3; 2174 struct net_bridge_port_group *p; 2175 struct net_bridge_port_group __rcu **pp; 2176 struct br_ip saddr; 2177 unsigned long max_delay; 2178 unsigned long now = jiffies; 2179 __be32 group; 2180 2181 spin_lock(&br->multicast_lock); 2182 if (!netif_running(br->dev) || 2183 (port && port->state == BR_STATE_DISABLED)) 2184 goto out; 2185 2186 group = ih->group; 2187 2188 if (transport_len == sizeof(*ih)) { 2189 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); 2190 2191 if (!max_delay) { 2192 max_delay = 10 * HZ; 2193 group = 0; 2194 } 2195 } else if (transport_len >= sizeof(*ih3)) { 2196 ih3 = igmpv3_query_hdr(skb); 2197 if (ih3->nsrcs) 2198 goto out; 2199 2200 max_delay = ih3->code ? 
2201 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 2202 } else { 2203 goto out; 2204 } 2205 2206 if (!group) { 2207 saddr.proto = htons(ETH_P_IP); 2208 saddr.u.ip4 = iph->saddr; 2209 2210 br_multicast_query_received(br, port, &br->ip4_other_query, 2211 &saddr, max_delay); 2212 goto out; 2213 } 2214 2215 mp = br_mdb_ip4_get(br, group, vid); 2216 if (!mp) 2217 goto out; 2218 2219 max_delay *= br->multicast_last_member_count; 2220 2221 if (mp->host_joined && 2222 (timer_pending(&mp->timer) ? 2223 time_after(mp->timer.expires, now + max_delay) : 2224 try_to_del_timer_sync(&mp->timer) >= 0)) 2225 mod_timer(&mp->timer, now + max_delay); 2226 2227 for (pp = &mp->ports; 2228 (p = mlock_dereference(*pp, br)) != NULL; 2229 pp = &p->next) { 2230 if (timer_pending(&p->timer) ? 2231 time_after(p->timer.expires, now + max_delay) : 2232 try_to_del_timer_sync(&p->timer) >= 0) 2233 mod_timer(&p->timer, now + max_delay); 2234 } 2235 2236 out: 2237 spin_unlock(&br->multicast_lock); 2238 } 2239 2240 #if IS_ENABLED(CONFIG_IPV6) 2241 static int br_ip6_multicast_query(struct net_bridge *br, 2242 struct net_bridge_port *port, 2243 struct sk_buff *skb, 2244 u16 vid) 2245 { 2246 unsigned int transport_len = ipv6_transport_len(skb); 2247 struct mld_msg *mld; 2248 struct net_bridge_mdb_entry *mp; 2249 struct mld2_query *mld2q; 2250 struct net_bridge_port_group *p; 2251 struct net_bridge_port_group __rcu **pp; 2252 struct br_ip saddr; 2253 unsigned long max_delay; 2254 unsigned long now = jiffies; 2255 unsigned int offset = skb_transport_offset(skb); 2256 const struct in6_addr *group = NULL; 2257 bool is_general_query; 2258 int err = 0; 2259 2260 spin_lock(&br->multicast_lock); 2261 if (!netif_running(br->dev) || 2262 (port && port->state == BR_STATE_DISABLED)) 2263 goto out; 2264 2265 if (transport_len == sizeof(*mld)) { 2266 if (!pskb_may_pull(skb, offset + sizeof(*mld))) { 2267 err = -EINVAL; 2268 goto out; 2269 } 2270 mld = (struct mld_msg *) icmp6_hdr(skb); 2271 max_delay = 
msecs_to_jiffies(ntohs(mld->mld_maxdelay)); 2272 if (max_delay) 2273 group = &mld->mld_mca; 2274 } else { 2275 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) { 2276 err = -EINVAL; 2277 goto out; 2278 } 2279 mld2q = (struct mld2_query *)icmp6_hdr(skb); 2280 if (!mld2q->mld2q_nsrcs) 2281 group = &mld2q->mld2q_mca; 2282 2283 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); 2284 } 2285 2286 is_general_query = group && ipv6_addr_any(group); 2287 2288 if (is_general_query) { 2289 saddr.proto = htons(ETH_P_IPV6); 2290 saddr.u.ip6 = ipv6_hdr(skb)->saddr; 2291 2292 br_multicast_query_received(br, port, &br->ip6_other_query, 2293 &saddr, max_delay); 2294 goto out; 2295 } else if (!group) { 2296 goto out; 2297 } 2298 2299 mp = br_mdb_ip6_get(br, group, vid); 2300 if (!mp) 2301 goto out; 2302 2303 max_delay *= br->multicast_last_member_count; 2304 if (mp->host_joined && 2305 (timer_pending(&mp->timer) ? 2306 time_after(mp->timer.expires, now + max_delay) : 2307 try_to_del_timer_sync(&mp->timer) >= 0)) 2308 mod_timer(&mp->timer, now + max_delay); 2309 2310 for (pp = &mp->ports; 2311 (p = mlock_dereference(*pp, br)) != NULL; 2312 pp = &p->next) { 2313 if (timer_pending(&p->timer) ? 
2314 time_after(p->timer.expires, now + max_delay) : 2315 try_to_del_timer_sync(&p->timer) >= 0) 2316 mod_timer(&p->timer, now + max_delay); 2317 } 2318 2319 out: 2320 spin_unlock(&br->multicast_lock); 2321 return err; 2322 } 2323 #endif 2324 2325 static void 2326 br_multicast_leave_group(struct net_bridge *br, 2327 struct net_bridge_port *port, 2328 struct br_ip *group, 2329 struct bridge_mcast_other_query *other_query, 2330 struct bridge_mcast_own_query *own_query, 2331 const unsigned char *src) 2332 { 2333 struct net_bridge_mdb_entry *mp; 2334 struct net_bridge_port_group *p; 2335 unsigned long now; 2336 unsigned long time; 2337 2338 spin_lock(&br->multicast_lock); 2339 if (!netif_running(br->dev) || 2340 (port && port->state == BR_STATE_DISABLED)) 2341 goto out; 2342 2343 mp = br_mdb_ip_get(br, group); 2344 if (!mp) 2345 goto out; 2346 2347 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) { 2348 struct net_bridge_port_group __rcu **pp; 2349 2350 for (pp = &mp->ports; 2351 (p = mlock_dereference(*pp, br)) != NULL; 2352 pp = &p->next) { 2353 if (!br_port_group_equal(p, port, src)) 2354 continue; 2355 2356 if (p->flags & MDB_PG_FLAGS_PERMANENT) 2357 break; 2358 2359 p->flags |= MDB_PG_FLAGS_FAST_LEAVE; 2360 br_multicast_del_pg(mp, p, pp); 2361 } 2362 goto out; 2363 } 2364 2365 if (timer_pending(&other_query->timer)) 2366 goto out; 2367 2368 if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) { 2369 __br_multicast_send_query(br, port, NULL, NULL, &mp->addr, 2370 false, 0, NULL); 2371 2372 time = jiffies + br->multicast_last_member_count * 2373 br->multicast_last_member_interval; 2374 2375 mod_timer(&own_query->timer, time); 2376 2377 for (p = mlock_dereference(mp->ports, br); 2378 p != NULL; 2379 p = mlock_dereference(p->next, br)) { 2380 if (!br_port_group_equal(p, port, src)) 2381 continue; 2382 2383 if (!hlist_unhashed(&p->mglist) && 2384 (timer_pending(&p->timer) ? 
2385 time_after(p->timer.expires, time) : 2386 try_to_del_timer_sync(&p->timer) >= 0)) { 2387 mod_timer(&p->timer, time); 2388 } 2389 2390 break; 2391 } 2392 } 2393 2394 now = jiffies; 2395 time = now + br->multicast_last_member_count * 2396 br->multicast_last_member_interval; 2397 2398 if (!port) { 2399 if (mp->host_joined && 2400 (timer_pending(&mp->timer) ? 2401 time_after(mp->timer.expires, time) : 2402 try_to_del_timer_sync(&mp->timer) >= 0)) { 2403 mod_timer(&mp->timer, time); 2404 } 2405 2406 goto out; 2407 } 2408 2409 for (p = mlock_dereference(mp->ports, br); 2410 p != NULL; 2411 p = mlock_dereference(p->next, br)) { 2412 if (p->port != port) 2413 continue; 2414 2415 if (!hlist_unhashed(&p->mglist) && 2416 (timer_pending(&p->timer) ? 2417 time_after(p->timer.expires, time) : 2418 try_to_del_timer_sync(&p->timer) >= 0)) { 2419 mod_timer(&p->timer, time); 2420 } 2421 2422 break; 2423 } 2424 out: 2425 spin_unlock(&br->multicast_lock); 2426 } 2427 2428 static void br_ip4_multicast_leave_group(struct net_bridge *br, 2429 struct net_bridge_port *port, 2430 __be32 group, 2431 __u16 vid, 2432 const unsigned char *src) 2433 { 2434 struct br_ip br_group; 2435 struct bridge_mcast_own_query *own_query; 2436 2437 if (ipv4_is_local_multicast(group)) 2438 return; 2439 2440 own_query = port ? &port->ip4_own_query : &br->ip4_own_query; 2441 2442 memset(&br_group, 0, sizeof(br_group)); 2443 br_group.u.ip4 = group; 2444 br_group.proto = htons(ETH_P_IP); 2445 br_group.vid = vid; 2446 2447 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query, 2448 own_query, src); 2449 } 2450 2451 #if IS_ENABLED(CONFIG_IPV6) 2452 static void br_ip6_multicast_leave_group(struct net_bridge *br, 2453 struct net_bridge_port *port, 2454 const struct in6_addr *group, 2455 __u16 vid, 2456 const unsigned char *src) 2457 { 2458 struct br_ip br_group; 2459 struct bridge_mcast_own_query *own_query; 2460 2461 if (ipv6_addr_is_ll_all_nodes(group)) 2462 return; 2463 2464 own_query = port ? 
		&port->ip6_own_query : &br->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif

/* Bump the per-CPU IGMP/MLD parse-error counter for port @p, or for the
 * bridge itself when @p is NULL, keyed by the packet's L3 protocol.
 * No-op unless multicast stats are enabled on the bridge.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* Snoop PIM packets: a PIMv2 HELLO received on @port indicates a
 * multicast router behind it, so mark the port as a router port.
 * Anything that is not a valid PIMv2 HELLO is ignored.
 */
static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

/* IPv4 Multicast Router Discovery: an MRD Advertisement marks the
 * ingress port as a router port.  Returns -ENOMSG for packets that are
 * not MRD Advertisements, 0 otherwise.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct
				 net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* Not an IGMP packet: restrict unknown non-link-local
		 * groups to mrouter ports, and let PIM hellos / MRD
		 * advertisements mark the ingress port as a router port.
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* reports are forwarded to mrouter ports only */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
						 true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 Multicast Router Discovery: validate the ICMPv6 message and, on
 * an MRD Advertisement, mark the ingress port as a router port.
 * Returns -ENOMSG for non-MRD packets, <0 on malformed ICMPv6.
 */
static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	int ret;

	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
		return -ENOMSG;

	ret = ipv6_mc_check_icmpv6(skb);
	if (ret < 0)
		return ret;

	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

/* Process a received IPv6 multicast packet: dispatch MLD reports,
 * queries and leaves to the snooping handlers, count parse errors, and
 * flag non-MLD multicast for mrouter-only forwarding.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		/* Not MLD: unknown groups go to mrouter ports only;
		 * all-snoopers packets may be MRD advertisements.
		 */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* reports are forwarded to mrouter ports only */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Snooping entry point for received multicast; returns 0 for packets
 * the snooper does not act on (including when snooping is disabled).
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
ret = br_multicast_ipv4_rcv(br, port, skb, vid); 2687 break; 2688 #if IS_ENABLED(CONFIG_IPV6) 2689 case htons(ETH_P_IPV6): 2690 ret = br_multicast_ipv6_rcv(br, port, skb, vid); 2691 break; 2692 #endif 2693 } 2694 2695 return ret; 2696 } 2697 2698 static void br_multicast_query_expired(struct net_bridge *br, 2699 struct bridge_mcast_own_query *query, 2700 struct bridge_mcast_querier *querier) 2701 { 2702 spin_lock(&br->multicast_lock); 2703 if (query->startup_sent < br->multicast_startup_query_count) 2704 query->startup_sent++; 2705 2706 RCU_INIT_POINTER(querier->port, NULL); 2707 br_multicast_send_query(br, NULL, query); 2708 spin_unlock(&br->multicast_lock); 2709 } 2710 2711 static void br_ip4_multicast_query_expired(struct timer_list *t) 2712 { 2713 struct net_bridge *br = from_timer(br, t, ip4_own_query.timer); 2714 2715 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier); 2716 } 2717 2718 #if IS_ENABLED(CONFIG_IPV6) 2719 static void br_ip6_multicast_query_expired(struct timer_list *t) 2720 { 2721 struct net_bridge *br = from_timer(br, t, ip6_own_query.timer); 2722 2723 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier); 2724 } 2725 #endif 2726 2727 static void __grp_src_gc(struct hlist_head *head) 2728 { 2729 struct net_bridge_group_src *ent; 2730 struct hlist_node *tmp; 2731 2732 hlist_for_each_entry_safe(ent, tmp, head, del_node) { 2733 hlist_del_init(&ent->del_node); 2734 del_timer_sync(&ent->timer); 2735 kfree_rcu(ent, rcu); 2736 } 2737 } 2738 2739 static void br_multicast_src_gc(struct work_struct *work) 2740 { 2741 struct net_bridge *br = container_of(work, struct net_bridge, 2742 src_gc_work); 2743 HLIST_HEAD(deleted_head); 2744 2745 spin_lock_bh(&br->multicast_lock); 2746 hlist_move_list(&br->src_gc_list, &deleted_head); 2747 spin_unlock_bh(&br->multicast_lock); 2748 2749 __grp_src_gc(&deleted_head); 2750 } 2751 2752 void br_multicast_init(struct net_bridge *br) 2753 { 2754 br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX; 
2755 2756 br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 2757 br->multicast_last_member_count = 2; 2758 br->multicast_startup_query_count = 2; 2759 2760 br->multicast_last_member_interval = HZ; 2761 br->multicast_query_response_interval = 10 * HZ; 2762 br->multicast_startup_query_interval = 125 * HZ / 4; 2763 br->multicast_query_interval = 125 * HZ; 2764 br->multicast_querier_interval = 255 * HZ; 2765 br->multicast_membership_interval = 260 * HZ; 2766 2767 br->ip4_other_query.delay_time = 0; 2768 br->ip4_querier.port = NULL; 2769 br->multicast_igmp_version = 2; 2770 #if IS_ENABLED(CONFIG_IPV6) 2771 br->multicast_mld_version = 1; 2772 br->ip6_other_query.delay_time = 0; 2773 br->ip6_querier.port = NULL; 2774 #endif 2775 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true); 2776 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true); 2777 2778 spin_lock_init(&br->multicast_lock); 2779 timer_setup(&br->multicast_router_timer, 2780 br_multicast_local_router_expired, 0); 2781 timer_setup(&br->ip4_other_query.timer, 2782 br_ip4_multicast_querier_expired, 0); 2783 timer_setup(&br->ip4_own_query.timer, 2784 br_ip4_multicast_query_expired, 0); 2785 #if IS_ENABLED(CONFIG_IPV6) 2786 timer_setup(&br->ip6_other_query.timer, 2787 br_ip6_multicast_querier_expired, 0); 2788 timer_setup(&br->ip6_own_query.timer, 2789 br_ip6_multicast_query_expired, 0); 2790 #endif 2791 INIT_HLIST_HEAD(&br->mdb_list); 2792 INIT_HLIST_HEAD(&br->src_gc_list); 2793 INIT_WORK(&br->src_gc_work, br_multicast_src_gc); 2794 } 2795 2796 static void br_ip4_multicast_join_snoopers(struct net_bridge *br) 2797 { 2798 struct in_device *in_dev = in_dev_get(br->dev); 2799 2800 if (!in_dev) 2801 return; 2802 2803 __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC); 2804 in_dev_put(in_dev); 2805 } 2806 2807 #if IS_ENABLED(CONFIG_IPV6) 2808 static void br_ip6_multicast_join_snoopers(struct net_bridge *br) 2809 { 2810 struct in6_addr addr; 2811 2812 ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a)); 2813 
ipv6_dev_mc_inc(br->dev, &addr); 2814 } 2815 #else 2816 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br) 2817 { 2818 } 2819 #endif 2820 2821 static void br_multicast_join_snoopers(struct net_bridge *br) 2822 { 2823 br_ip4_multicast_join_snoopers(br); 2824 br_ip6_multicast_join_snoopers(br); 2825 } 2826 2827 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br) 2828 { 2829 struct in_device *in_dev = in_dev_get(br->dev); 2830 2831 if (WARN_ON(!in_dev)) 2832 return; 2833 2834 __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC); 2835 in_dev_put(in_dev); 2836 } 2837 2838 #if IS_ENABLED(CONFIG_IPV6) 2839 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br) 2840 { 2841 struct in6_addr addr; 2842 2843 ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a)); 2844 ipv6_dev_mc_dec(br->dev, &addr); 2845 } 2846 #else 2847 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br) 2848 { 2849 } 2850 #endif 2851 2852 static void br_multicast_leave_snoopers(struct net_bridge *br) 2853 { 2854 br_ip4_multicast_leave_snoopers(br); 2855 br_ip6_multicast_leave_snoopers(br); 2856 } 2857 2858 static void __br_multicast_open(struct net_bridge *br, 2859 struct bridge_mcast_own_query *query) 2860 { 2861 query->startup_sent = 0; 2862 2863 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) 2864 return; 2865 2866 mod_timer(&query->timer, jiffies); 2867 } 2868 2869 void br_multicast_open(struct net_bridge *br) 2870 { 2871 if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 2872 br_multicast_join_snoopers(br); 2873 2874 __br_multicast_open(br, &br->ip4_own_query); 2875 #if IS_ENABLED(CONFIG_IPV6) 2876 __br_multicast_open(br, &br->ip6_own_query); 2877 #endif 2878 } 2879 2880 void br_multicast_stop(struct net_bridge *br) 2881 { 2882 del_timer_sync(&br->multicast_router_timer); 2883 del_timer_sync(&br->ip4_other_query.timer); 2884 del_timer_sync(&br->ip4_own_query.timer); 2885 #if IS_ENABLED(CONFIG_IPV6) 2886 
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);
}

/* Tear down all multicast state when the bridge device is deleted:
 * flush every MDB entry, drain the source-entry GC list, and wait for
 * in-flight RCU readers before the caller frees the bridge.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
		del_timer(&mp->timer);
		rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
				       br_mdb_rht_params);
		hlist_del_rcu(&mp->mdb_node);
		kfree_rcu(mp, rcu);
	}
	hlist_move_list(&br->src_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	/* free moved source entries outside the lock, then make sure the
	 * GC worker cannot run again and all RCU callbacks have finished
	 */
	__grp_src_gc(&deleted_head);
	cancel_work_sync(&br->src_gc_work);

	rcu_barrier();
}

/* Set the bridge-global multicast router mode.  Only DISABLED, PERM and
 * TEMP_QUERY are accepted at bridge level; returns -EINVAL otherwise.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Unlink @p from the router-port list (if present), notify userspace,
 * and demote a TEMP router port so its timer cannot re-add it.
 * Caller holds br->multicast_lock.  NOTE(review): lock requirement
 * inferred from callers in this file — confirm against full source.
 */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

int
/* Set the per-port multicast router mode.  Re-setting the current TEMP
 * mode just refreshes the router-port timer.  Returns -EINVAL for
 * unknown modes, 0 on success.
 */
br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

/* (Re)start sending own queries for the address family selected by
 * @query, on the bridge and on every port that is not disabled or
 * blocking.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		/* pointer identity selects the IPv4 vs IPv6 query */
		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
	rcu_read_unlock();
}

/* Toggle multicast snooping on the bridge.  Always returns 0; a no-op
 * if the option already matches @val.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		br_multicast_leave_snoopers(br);
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

/* True if multicast snooping is enabled on bridge device @dev. */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

/* True if bridge device @dev currently acts as a multicast router. */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(br);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

/* Enable/disable acting as IGMP/MLD querier.  On enable, arm the
 * other-querier delay window and (re)start own queries for both
 * address families.  Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
spin_unlock_bh(&br->multicast_lock); 3102 3103 return 0; 3104 } 3105 3106 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val) 3107 { 3108 /* Currently we support only version 2 and 3 */ 3109 switch (val) { 3110 case 2: 3111 case 3: 3112 break; 3113 default: 3114 return -EINVAL; 3115 } 3116 3117 spin_lock_bh(&br->multicast_lock); 3118 br->multicast_igmp_version = val; 3119 spin_unlock_bh(&br->multicast_lock); 3120 3121 return 0; 3122 } 3123 3124 #if IS_ENABLED(CONFIG_IPV6) 3125 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val) 3126 { 3127 /* Currently we support version 1 and 2 */ 3128 switch (val) { 3129 case 1: 3130 case 2: 3131 break; 3132 default: 3133 return -EINVAL; 3134 } 3135 3136 spin_lock_bh(&br->multicast_lock); 3137 br->multicast_mld_version = val; 3138 spin_unlock_bh(&br->multicast_lock); 3139 3140 return 0; 3141 } 3142 #endif 3143 3144 /** 3145 * br_multicast_list_adjacent - Returns snooped multicast addresses 3146 * @dev: The bridge port adjacent to which to retrieve addresses 3147 * @br_ip_list: The list to store found, snooped multicast IP addresses in 3148 * 3149 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast 3150 * snooping feature on all bridge ports of dev's bridge device, excluding 3151 * the addresses from dev itself. 3152 * 3153 * Returns the number of items added to br_ip_list. 
3154 * 3155 * Notes: 3156 * - br_ip_list needs to be initialized by caller 3157 * - br_ip_list might contain duplicates in the end 3158 * (needs to be taken care of by caller) 3159 * - br_ip_list needs to be freed by caller 3160 */ 3161 int br_multicast_list_adjacent(struct net_device *dev, 3162 struct list_head *br_ip_list) 3163 { 3164 struct net_bridge *br; 3165 struct net_bridge_port *port; 3166 struct net_bridge_port_group *group; 3167 struct br_ip_list *entry; 3168 int count = 0; 3169 3170 rcu_read_lock(); 3171 if (!br_ip_list || !netif_is_bridge_port(dev)) 3172 goto unlock; 3173 3174 port = br_port_get_rcu(dev); 3175 if (!port || !port->br) 3176 goto unlock; 3177 3178 br = port->br; 3179 3180 list_for_each_entry_rcu(port, &br->port_list, list) { 3181 if (!port->dev || port->dev == dev) 3182 continue; 3183 3184 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 3185 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 3186 if (!entry) 3187 goto unlock; 3188 3189 entry->addr = group->addr; 3190 list_add(&entry->list, br_ip_list); 3191 count++; 3192 } 3193 } 3194 3195 unlock: 3196 rcu_read_unlock(); 3197 return count; 3198 } 3199 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); 3200 3201 /** 3202 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge 3203 * @dev: The bridge port providing the bridge on which to check for a querier 3204 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 3205 * 3206 * Checks whether the given interface has a bridge on top and if so returns 3207 * true if a valid querier exists anywhere on the bridged link layer. 3208 * Otherwise returns false. 
3209 */ 3210 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 3211 { 3212 struct net_bridge *br; 3213 struct net_bridge_port *port; 3214 struct ethhdr eth; 3215 bool ret = false; 3216 3217 rcu_read_lock(); 3218 if (!netif_is_bridge_port(dev)) 3219 goto unlock; 3220 3221 port = br_port_get_rcu(dev); 3222 if (!port || !port->br) 3223 goto unlock; 3224 3225 br = port->br; 3226 3227 memset(ð, 0, sizeof(eth)); 3228 eth.h_proto = htons(proto); 3229 3230 ret = br_multicast_querier_exists(br, ð); 3231 3232 unlock: 3233 rcu_read_unlock(); 3234 return ret; 3235 } 3236 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 3237 3238 /** 3239 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 3240 * @dev: The bridge port adjacent to which to check for a querier 3241 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 3242 * 3243 * Checks whether the given interface has a bridge on top and if so returns 3244 * true if a selected querier is behind one of the other ports of this 3245 * bridge. Otherwise returns false. 
3246 */ 3247 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) 3248 { 3249 struct net_bridge *br; 3250 struct net_bridge_port *port; 3251 bool ret = false; 3252 3253 rcu_read_lock(); 3254 if (!netif_is_bridge_port(dev)) 3255 goto unlock; 3256 3257 port = br_port_get_rcu(dev); 3258 if (!port || !port->br) 3259 goto unlock; 3260 3261 br = port->br; 3262 3263 switch (proto) { 3264 case ETH_P_IP: 3265 if (!timer_pending(&br->ip4_other_query.timer) || 3266 rcu_dereference(br->ip4_querier.port) == port) 3267 goto unlock; 3268 break; 3269 #if IS_ENABLED(CONFIG_IPV6) 3270 case ETH_P_IPV6: 3271 if (!timer_pending(&br->ip6_other_query.timer) || 3272 rcu_dereference(br->ip6_querier.port) == port) 3273 goto unlock; 3274 break; 3275 #endif 3276 default: 3277 goto unlock; 3278 } 3279 3280 ret = true; 3281 unlock: 3282 rcu_read_unlock(); 3283 return ret; 3284 } 3285 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 3286 3287 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats, 3288 const struct sk_buff *skb, u8 type, u8 dir) 3289 { 3290 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats); 3291 __be16 proto = skb->protocol; 3292 unsigned int t_len; 3293 3294 u64_stats_update_begin(&pstats->syncp); 3295 switch (proto) { 3296 case htons(ETH_P_IP): 3297 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 3298 switch (type) { 3299 case IGMP_HOST_MEMBERSHIP_REPORT: 3300 pstats->mstats.igmp_v1reports[dir]++; 3301 break; 3302 case IGMPV2_HOST_MEMBERSHIP_REPORT: 3303 pstats->mstats.igmp_v2reports[dir]++; 3304 break; 3305 case IGMPV3_HOST_MEMBERSHIP_REPORT: 3306 pstats->mstats.igmp_v3reports[dir]++; 3307 break; 3308 case IGMP_HOST_MEMBERSHIP_QUERY: 3309 if (t_len != sizeof(struct igmphdr)) { 3310 pstats->mstats.igmp_v3queries[dir]++; 3311 } else { 3312 unsigned int offset = skb_transport_offset(skb); 3313 struct igmphdr *ih, _ihdr; 3314 3315 ih = skb_header_pointer(skb, offset, 3316 sizeof(_ihdr), &_ihdr); 3317 if (!ih) 3318 break; 
3319 if (!ih->code) 3320 pstats->mstats.igmp_v1queries[dir]++; 3321 else 3322 pstats->mstats.igmp_v2queries[dir]++; 3323 } 3324 break; 3325 case IGMP_HOST_LEAVE_MESSAGE: 3326 pstats->mstats.igmp_leaves[dir]++; 3327 break; 3328 } 3329 break; 3330 #if IS_ENABLED(CONFIG_IPV6) 3331 case htons(ETH_P_IPV6): 3332 t_len = ntohs(ipv6_hdr(skb)->payload_len) + 3333 sizeof(struct ipv6hdr); 3334 t_len -= skb_network_header_len(skb); 3335 switch (type) { 3336 case ICMPV6_MGM_REPORT: 3337 pstats->mstats.mld_v1reports[dir]++; 3338 break; 3339 case ICMPV6_MLD2_REPORT: 3340 pstats->mstats.mld_v2reports[dir]++; 3341 break; 3342 case ICMPV6_MGM_QUERY: 3343 if (t_len != sizeof(struct mld_msg)) 3344 pstats->mstats.mld_v2queries[dir]++; 3345 else 3346 pstats->mstats.mld_v1queries[dir]++; 3347 break; 3348 case ICMPV6_MGM_REDUCTION: 3349 pstats->mstats.mld_leaves[dir]++; 3350 break; 3351 } 3352 break; 3353 #endif /* CONFIG_IPV6 */ 3354 } 3355 u64_stats_update_end(&pstats->syncp); 3356 } 3357 3358 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, 3359 const struct sk_buff *skb, u8 type, u8 dir) 3360 { 3361 struct bridge_mcast_stats __percpu *stats; 3362 3363 /* if multicast_disabled is true then igmp type can't be set */ 3364 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) 3365 return; 3366 3367 if (p) 3368 stats = p->mcast_stats; 3369 else 3370 stats = br->mcast_stats; 3371 if (WARN_ON(!stats)) 3372 return; 3373 3374 br_mcast_stats_add(stats, skb, type, dir); 3375 } 3376 3377 int br_multicast_init_stats(struct net_bridge *br) 3378 { 3379 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 3380 if (!br->mcast_stats) 3381 return -ENOMEM; 3382 3383 return 0; 3384 } 3385 3386 void br_multicast_uninit_stats(struct net_bridge *br) 3387 { 3388 free_percpu(br->mcast_stats); 3389 } 3390 3391 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 3392 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 3393 { 
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

/* Aggregate per-CPU multicast counters for port @p (or the whole bridge
 * when @p is NULL) into @dest, reading each CPU's block consistently
 * via the u64 stats seqcount.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry the snapshot if a writer raced with us */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}

/* Initialize the MDB rhashtable; returns rhashtable_init()'s result. */
int br_mdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->mdb_hash_tbl,
			       &br_mdb_rht_params);
}

/* Free the MDB rhashtable on bridge teardown. */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->mdb_hash_tbl);
}