// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

/* MDB hash table parameters: entries are keyed by the whole struct br_ip
 * (group address, protocol and vid).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif

/* Look up an MDB entry by group address.  Caller must be inside an RCU
 * read-side critical section (forwarding fast path).
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* Look up an MDB entry by group address.  Caller must hold
 * br->multicast_lock (asserted below); the lock is what keeps the entry
 * valid after the internal RCU read section ends.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Convenience wrapper around br_mdb_ip_get() for an IPv4 group/vid pair. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper around br_mdb_ip_get() for an IPv6 group/vid pair. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Forwarding-path lookup: map an skb's destination address to its MDB
 * entry.  Returns NULL when snooping is disabled, for IGMP/MLD control
 * traffic (BR_INPUT_SKB_CB(skb)->igmp set by the snooping code) and for
 * non-IP protocols.  Runs under RCU via br_mdb_ip_get_rcu().
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* MDB entry (group membership) timer.  On expiry the bridge itself
 * leaves the group and, if no port memberships remain either, the entry
 * is removed from the hash table and freed via RCU.  A pending timer
 * means the entry was refreshed after this expiry was queued, so it is
 * left alone.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_rcu(&mp->mdb_node);

	kfree_rcu(mp, rcu);

out:
	spin_unlock(&br->multicast_lock);
}

/* Unlink a source entry from its port group and queue it on the
 * bridge-wide source GC list for deferred freeing in workqueue context.
 * Caller holds br->multicast_lock.
 */
static void br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->del_node, &br->src_gc_list);
	queue_work(system_long_wq, &br->src_gc_work);
}

/* Delete a port group: unlink it from its MDB entry via *pp, stop its
 * timers, drop all of its source entries, notify userspace (RTM_DELMDB)
 * and free via RCU.  If the MDB entry is now completely unused, arm its
 * timer to fire immediately so br_multicast_group_expired() reclaims it.
 * Caller holds br->multicast_lock.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	del_timer(&pg->timer);
	del_timer(&pg->rexmit_timer);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	kfree_rcu(pg, rcu);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

/* Locate pg's predecessor pointer in its MDB entry's port list and
 * delete it through br_multicast_del_pg().  Both the entry and the
 * group are expected to exist (WARNs otherwise).
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

/* Port group timer.  On expiry, switch the group back to INCLUDE mode,
 * drop sources whose own timers have expired, and delete the whole port
 * group once its source list is empty.  Entries marked PERMANENT are
 * never expired; a pending timer means the group was refreshed.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	/* notify only if the entry visibly changed (mode flip or source
	 * deletion below)
	 */
	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->addr);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* Build an IGMP query skb addressed to ip_dst.  For IGMPv3
 * group-and-source-specific queries (with_srcs), only sources on the
 * requested side of the last-member-query time (over_lmqt) that still
 * have retransmissions pending are listed; returns NULL when no such
 * source exists or the packet would exceed the (port or bridge) MTU.
 * *need_rexmit is set when a listed source still has retransmit budget
 * left after this query.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* count sources to size the v3 header */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* "+ 4" is the Router Alert IP option appended to the header */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 20-byte header + 4-byte Router Alert option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* ihl = 6 words */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill the source list counted above and
		 * consume one retransmission credit per listed source
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.u.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6/MLD counterpart of br_ip4_multicast_alloc_query(); same source
 * selection rules, using the last-listener-query time (over_llqt).
 * Also tracks whether the bridge has a usable IPv6 source address via
 * BROPT_HAS_IPV6_ADDR.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* count sources to size the MLDv2 header */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* "+ 8" is the Hop-by-Hop extension header (Router Alert) */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		/* no usable IPv6 source address - remember that and bail */
		kfree_skb(skb);
		br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill the source list counted above and
		 * consume one retransmission credit per listed source
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.u.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Protocol-dispatching wrapper: build an IGMP or MLD query depending on
 * group->proto.  A NULL ip_dst selects the all-hosts / all-nodes
 * destination.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->u.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(br, pg,
						    ip4_dst, group->u.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->u.ip6;
		else
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));	/* ff02::1 all-nodes */

		return br_ip6_multicast_alloc_query(br, pg,
						    &ip6_dst, &group->u.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Find or create the MDB entry for a group.  Disables multicast snooping
 * when the hash table reaches hash_max.  Caller holds
 * br->multicast_lock (required by br_mdb_ip_get()).  Returns the entry
 * or an ERR_PTR.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Source entry timer.  In INCLUDE mode an expired source is removed; if
 * it was the last one, the whole port group goes with it.  (In EXCLUDE
 * mode expiry is handled by the mode semantics, not by deletion here.)
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* Linear search for a source address in a port group's source list. */
static struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->u.ip4 == ent->addr.u.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.u.ip6, &ip->u.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate and link a new source entry for a port group.  Rejects
 * invalid source addresses (zeronet/any, multicast) and enforces the
 * per-group PG_SRC_ENT_LIMIT.  Returns NULL on any failure.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->u.ip4) ||
		    ipv4_is_multicast(src_ip->u.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->u.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->u.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->port->br;
	grp_src->addr = *src_ip;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

/* Allocate a new port group membership and link it on the port's mglist.
 * src is the optional host MAC for unicast-to-host forwarding; a NULL
 * src stores the broadcast address (matches-all marker, see
 * br_port_group_equal()).  The caller links it into the MDB entry.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	INIT_HLIST_HEAD(&p->src_list);
	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}

/* Does port group p represent (port, src)?  The host MAC only matters
 * when the port does unicast-to-host forwarding.
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Mark the bridge device itself as a member of mp's group and (re)arm
 * the membership timer.  Notifies userspace on the first join only.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}
	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}

/* Drop the bridge device's own membership in mp's group. */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Core join handling: record that (port, group, src) has a listener.  A
 * NULL port means the bridge device itself joins.  For IGMPv2/MLDv1
 * reports the membership timer is refreshed (v3 source handling drives
 * the timers separately).  Returns 0 or a negative errno.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		br_multicast_host_join(mp, true);
		goto out;
	}

	/* the list is kept sorted by descending port pointer value */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode);
	if (unlikely(!p))
		/* NOTE(review): err still holds PTR_ERR() of a valid mp
		 * here, not -ENOMEM - looks wrong; confirm against
		 * upstream before relying on the return value.
		 */
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

/* IGMP join wrapper: link-local groups (224.0.0.x) are never snooped.
 * IGMPv2 reports imply EXCLUDE {} (i.e. "any source") filter mode.
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD join wrapper: the all-nodes group is never snooped.  MLDv1
 * reports imply EXCLUDE {} filter mode.
 */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      mldv1);
}
#endif

/* Per-port multicast router timer: a learned (temporary) router entry
 * expires here.  Disabled/permanent modes and refreshed timers are
 * left untouched.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
		from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}

/* Propagate the bridge device's own mrouter state to switchdev. */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

/* Bridge-local multicast router timer: the bridge itself stops acting
 * as a (learned) multicast router when this fires.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}

/* The "other querier present" timer expired: no foreign querier was
 * heard, so start sending our own queries.
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

/* IPv4 other-querier timer callback. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6) 955 static void br_ip6_multicast_querier_expired(struct timer_list *t) 956 { 957 struct net_bridge *br = from_timer(br, t, ip6_other_query.timer); 958 959 br_multicast_querier_expired(br, &br->ip6_own_query); 960 } 961 #endif 962 963 static void br_multicast_select_own_querier(struct net_bridge *br, 964 struct br_ip *ip, 965 struct sk_buff *skb) 966 { 967 if (ip->proto == htons(ETH_P_IP)) 968 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr; 969 #if IS_ENABLED(CONFIG_IPV6) 970 else 971 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr; 972 #endif 973 } 974 975 static void __br_multicast_send_query(struct net_bridge *br, 976 struct net_bridge_port *port, 977 struct net_bridge_port_group *pg, 978 struct br_ip *ip_dst, 979 struct br_ip *group, 980 bool with_srcs, 981 u8 sflag, 982 bool *need_rexmit) 983 { 984 bool over_lmqt = !!sflag; 985 struct sk_buff *skb; 986 u8 igmp_type; 987 988 again_under_lmqt: 989 skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs, 990 over_lmqt, sflag, &igmp_type, 991 need_rexmit); 992 if (!skb) 993 return; 994 995 if (port) { 996 skb->dev = port->dev; 997 br_multicast_count(br, port, skb, igmp_type, 998 BR_MCAST_DIR_TX); 999 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, 1000 dev_net(port->dev), NULL, skb, NULL, skb->dev, 1001 br_dev_queue_push_xmit); 1002 1003 if (over_lmqt && with_srcs && sflag) { 1004 over_lmqt = false; 1005 goto again_under_lmqt; 1006 } 1007 } else { 1008 br_multicast_select_own_querier(br, group, skb); 1009 br_multicast_count(br, port, skb, igmp_type, 1010 BR_MCAST_DIR_RX); 1011 netif_rx(skb); 1012 } 1013 } 1014 1015 static void br_multicast_send_query(struct net_bridge *br, 1016 struct net_bridge_port *port, 1017 struct bridge_mcast_own_query *own_query) 1018 { 1019 struct bridge_mcast_other_query *other_query = NULL; 1020 struct br_ip br_group; 1021 unsigned long time; 1022 1023 if (!netif_running(br->dev) || 1024 !br_opt_get(br, BROPT_MULTICAST_ENABLED) || 1025 !br_opt_get(br, 
BROPT_MULTICAST_QUERIER)) 1026 return; 1027 1028 memset(&br_group.u, 0, sizeof(br_group.u)); 1029 1030 if (port ? (own_query == &port->ip4_own_query) : 1031 (own_query == &br->ip4_own_query)) { 1032 other_query = &br->ip4_other_query; 1033 br_group.proto = htons(ETH_P_IP); 1034 #if IS_ENABLED(CONFIG_IPV6) 1035 } else { 1036 other_query = &br->ip6_other_query; 1037 br_group.proto = htons(ETH_P_IPV6); 1038 #endif 1039 } 1040 1041 if (!other_query || timer_pending(&other_query->timer)) 1042 return; 1043 1044 __br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0, 1045 NULL); 1046 1047 time = jiffies; 1048 time += own_query->startup_sent < br->multicast_startup_query_count ? 1049 br->multicast_startup_query_interval : 1050 br->multicast_query_interval; 1051 mod_timer(&own_query->timer, time); 1052 } 1053 1054 static void 1055 br_multicast_port_query_expired(struct net_bridge_port *port, 1056 struct bridge_mcast_own_query *query) 1057 { 1058 struct net_bridge *br = port->br; 1059 1060 spin_lock(&br->multicast_lock); 1061 if (port->state == BR_STATE_DISABLED || 1062 port->state == BR_STATE_BLOCKING) 1063 goto out; 1064 1065 if (query->startup_sent < br->multicast_startup_query_count) 1066 query->startup_sent++; 1067 1068 br_multicast_send_query(port->br, port, query); 1069 1070 out: 1071 spin_unlock(&br->multicast_lock); 1072 } 1073 1074 static void br_ip4_multicast_port_query_expired(struct timer_list *t) 1075 { 1076 struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer); 1077 1078 br_multicast_port_query_expired(port, &port->ip4_own_query); 1079 } 1080 1081 #if IS_ENABLED(CONFIG_IPV6) 1082 static void br_ip6_multicast_port_query_expired(struct timer_list *t) 1083 { 1084 struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer); 1085 1086 br_multicast_port_query_expired(port, &port->ip6_own_query); 1087 } 1088 #endif 1089 1090 static void br_multicast_port_group_rexmit(struct timer_list *t) 1091 { 1092 struct 
net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer); 1093 struct bridge_mcast_other_query *other_query = NULL; 1094 struct net_bridge *br = pg->port->br; 1095 bool need_rexmit = false; 1096 1097 spin_lock(&br->multicast_lock); 1098 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) || 1099 !br_opt_get(br, BROPT_MULTICAST_ENABLED) || 1100 !br_opt_get(br, BROPT_MULTICAST_QUERIER)) 1101 goto out; 1102 1103 if (pg->addr.proto == htons(ETH_P_IP)) 1104 other_query = &br->ip4_other_query; 1105 #if IS_ENABLED(CONFIG_IPV6) 1106 else 1107 other_query = &br->ip6_other_query; 1108 #endif 1109 1110 if (!other_query || timer_pending(&other_query->timer)) 1111 goto out; 1112 1113 if (pg->grp_query_rexmit_cnt) { 1114 pg->grp_query_rexmit_cnt--; 1115 __br_multicast_send_query(br, pg->port, pg, &pg->addr, 1116 &pg->addr, false, 1, NULL); 1117 } 1118 __br_multicast_send_query(br, pg->port, pg, &pg->addr, 1119 &pg->addr, true, 0, &need_rexmit); 1120 1121 if (pg->grp_query_rexmit_cnt || need_rexmit) 1122 mod_timer(&pg->rexmit_timer, jiffies + 1123 br->multicast_last_member_interval); 1124 out: 1125 spin_unlock(&br->multicast_lock); 1126 } 1127 1128 static void br_mc_disabled_update(struct net_device *dev, bool value) 1129 { 1130 struct switchdev_attr attr = { 1131 .orig_dev = dev, 1132 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, 1133 .flags = SWITCHDEV_F_DEFER, 1134 .u.mc_disabled = !value, 1135 }; 1136 1137 switchdev_port_attr_set(dev, &attr); 1138 } 1139 1140 int br_multicast_add_port(struct net_bridge_port *port) 1141 { 1142 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 1143 1144 timer_setup(&port->multicast_router_timer, 1145 br_multicast_router_expired, 0); 1146 timer_setup(&port->ip4_own_query.timer, 1147 br_ip4_multicast_port_query_expired, 0); 1148 #if IS_ENABLED(CONFIG_IPV6) 1149 timer_setup(&port->ip6_own_query.timer, 1150 br_ip6_multicast_port_query_expired, 0); 1151 #endif 1152 br_mc_disabled_update(port->dev, 1153 br_opt_get(port->br, 
BROPT_MULTICAST_ENABLED)); 1154 1155 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 1156 if (!port->mcast_stats) 1157 return -ENOMEM; 1158 1159 return 0; 1160 } 1161 1162 void br_multicast_del_port(struct net_bridge_port *port) 1163 { 1164 struct net_bridge *br = port->br; 1165 struct net_bridge_port_group *pg; 1166 struct hlist_node *n; 1167 1168 /* Take care of the remaining groups, only perm ones should be left */ 1169 spin_lock_bh(&br->multicast_lock); 1170 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 1171 br_multicast_find_del_pg(br, pg); 1172 spin_unlock_bh(&br->multicast_lock); 1173 del_timer_sync(&port->multicast_router_timer); 1174 free_percpu(port->mcast_stats); 1175 } 1176 1177 static void br_multicast_enable(struct bridge_mcast_own_query *query) 1178 { 1179 query->startup_sent = 0; 1180 1181 if (try_to_del_timer_sync(&query->timer) >= 0 || 1182 del_timer(&query->timer)) 1183 mod_timer(&query->timer, jiffies); 1184 } 1185 1186 static void __br_multicast_enable_port(struct net_bridge_port *port) 1187 { 1188 struct net_bridge *br = port->br; 1189 1190 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev)) 1191 return; 1192 1193 br_multicast_enable(&port->ip4_own_query); 1194 #if IS_ENABLED(CONFIG_IPV6) 1195 br_multicast_enable(&port->ip6_own_query); 1196 #endif 1197 if (port->multicast_router == MDB_RTR_TYPE_PERM && 1198 hlist_unhashed(&port->rlist)) 1199 br_multicast_add_router(br, port); 1200 } 1201 1202 void br_multicast_enable_port(struct net_bridge_port *port) 1203 { 1204 struct net_bridge *br = port->br; 1205 1206 spin_lock(&br->multicast_lock); 1207 __br_multicast_enable_port(port); 1208 spin_unlock(&br->multicast_lock); 1209 } 1210 1211 void br_multicast_disable_port(struct net_bridge_port *port) 1212 { 1213 struct net_bridge *br = port->br; 1214 struct net_bridge_port_group *pg; 1215 struct hlist_node *n; 1216 1217 spin_lock(&br->multicast_lock); 1218 hlist_for_each_entry_safe(pg, n, 
				  &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_find_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

/* Remove all source entries previously flagged BR_SGRP_F_DELETE.
 * Returns the number of entries removed.
 */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent);
			deleted++;
		}

	return deleted;
}

/* State          Msg type   New state          Actions
 * INCLUDE (A)    IS_IN (B)  INCLUDE (A+B)      (B)=GMI
 * INCLUDE (A)    ALLOW (B)  INCLUDE (A+B)      (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)  EXCLUDE (X+A,Y-A)  (A)=GMI
 */
static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
				     void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->addr.proto;
	/* srcs is a packed array of nsrcs addresses, src_size bytes each */
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.u, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		/* (B)/(A)=GMI: refresh the source timer */
		if (ent)
			mod_timer(&ent->timer, now + br_multicast_gmi(br));
		srcs += src_size;
	}

	return changed;
}

/* State        Msg type   New state          Actions
 * INCLUDE (A)  IS_EX (B)  EXCLUDE (A*B,B-A)  (B-A)=0
 *                                            Delete (A-B)
 *                                            Group Timer=GMI
 */
static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
				 void
				 *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark what the report lists */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.u, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			br_multicast_new_group_src(pg, &src_ip);
		srcs += src_size;
	}

	/* Delete (A-B) */
	__grp_src_delete_marked(pg);
}

/* State          Msg type   New state          Actions
 * EXCLUDE (X,Y)  IS_EX (A)  EXCLUDE (A-Y,Y*A)  (A-X-Y)=GMI
 *                                              Delete (X-A)
 *                                              Delete (Y-A)
 *                                              Group Timer=GMI
 */
static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.u, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* newly listed source: (A-X-Y)=GMI */
				mod_timer(&ent->timer,
					  now + br_multicast_gmi(br));
				changed = true;
			}
		}
		srcs += src_size;
	}

	/* Delete (X-A), Delete (Y-A) */
	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Handle an IS_EXCLUDE record: dispatch on the current filter mode,
 * switch the group to EXCLUDE and refresh the group timer.  Returns
 * true if the port group changed.
 */
static bool br_multicast_isexc(struct net_bridge_port_group *pg,
			       void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(pg, srcs, nsrcs, src_size);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	/* Group Timer=GMI */
	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}

/* Find the port group entry matching (p, src) on mp's port list;
 * caller holds br->multicast_lock (mlock_dereference).
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge_port_group *pg;
	struct net_bridge *br = mp->br;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Parse an IGMPv3 membership report and apply each group record.
 * Returns 0, or -EINVAL for a truncated/malformed report.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = br->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	bool changed = false;
	__be32 group;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* recompute from skb->data: the pull may have moved it */
		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* each IPv4 source address is 4 bytes */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case
		     IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* empty INCLUDE record is equivalent to a leave */
			if (!port || igmpv2) {
				br_ip4_multicast_leave_group(br, port, group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src, igmpv2);
			if (err)
				break;
		}

		/* v3 per-source processing only for port entries */
		if (!port || igmpv2)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip4_get(br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		/* NOTE(review): 'changed' is not reset per record; for record
		 * types not handled below it keeps the previous iteration's
		 * value, which can cause a spurious br_mdb_notify() — verify
		 * intent.
		 */
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, grec->grec_src,
							   nsrcs, sizeof(__be32));
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
							   sizeof(__be32));
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
						     sizeof(__be32));
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 listener report and apply each multicast address record.
 * Returns 0, or -EINVAL for a truncated/malformed report.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = br->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	/* number of multicast address records in the report */
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		/* read nsrcs without assuming linear skb data */
		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* empty INCLUDE record is equivalent to a leave */
			if (!port || mldv1) {
				br_ip6_multicast_leave_group(br, port,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!port || mldv1)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* NOTE(review): unlike the IGMPv3 path, grec is not reloaded
		 * from skb->data here after br_ip6_multicast_add_group();
		 * confirm it cannot be stale if the skb was pulled/cloned in
		 * between.
		 */
		switch (grec->grec_type) {
		case
		     MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, grec->grec_src,
							   nsrcs,
							   sizeof(struct in6_addr));
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
							   sizeof(struct in6_addr));
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
						     sizeof(struct in6_addr));
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}
#endif

/* IGMP querier election: the numerically lowest source address wins.
 * Returns true if saddr is adopted as the selected querier.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	/* no querier activity seen yet on either side */
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election: lowest source address wins. */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

/* Protocol dispatch for the querier election above. */
static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct
					br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

/* Record when a foreign querier was first heard and (re)arm its
 * presence timer for a full querier interval.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/* Tell offloading hardware whether a port faces a multicast router. */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* already on the router list */
	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}

/* Refresh multicast-router state for the bridge itself (port == NULL)
 * or for a port, honouring the configured router type.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router ==
		    MDB_RTR_TYPE_TEMP_QUERY) {
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	/* DISABLED never becomes a router; PERM needs no timer refresh */
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

/* A query from another device was seen: run querier election and, if it
 * wins, track the foreign querier and mark the ingress as a router path.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

/* Process an incoming IGMP query (general or group-specific). */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv2 query: code is max response time in 1/10s units */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 query: fixed 10s window, always general */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* group-and-source specific queries are not handled here */
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: feed the querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* shorten pending expiries down to the query's response window */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Process an incoming MLD query.  Returns 0, or -EINVAL if the message
 * is too short for its claimed version.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay =
			msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		/* clamp to at least one jiffy */
		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query: feed the querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* shorten pending expiries down to the query's response window */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Protocol-independent leave handling: fast-leave deletes the port group
 * immediately; otherwise, if we act as querier, send a group query and
 * lower the relevant membership timers to the last-member window.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* fast leave: drop the non-permanent entry right away */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* a foreign querier will handle last-member queries */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* host leave: only lower the group timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* IGMP leave for (group, vid) reported on port (NULL == host). */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	/* link-local 224.0.0.x groups are never snooped */
	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD "done" (leave) for (group, vid) reported on port (NULL == host). */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	/* the all-nodes group is never snooped */
	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ?
		    &port->ip6_own_query : &br->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif

/* Bump the per-cpu IGMP/MLD parse-error counter for the port (or the
 * bridge itself when p == NULL), if stats are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIM hello on a port implies a multicast router is attached there. */
static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

/* IGMP multicast-router-discovery advertisement: mark ingress as router.
 * Returns 0 or -ENOMSG when the packet is not an MRD advertisement.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

/* IPv4 snooping entry point: validate IGMP and dispatch by message type. */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct
				 net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* not IGMP: flag routable groups for router-port-only
		 * forwarding and handle PIM hellos / MRD advertisements
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
						 true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 multicast-router-discovery advertisement: mark ingress as router.
 * Returns 0, -ENOMSG for non-MRD traffic, or a negative checksum error.
 */
static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	int ret;

	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
		return -ENOMSG;

	ret = ipv6_mc_check_icmpv6(skb);
	if (ret < 0)
		return ret;

	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

/* IPv6 snooping entry point: validate MLD and dispatch by message type. */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		/* not MLD: flag for router-port-only forwarding and handle
		 * MRD advertisements to the all-snoopers address
		 */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Main snooping RX hook, called from the bridge input path.
 * port == NULL means the packet originated from the bridge device itself.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer expiry: count a startup query if still in the startup
 * phase, forget the elected querier port and send the next query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

/* Free a detached list of group-source entries, waiting for each
 * entry's timer before the RCU-deferred free.
 */
static void __grp_src_gc(struct hlist_head *head)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(ent, tmp, head, del_node) {
		hlist_del_init(&ent->del_node);
		del_timer_sync(&ent->timer);
		kfree_rcu(ent, rcu);
	}
}

/* Deferred GC work: detach the pending list under the lock, free the
 * entries outside it (del_timer_sync must not run under a spinlock held
 * by the timer handlers).
 */
static void br_multicast_src_gc(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     src_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->src_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	__grp_src_gc(&deleted_head);
}

/* Bridge-level multicast init: set defaults, timers and lists. */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* protocol default intervals */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->src_gc_list);
	INIT_WORK(&br->src_gc_work, br_multicast_src_gc);
}

/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device. */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
ipv6_dev_mc_inc(br->dev, &addr); 2411 } 2412 #else 2413 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br) 2414 { 2415 } 2416 #endif 2417 2418 static void br_multicast_join_snoopers(struct net_bridge *br) 2419 { 2420 br_ip4_multicast_join_snoopers(br); 2421 br_ip6_multicast_join_snoopers(br); 2422 } 2423 2424 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br) 2425 { 2426 struct in_device *in_dev = in_dev_get(br->dev); 2427 2428 if (WARN_ON(!in_dev)) 2429 return; 2430 2431 __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC); 2432 in_dev_put(in_dev); 2433 } 2434 2435 #if IS_ENABLED(CONFIG_IPV6) 2436 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br) 2437 { 2438 struct in6_addr addr; 2439 2440 ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a)); 2441 ipv6_dev_mc_dec(br->dev, &addr); 2442 } 2443 #else 2444 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br) 2445 { 2446 } 2447 #endif 2448 2449 static void br_multicast_leave_snoopers(struct net_bridge *br) 2450 { 2451 br_ip4_multicast_leave_snoopers(br); 2452 br_ip6_multicast_leave_snoopers(br); 2453 } 2454 2455 static void __br_multicast_open(struct net_bridge *br, 2456 struct bridge_mcast_own_query *query) 2457 { 2458 query->startup_sent = 0; 2459 2460 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) 2461 return; 2462 2463 mod_timer(&query->timer, jiffies); 2464 } 2465 2466 void br_multicast_open(struct net_bridge *br) 2467 { 2468 if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 2469 br_multicast_join_snoopers(br); 2470 2471 __br_multicast_open(br, &br->ip4_own_query); 2472 #if IS_ENABLED(CONFIG_IPV6) 2473 __br_multicast_open(br, &br->ip6_own_query); 2474 #endif 2475 } 2476 2477 void br_multicast_stop(struct net_bridge *br) 2478 { 2479 del_timer_sync(&br->multicast_router_timer); 2480 del_timer_sync(&br->ip4_other_query.timer); 2481 del_timer_sync(&br->ip4_own_query.timer); 2482 #if IS_ENABLED(CONFIG_IPV6) 2483 
del_timer_sync(&br->ip6_other_query.timer); 2484 del_timer_sync(&br->ip6_own_query.timer); 2485 #endif 2486 2487 if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 2488 br_multicast_leave_snoopers(br); 2489 } 2490 2491 void br_multicast_dev_del(struct net_bridge *br) 2492 { 2493 struct net_bridge_mdb_entry *mp; 2494 HLIST_HEAD(deleted_head); 2495 struct hlist_node *tmp; 2496 2497 spin_lock_bh(&br->multicast_lock); 2498 hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) { 2499 del_timer(&mp->timer); 2500 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode, 2501 br_mdb_rht_params); 2502 hlist_del_rcu(&mp->mdb_node); 2503 kfree_rcu(mp, rcu); 2504 } 2505 hlist_move_list(&br->src_gc_list, &deleted_head); 2506 spin_unlock_bh(&br->multicast_lock); 2507 2508 __grp_src_gc(&deleted_head); 2509 cancel_work_sync(&br->src_gc_work); 2510 2511 rcu_barrier(); 2512 } 2513 2514 int br_multicast_set_router(struct net_bridge *br, unsigned long val) 2515 { 2516 int err = -EINVAL; 2517 2518 spin_lock_bh(&br->multicast_lock); 2519 2520 switch (val) { 2521 case MDB_RTR_TYPE_DISABLED: 2522 case MDB_RTR_TYPE_PERM: 2523 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM); 2524 del_timer(&br->multicast_router_timer); 2525 br->multicast_router = val; 2526 err = 0; 2527 break; 2528 case MDB_RTR_TYPE_TEMP_QUERY: 2529 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY) 2530 br_mc_router_state_change(br, false); 2531 br->multicast_router = val; 2532 err = 0; 2533 break; 2534 } 2535 2536 spin_unlock_bh(&br->multicast_lock); 2537 2538 return err; 2539 } 2540 2541 static void __del_port_router(struct net_bridge_port *p) 2542 { 2543 if (hlist_unhashed(&p->rlist)) 2544 return; 2545 hlist_del_init_rcu(&p->rlist); 2546 br_rtr_notify(p->br->dev, p, RTM_DELMDB); 2547 br_port_mc_router_state_change(p, false); 2548 2549 /* don't allow timer refresh */ 2550 if (p->multicast_router == MDB_RTR_TYPE_TEMP) 2551 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 2552 } 2553 2554 int 
br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) 2555 { 2556 struct net_bridge *br = p->br; 2557 unsigned long now = jiffies; 2558 int err = -EINVAL; 2559 2560 spin_lock(&br->multicast_lock); 2561 if (p->multicast_router == val) { 2562 /* Refresh the temp router port timer */ 2563 if (p->multicast_router == MDB_RTR_TYPE_TEMP) 2564 mod_timer(&p->multicast_router_timer, 2565 now + br->multicast_querier_interval); 2566 err = 0; 2567 goto unlock; 2568 } 2569 switch (val) { 2570 case MDB_RTR_TYPE_DISABLED: 2571 p->multicast_router = MDB_RTR_TYPE_DISABLED; 2572 __del_port_router(p); 2573 del_timer(&p->multicast_router_timer); 2574 break; 2575 case MDB_RTR_TYPE_TEMP_QUERY: 2576 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 2577 __del_port_router(p); 2578 break; 2579 case MDB_RTR_TYPE_PERM: 2580 p->multicast_router = MDB_RTR_TYPE_PERM; 2581 del_timer(&p->multicast_router_timer); 2582 br_multicast_add_router(br, p); 2583 break; 2584 case MDB_RTR_TYPE_TEMP: 2585 p->multicast_router = MDB_RTR_TYPE_TEMP; 2586 br_multicast_mark_router(br, p); 2587 break; 2588 default: 2589 goto unlock; 2590 } 2591 err = 0; 2592 unlock: 2593 spin_unlock(&br->multicast_lock); 2594 2595 return err; 2596 } 2597 2598 static void br_multicast_start_querier(struct net_bridge *br, 2599 struct bridge_mcast_own_query *query) 2600 { 2601 struct net_bridge_port *port; 2602 2603 __br_multicast_open(br, query); 2604 2605 rcu_read_lock(); 2606 list_for_each_entry_rcu(port, &br->port_list, list) { 2607 if (port->state == BR_STATE_DISABLED || 2608 port->state == BR_STATE_BLOCKING) 2609 continue; 2610 2611 if (query == &br->ip4_own_query) 2612 br_multicast_enable(&port->ip4_own_query); 2613 #if IS_ENABLED(CONFIG_IPV6) 2614 else 2615 br_multicast_enable(&port->ip6_own_query); 2616 #endif 2617 } 2618 rcu_read_unlock(); 2619 } 2620 2621 int br_multicast_toggle(struct net_bridge *br, unsigned long val) 2622 { 2623 struct net_bridge_port *port; 2624 2625 spin_lock_bh(&br->multicast_lock); 
2626 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) 2627 goto unlock; 2628 2629 br_mc_disabled_update(br->dev, val); 2630 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); 2631 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { 2632 br_multicast_leave_snoopers(br); 2633 goto unlock; 2634 } 2635 2636 if (!netif_running(br->dev)) 2637 goto unlock; 2638 2639 br_multicast_open(br); 2640 list_for_each_entry(port, &br->port_list, list) 2641 __br_multicast_enable_port(port); 2642 2643 unlock: 2644 spin_unlock_bh(&br->multicast_lock); 2645 2646 return 0; 2647 } 2648 2649 bool br_multicast_enabled(const struct net_device *dev) 2650 { 2651 struct net_bridge *br = netdev_priv(dev); 2652 2653 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED); 2654 } 2655 EXPORT_SYMBOL_GPL(br_multicast_enabled); 2656 2657 bool br_multicast_router(const struct net_device *dev) 2658 { 2659 struct net_bridge *br = netdev_priv(dev); 2660 bool is_router; 2661 2662 spin_lock_bh(&br->multicast_lock); 2663 is_router = br_multicast_is_router(br); 2664 spin_unlock_bh(&br->multicast_lock); 2665 return is_router; 2666 } 2667 EXPORT_SYMBOL_GPL(br_multicast_router); 2668 2669 int br_multicast_set_querier(struct net_bridge *br, unsigned long val) 2670 { 2671 unsigned long max_delay; 2672 2673 val = !!val; 2674 2675 spin_lock_bh(&br->multicast_lock); 2676 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val) 2677 goto unlock; 2678 2679 br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val); 2680 if (!val) 2681 goto unlock; 2682 2683 max_delay = br->multicast_query_response_interval; 2684 2685 if (!timer_pending(&br->ip4_other_query.timer)) 2686 br->ip4_other_query.delay_time = jiffies + max_delay; 2687 2688 br_multicast_start_querier(br, &br->ip4_own_query); 2689 2690 #if IS_ENABLED(CONFIG_IPV6) 2691 if (!timer_pending(&br->ip6_other_query.timer)) 2692 br->ip6_other_query.delay_time = jiffies + max_delay; 2693 2694 br_multicast_start_querier(br, &br->ip6_own_query); 2695 #endif 2696 2697 unlock: 2698 
spin_unlock_bh(&br->multicast_lock); 2699 2700 return 0; 2701 } 2702 2703 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val) 2704 { 2705 /* Currently we support only version 2 and 3 */ 2706 switch (val) { 2707 case 2: 2708 case 3: 2709 break; 2710 default: 2711 return -EINVAL; 2712 } 2713 2714 spin_lock_bh(&br->multicast_lock); 2715 br->multicast_igmp_version = val; 2716 spin_unlock_bh(&br->multicast_lock); 2717 2718 return 0; 2719 } 2720 2721 #if IS_ENABLED(CONFIG_IPV6) 2722 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val) 2723 { 2724 /* Currently we support version 1 and 2 */ 2725 switch (val) { 2726 case 1: 2727 case 2: 2728 break; 2729 default: 2730 return -EINVAL; 2731 } 2732 2733 spin_lock_bh(&br->multicast_lock); 2734 br->multicast_mld_version = val; 2735 spin_unlock_bh(&br->multicast_lock); 2736 2737 return 0; 2738 } 2739 #endif 2740 2741 /** 2742 * br_multicast_list_adjacent - Returns snooped multicast addresses 2743 * @dev: The bridge port adjacent to which to retrieve addresses 2744 * @br_ip_list: The list to store found, snooped multicast IP addresses in 2745 * 2746 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast 2747 * snooping feature on all bridge ports of dev's bridge device, excluding 2748 * the addresses from dev itself. 2749 * 2750 * Returns the number of items added to br_ip_list. 
2751 * 2752 * Notes: 2753 * - br_ip_list needs to be initialized by caller 2754 * - br_ip_list might contain duplicates in the end 2755 * (needs to be taken care of by caller) 2756 * - br_ip_list needs to be freed by caller 2757 */ 2758 int br_multicast_list_adjacent(struct net_device *dev, 2759 struct list_head *br_ip_list) 2760 { 2761 struct net_bridge *br; 2762 struct net_bridge_port *port; 2763 struct net_bridge_port_group *group; 2764 struct br_ip_list *entry; 2765 int count = 0; 2766 2767 rcu_read_lock(); 2768 if (!br_ip_list || !netif_is_bridge_port(dev)) 2769 goto unlock; 2770 2771 port = br_port_get_rcu(dev); 2772 if (!port || !port->br) 2773 goto unlock; 2774 2775 br = port->br; 2776 2777 list_for_each_entry_rcu(port, &br->port_list, list) { 2778 if (!port->dev || port->dev == dev) 2779 continue; 2780 2781 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 2782 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 2783 if (!entry) 2784 goto unlock; 2785 2786 entry->addr = group->addr; 2787 list_add(&entry->list, br_ip_list); 2788 count++; 2789 } 2790 } 2791 2792 unlock: 2793 rcu_read_unlock(); 2794 return count; 2795 } 2796 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); 2797 2798 /** 2799 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge 2800 * @dev: The bridge port providing the bridge on which to check for a querier 2801 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 2802 * 2803 * Checks whether the given interface has a bridge on top and if so returns 2804 * true if a valid querier exists anywhere on the bridged link layer. 2805 * Otherwise returns false. 
2806 */ 2807 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 2808 { 2809 struct net_bridge *br; 2810 struct net_bridge_port *port; 2811 struct ethhdr eth; 2812 bool ret = false; 2813 2814 rcu_read_lock(); 2815 if (!netif_is_bridge_port(dev)) 2816 goto unlock; 2817 2818 port = br_port_get_rcu(dev); 2819 if (!port || !port->br) 2820 goto unlock; 2821 2822 br = port->br; 2823 2824 memset(ð, 0, sizeof(eth)); 2825 eth.h_proto = htons(proto); 2826 2827 ret = br_multicast_querier_exists(br, ð); 2828 2829 unlock: 2830 rcu_read_unlock(); 2831 return ret; 2832 } 2833 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 2834 2835 /** 2836 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 2837 * @dev: The bridge port adjacent to which to check for a querier 2838 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 2839 * 2840 * Checks whether the given interface has a bridge on top and if so returns 2841 * true if a selected querier is behind one of the other ports of this 2842 * bridge. Otherwise returns false. 
2843 */ 2844 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) 2845 { 2846 struct net_bridge *br; 2847 struct net_bridge_port *port; 2848 bool ret = false; 2849 2850 rcu_read_lock(); 2851 if (!netif_is_bridge_port(dev)) 2852 goto unlock; 2853 2854 port = br_port_get_rcu(dev); 2855 if (!port || !port->br) 2856 goto unlock; 2857 2858 br = port->br; 2859 2860 switch (proto) { 2861 case ETH_P_IP: 2862 if (!timer_pending(&br->ip4_other_query.timer) || 2863 rcu_dereference(br->ip4_querier.port) == port) 2864 goto unlock; 2865 break; 2866 #if IS_ENABLED(CONFIG_IPV6) 2867 case ETH_P_IPV6: 2868 if (!timer_pending(&br->ip6_other_query.timer) || 2869 rcu_dereference(br->ip6_querier.port) == port) 2870 goto unlock; 2871 break; 2872 #endif 2873 default: 2874 goto unlock; 2875 } 2876 2877 ret = true; 2878 unlock: 2879 rcu_read_unlock(); 2880 return ret; 2881 } 2882 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 2883 2884 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats, 2885 const struct sk_buff *skb, u8 type, u8 dir) 2886 { 2887 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats); 2888 __be16 proto = skb->protocol; 2889 unsigned int t_len; 2890 2891 u64_stats_update_begin(&pstats->syncp); 2892 switch (proto) { 2893 case htons(ETH_P_IP): 2894 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 2895 switch (type) { 2896 case IGMP_HOST_MEMBERSHIP_REPORT: 2897 pstats->mstats.igmp_v1reports[dir]++; 2898 break; 2899 case IGMPV2_HOST_MEMBERSHIP_REPORT: 2900 pstats->mstats.igmp_v2reports[dir]++; 2901 break; 2902 case IGMPV3_HOST_MEMBERSHIP_REPORT: 2903 pstats->mstats.igmp_v3reports[dir]++; 2904 break; 2905 case IGMP_HOST_MEMBERSHIP_QUERY: 2906 if (t_len != sizeof(struct igmphdr)) { 2907 pstats->mstats.igmp_v3queries[dir]++; 2908 } else { 2909 unsigned int offset = skb_transport_offset(skb); 2910 struct igmphdr *ih, _ihdr; 2911 2912 ih = skb_header_pointer(skb, offset, 2913 sizeof(_ihdr), &_ihdr); 2914 if (!ih) 2915 break; 
2916 if (!ih->code) 2917 pstats->mstats.igmp_v1queries[dir]++; 2918 else 2919 pstats->mstats.igmp_v2queries[dir]++; 2920 } 2921 break; 2922 case IGMP_HOST_LEAVE_MESSAGE: 2923 pstats->mstats.igmp_leaves[dir]++; 2924 break; 2925 } 2926 break; 2927 #if IS_ENABLED(CONFIG_IPV6) 2928 case htons(ETH_P_IPV6): 2929 t_len = ntohs(ipv6_hdr(skb)->payload_len) + 2930 sizeof(struct ipv6hdr); 2931 t_len -= skb_network_header_len(skb); 2932 switch (type) { 2933 case ICMPV6_MGM_REPORT: 2934 pstats->mstats.mld_v1reports[dir]++; 2935 break; 2936 case ICMPV6_MLD2_REPORT: 2937 pstats->mstats.mld_v2reports[dir]++; 2938 break; 2939 case ICMPV6_MGM_QUERY: 2940 if (t_len != sizeof(struct mld_msg)) 2941 pstats->mstats.mld_v2queries[dir]++; 2942 else 2943 pstats->mstats.mld_v1queries[dir]++; 2944 break; 2945 case ICMPV6_MGM_REDUCTION: 2946 pstats->mstats.mld_leaves[dir]++; 2947 break; 2948 } 2949 break; 2950 #endif /* CONFIG_IPV6 */ 2951 } 2952 u64_stats_update_end(&pstats->syncp); 2953 } 2954 2955 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, 2956 const struct sk_buff *skb, u8 type, u8 dir) 2957 { 2958 struct bridge_mcast_stats __percpu *stats; 2959 2960 /* if multicast_disabled is true then igmp type can't be set */ 2961 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) 2962 return; 2963 2964 if (p) 2965 stats = p->mcast_stats; 2966 else 2967 stats = br->mcast_stats; 2968 if (WARN_ON(!stats)) 2969 return; 2970 2971 br_mcast_stats_add(stats, skb, type, dir); 2972 } 2973 2974 int br_multicast_init_stats(struct net_bridge *br) 2975 { 2976 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 2977 if (!br->mcast_stats) 2978 return -ENOMEM; 2979 2980 return 0; 2981 } 2982 2983 void br_multicast_uninit_stats(struct net_bridge *br) 2984 { 2985 free_percpu(br->mcast_stats); 2986 } 2987 2988 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 2989 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 2990 { 
2991 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 2992 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX]; 2993 } 2994 2995 void br_multicast_get_stats(const struct net_bridge *br, 2996 const struct net_bridge_port *p, 2997 struct br_mcast_stats *dest) 2998 { 2999 struct bridge_mcast_stats __percpu *stats; 3000 struct br_mcast_stats tdst; 3001 int i; 3002 3003 memset(dest, 0, sizeof(*dest)); 3004 if (p) 3005 stats = p->mcast_stats; 3006 else 3007 stats = br->mcast_stats; 3008 if (WARN_ON(!stats)) 3009 return; 3010 3011 memset(&tdst, 0, sizeof(tdst)); 3012 for_each_possible_cpu(i) { 3013 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i); 3014 struct br_mcast_stats temp; 3015 unsigned int start; 3016 3017 do { 3018 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 3019 memcpy(&temp, &cpu_stats->mstats, sizeof(temp)); 3020 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 3021 3022 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries); 3023 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries); 3024 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries); 3025 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves); 3026 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports); 3027 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports); 3028 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports); 3029 tdst.igmp_parse_errors += temp.igmp_parse_errors; 3030 3031 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries); 3032 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries); 3033 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves); 3034 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports); 3035 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports); 3036 tdst.mld_parse_errors += temp.mld_parse_errors; 3037 } 3038 memcpy(dest, &tdst, sizeof(*dest)); 3039 } 3040 3041 int br_mdb_hash_init(struct net_bridge *br) 3042 { 3043 return rhashtable_init(&br->mdb_hash_tbl, 
&br_mdb_rht_params); 3044 } 3045 3046 void br_mdb_hash_fini(struct net_bridge *br) 3047 { 3048 rhashtable_destroy(&br->mdb_hash_tbl); 3049 } 3050