// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

/* MDB hash table parameters: entries are keyed by the full struct br_ip
 * (protocol, vid, group and - for S,G entries - source address).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

/* S,G-per-port hash table parameters: entries are keyed by the
 * (port, S,G address) pair stored in the port group's key.
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);

/* Look up the port group entry for a (port, S,G) key.
 * Caller must hold br->multicast_lock.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

/* MDB lookup for callers already inside an RCU read-side section. */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* MDB lookup under br->multicast_lock; the short RCU section only
 * satisfies the rhashtable_lookup() calling convention.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Convenience wrapper: build an IPv4 *,G key and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: build an IPv6 *,G key and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Data-path MDB lookup for a forwarded packet.  Returns the matching MDB
 * entry or NULL (caller then floods).  With IGMPv3/MLDv2 active, an S,G
 * match is attempted first, falling back to the *,G entry.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	/* IGMP/MLD control traffic is not forwarded via the MDB */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (br->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* try the source-specific entry first */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (br->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* try the source-specific entry first */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* True if @p belongs to @port and, when the port does unicast-to-multicast
 * conversion (BR_MULTICAST_TO_UNICAST), also matches the host MAC @src.
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Install an S,G entry on pg's port mirroring a *,G EXCLUDE group,
 * unless one already exists for that (port, S,G).
 */
static void __fwd_add_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(br, pg->key.port, sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

/* Remove an automatically installed (STAR_EXCL, kernel-owned) S,G entry
 * from pg's port; entries not owned by the kernel are left alone.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	/* walk the other port groups of the *,G entry and add/remove their
	 * installed sources on pg's port depending on the new filter mode
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	/* host joined the *,G - propagate the join to the new S,G entry */
	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

/* Drop all kernel-installed STAR_EXCL ports from an S,G entry once only
 * STAR_EXCL/permanent ports remain on it.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* remove all non-permanent ports; note pp only advances when the
	 * current entry is kept
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

/* Mirror a freshly added S,G port group onto all EXCLUDE-mode ports of the
 * corresponding *,G entry.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_port_group *pg;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		src_pg = __br_multicast_add_group(br, pg->key.port,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}

/* Install the S,G forwarding entry backing a group source, if not already
 * installed, and propagate it onto the *,G EXCLUDE ports.
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_port_group *sg;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;
	/* add blocked when the source timer isn't running */
	sg = __br_multicast_add_group(src->br, src->pg->key.port, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

/* Remove the S,G forwarding entry backing a group source (unless it is a
 * user-added permanent entry) and clear the source's installed flag.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		/* leave entries user-space owns alone */
		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* a running source timer means the source is active: unblock */
	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only when the blocked state actually changed */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

/* Deferred (GC work) destructor for an MDB entry: the entry must already
 * be unlinked; wait out its timer and free it after an RCU grace period.
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

/* Unlink an MDB entry from the hash table and list, then queue it on the
 * GC list for deferred destruction.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* Group timer expiry: drop the host join and, when no ports remain,
 * delete the MDB entry.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	/* bail out if already unlinked, bridge is down or timer re-armed */
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

/* Deferred destructor for a group source entry. */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}

/* Remove a source from its port group: uninstall its S,G entry, unlink it
 * and queue it for deferred destruction.
 */
static void br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* Deferred destructor for a port group entry. */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

/* Unlink port group @pg from MDB entry @mp (@pp points to its link), drop
 * all of its sources, notify user-space and queue it for destruction.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		/* S,G entry: drop the (port, S,G) hash link and clean up the
		 * remaining auto-installed exclude ports if applicable
		 */
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	/* last port gone and host not joined: expire the MDB entry now */
	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

/* Locate @pg's link in its MDB entry and delete it via
 * br_multicast_del_pg(); warns if either lookup fails.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

/* Port group timer expiry: fall back to INCLUDE mode, drop expired
 * sources and delete the whole group when no sources are left.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* Run the queued destructors for all entries on the GC list. */
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}

/* Build an IGMPv2/v3 query skb.  With @with_srcs set, a v3 group-and-source
 * query is built containing the sources whose timers are on the @over_lmqt
 * side of the last-member query time and still have retransmits pending.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* first pass: count the sources to be included */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* Ethernet + IPv4 (incl. 4-byte router alert option) + IGMP */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb =
	      netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	/* IPv4 header */
	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24 bytes: 20-byte header + 4-byte router alert */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in the source list and consume the
		 * per-source retransmit counts
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* the source list must not have changed between the passes */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLDv1/v2 query skb; MLDv2 group-and-source queries include the
 * sources selected the same way as in the IGMPv3 case above.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ?
				    pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* first pass: count the sources to be included */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* Ethernet + IPv6 + 8-byte hop-by-hop (router alert) + MLD */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		/* no usable source address: drop the query and remember the
		 * bridge currently has no IPv6 address
		 */
		kfree_skb(skb);
		br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in the source list and consume the
		 * per-source retransmit counts
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* the source list must not have changed between the passes */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Dispatch query allocation by protocol.  Without an explicit destination
 * the all-hosts (INADDR_ALLHOSTS_GROUP) / all-nodes (ff02::1) address is
 * used.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(br, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(br, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Look up or create the MDB entry for @group.  On hash table overflow
 * multicast snooping is disabled and ERR_PTR(-E2BIG) is returned.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Source timer expiry: in INCLUDE mode drop the source (and the whole port
 * group when it was the last one), otherwise resync the source's S,G
 * blocked state.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* bail out if already unlinked, bridge is down or timer re-armed */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

/* Linear search for a source address in a port group's source list. */
static struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate and link a new source entry on @pg.  Returns NULL when the
 * per-group source limit is reached, the address is invalid as a source
 * (zeronet/any or multicast) or allocation fails.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

/* Allocate a new port group entry, link it into @port's mglist and, for
 * S,G addresses, into the S,G port hash table.  @next becomes the entry's
 * successor in the MDB entry's port list.  Returns NULL on allocation
 * failure or on a duplicate S,G hash insert.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	/* without a specific host MAC, match any source address */
	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}
/* Mark the bridge device itself as a member of @mp.  The membership
 * timer is (re)armed unconditionally, even if the host was already
 * joined; star (*,G) groups additionally refresh their host state.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}
	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}

/* Counterpart of br_multicast_host_join(); no-op if not joined. */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Core join handler, called with multicast_lock held (see
 * br_multicast_add_group()).  A NULL @port means the host itself joins.
 * Returns the (possibly pre-existing) port group, NULL for host joins /
 * inactive bridge, or an ERR_PTR on failure.  The port list is kept
 * ordered by descending port pointer (insertion stops at the first
 * entry whose port compares below @port).  Only IGMPv2/MLDv1 reports
 * re-arm the membership timer (v3/v2 respectively manage per-source
 * timers instead).
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, group);
	if (IS_ERR(mp))
		return ERR_PTR(PTR_ERR(mp));

	if (!port) {
		br_multicast_host_join(mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode,
					RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

out:
	return p;
}

/* Locked wrapper around __br_multicast_add_group(); converts the
 * result to an errno.  A NULL port group (host join) counts as success.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&br->multicast_lock);
	pg = __br_multicast_add_group(br, port, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = IS_ERR(pg) ? PTR_ERR(pg) : 0;
	spin_unlock(&br->multicast_lock);

	return err;
}

/* IPv4 join: local-multicast (224.0.0.x) groups are never snooped.
 * IGMPv2 reports imply EXCLUDE{} (i.e. "all sources") filter mode.
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 join: the link-local all-nodes group is never snooped.
 * MLDv1 reports imply EXCLUDE{} filter mode, mirroring IGMPv2 above.
 */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      mldv1);
}
#endif

/* Timer callback: a port's learned-router presence timed out.  Static
 * (DISABLED/PERM) router configuration and a re-armed timer are left
 * alone; otherwise the port is removed from the router list.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
		from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}

/* Propagate the bridge's own mrouter state to switchdev offload. */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

/* Timer callback: the bridge itself stopped acting as a querier-learned
 * router; mirrors br_multicast_router_expired() for the bridge device.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}

/* The foreign ("other") querier timed out — take over querying
 * ourselves, if the bridge is up and snooping is enabled.
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

/* Record our own source address (taken from the query skb we just
 * built) as the elected querier address for the relevant family.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and emit one query.  With a @port the skb is transmitted out
 * that port through the LOCAL_OUT netfilter hook; without one it is
 * looped back into the bridge via netif_rx() (and we elect ourselves
 * querier).  When @sflag is set for a source-list query, a second skb
 * is sent with over_lmqt = false — i.e. both the over- and under-LMQT
 * variants of the group-and-source query go out (see the
 * again_under_lmqt retry).
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(br, group, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Send a periodic general query for the family that owns @own_query
 * and re-arm its timer (startup interval until startup_sent reaches
 * multicast_startup_query_count, then the regular query interval).
 * Suppressed while a foreign querier's timer is running.
 * NOTE(review): only br_group.dst is memset — br_group.vid is left
 * uninitialized here; presumably the alloc path ignores vid for
 * general queries.  Confirm against br_ip{4,6}_multicast_alloc_query.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
				  NULL);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Per-port own-query timer body: skip disabled/blocking ports, count
 * startup queries, then send the next general query.
 */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

/* Retransmit timer for a port group: while grp_query_rexmit_cnt is
 * non-zero, send one more group query (sflag=1, suppressed sources)
 * and always one group-and-source query; re-arm if either the group
 * count or a per-source rexmit (need_rexmit) remains.  Suppressed when
 * a foreign querier is active or snooping/querier is disabled.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     br->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

/* Tell switchdev whether multicast snooping is disabled.  Note the
 * inversion: @value is the "enabled" option, u.mc_disabled = !value.
 */
static void br_mc_disabled_update(struct net_device *dev, bool value)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	switchdev_port_attr_set(dev, &attr);
}

/* Per-port multicast init: timers, switchdev state, per-cpu stats.
 * Returns -ENOMEM if the stats allocation fails (timers are only set
 * up, never armed, at this point).
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev,
			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Per-port teardown: delete all remaining port groups, then run the
 * deferred GC list synchronously outside the lock before freeing the
 * stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

/* Restart an own-query cycle: reset the startup counter and fire the
 * query timer immediately (if it could be stopped first).
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Enable multicast on a port: kick own queries for both families and
 * re-add a permanently-configured router port to the router list.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}

void
/* Disable multicast on a port: drop all non-permanent groups, remove
 * the port from the router list and stop all its timers.
 */
br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_find_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

/* Delete every source entry flagged BR_SGRP_F_DELETE; returns how many
 * were removed so callers can report a state change.
 */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent);
			deleted++;
		}

	return deleted;
}

/* Arm a source timer and refresh its forwarding state together. */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

/* For every source flagged BR_SGRP_F_SEND whose timer runs past LMQT,
 * lower the timer to LMQT (arming per-source retransmit counts when we
 * are the active querier), then emit one group-and-source query and
 * schedule the rexmit timer.  Skipped entirely while a foreign querier
 * is active or snooping is off.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	u32 lmqc = br->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(br);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + br->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

/* Send a group-specific query (when we are the active querier), arm
 * last-member retransmits, and in EXCLUDE mode cap the group timer at
 * LMQT so a silent group expires quickly.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	unsigned long now = jiffies, lmi;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + br->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
		__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(br));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
				     void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	/* @srcs is a packed array of nsrcs addresses of src_size bytes. */
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
		srcs += src_size;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* Mark everything for deletion, then unmark what the report kept. */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
		srcs += src_size;
	}

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete
 * (Y-A)
 *                                                Group Timer=GMI
 */
static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* Mark all, unmark reported; new sources start with GMI timers. */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(br));
				changed = true;
			}
		}
		srcs += src_size;
	}

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Handle an IS_EXCLUDE record: transition the port group to EXCLUDE
 * mode and refresh the group timer to GMI.  Returns true when the
 * source list changed (INCLUDE->EXCLUDE always counts as changed).
 */
static bool br_multicast_isexc(struct net_bridge_port_group *pg,
			       void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(pg, srcs, nsrcs, src_size);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Flag everything for querying, then clear reported sources. */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Only sources with running timers (set X) are queried. */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	__grp_send_query_and_rexmit(pg);

	return changed;
}

/* Handle a TO_INCLUDE record; dispatch on current filter mode. */
static bool br_multicast_toin(struct net_bridge_port_group *pg,
			      void *srcs, u32 nsrcs, size_t src_size)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(pg, srcs, nsrcs, src_size);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* In both old and new set (A*B): keep and query. */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
		srcs += src_size;
	}

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct
				net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y) inherits the group timer. */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
		srcs += src_size;
	}

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* Handle a TO_EXCLUDE record: transition the port group to EXCLUDE
 * mode and refresh the group timer to GMI (mirrors br_multicast_isexc).
 */
static bool br_multicast_toex(struct net_bridge_port_group *pg,
			      void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(pg, srcs, nsrcs, src_size);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
static void __grp_src_block_incl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	/* An INCLUDE group with no sources left is removed entirely. */
	if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list))
		br_multicast_find_del_pg(pg->key.port->br, pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* Handle a BLOCK_OLD_SOURCES record; dispatch on filter mode. */
static bool br_multicast_block(struct net_bridge_port_group *pg,
			       void *srcs, u32 nsrcs, size_t src_size)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_block_incl(pg, srcs, nsrcs, src_size);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	return changed;
}

/* Walk @mp's port group list for the entry matching (@p, @src). */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Parse an IGMPv3 report: validate and walk each group record, treat
 * empty TO_IN/IS_IN as a leave, otherwise join the group and (when the
 * bridge runs IGMPv3, i.e. !igmpv2, and the report came from a port)
 * apply the record's source-filtering transition under multicast_lock.
 * Bounds are re-checked per record with ip_mc_may_pull(); source
 * addresses are 4 bytes each, hence "nsrcs * 4".
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = br->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	bool changed = false;
	__be32 group;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			if (!port || igmpv2) {
				br_ip4_multicast_leave_group(br, port, group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src, igmpv2);
			if (err)
				break;
		}

		if (!port || igmpv2)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip4_get(br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, grec->grec_src,
							   nsrcs, sizeof(__be32));
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			/* NOTE(review): identical handling to ALLOW above,
			 * matching the IS_IN/ALLOW rows of the state table.
			 */
			changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
							   sizeof(__be32));
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
						     sizeof(__be32));
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
						    sizeof(__be32));
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
						    sizeof(__be32));
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, grec->grec_src, nsrcs,
						     sizeof(__be32));
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLDv2 counterpart of br_ip4_multicast_igmp3_report(); continues
 * past this chunk.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = br->multicast_mld_version == 1;
2360 struct net_bridge_mdb_entry *mdst; 2361 struct net_bridge_port_group *pg; 2362 unsigned int nsrcs_offset; 2363 const unsigned char *src; 2364 struct icmp6hdr *icmp6h; 2365 struct mld2_grec *grec; 2366 unsigned int grec_len; 2367 bool changed = false; 2368 int i, len, num; 2369 int err = 0; 2370 2371 if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h))) 2372 return -EINVAL; 2373 2374 icmp6h = icmp6_hdr(skb); 2375 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 2376 len = skb_transport_offset(skb) + sizeof(*icmp6h); 2377 2378 for (i = 0; i < num; i++) { 2379 __be16 *_nsrcs, __nsrcs; 2380 u16 nsrcs; 2381 2382 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs); 2383 2384 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < 2385 nsrcs_offset + sizeof(__nsrcs)) 2386 return -EINVAL; 2387 2388 _nsrcs = skb_header_pointer(skb, nsrcs_offset, 2389 sizeof(__nsrcs), &__nsrcs); 2390 if (!_nsrcs) 2391 return -EINVAL; 2392 2393 nsrcs = ntohs(*_nsrcs); 2394 grec_len = struct_size(grec, grec_src, nsrcs); 2395 2396 if (!ipv6_mc_may_pull(skb, len + grec_len)) 2397 return -EINVAL; 2398 2399 grec = (struct mld2_grec *)(skb->data + len); 2400 len += grec_len; 2401 2402 switch (grec->grec_type) { 2403 case MLD2_MODE_IS_INCLUDE: 2404 case MLD2_MODE_IS_EXCLUDE: 2405 case MLD2_CHANGE_TO_INCLUDE: 2406 case MLD2_CHANGE_TO_EXCLUDE: 2407 case MLD2_ALLOW_NEW_SOURCES: 2408 case MLD2_BLOCK_OLD_SOURCES: 2409 break; 2410 2411 default: 2412 continue; 2413 } 2414 2415 src = eth_hdr(skb)->h_source; 2416 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 2417 grec->grec_type == MLD2_MODE_IS_INCLUDE) && 2418 nsrcs == 0) { 2419 if (!port || mldv1) { 2420 br_ip6_multicast_leave_group(br, port, 2421 &grec->grec_mca, 2422 vid, src); 2423 continue; 2424 } 2425 } else { 2426 err = br_ip6_multicast_add_group(br, port, 2427 &grec->grec_mca, vid, 2428 src, mldv1); 2429 if (err) 2430 break; 2431 } 2432 2433 if (!port || mldv1) 2434 continue; 2435 2436 spin_lock_bh(&br->multicast_lock); 2437 mdst = 
br_mdb_ip6_get(br, &grec->grec_mca, vid); 2438 if (!mdst) 2439 goto unlock_continue; 2440 pg = br_multicast_find_port(mdst, port, src); 2441 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT)) 2442 goto unlock_continue; 2443 switch (grec->grec_type) { 2444 case MLD2_ALLOW_NEW_SOURCES: 2445 changed = br_multicast_isinc_allow(pg, grec->grec_src, 2446 nsrcs, 2447 sizeof(struct in6_addr)); 2448 break; 2449 case MLD2_MODE_IS_INCLUDE: 2450 changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs, 2451 sizeof(struct in6_addr)); 2452 break; 2453 case MLD2_MODE_IS_EXCLUDE: 2454 changed = br_multicast_isexc(pg, grec->grec_src, nsrcs, 2455 sizeof(struct in6_addr)); 2456 break; 2457 case MLD2_CHANGE_TO_INCLUDE: 2458 changed = br_multicast_toin(pg, grec->grec_src, nsrcs, 2459 sizeof(struct in6_addr)); 2460 break; 2461 case MLD2_CHANGE_TO_EXCLUDE: 2462 changed = br_multicast_toex(pg, grec->grec_src, nsrcs, 2463 sizeof(struct in6_addr)); 2464 break; 2465 case MLD2_BLOCK_OLD_SOURCES: 2466 changed = br_multicast_block(pg, grec->grec_src, nsrcs, 2467 sizeof(struct in6_addr)); 2468 break; 2469 } 2470 if (changed) 2471 br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB); 2472 unlock_continue: 2473 spin_unlock_bh(&br->multicast_lock); 2474 } 2475 2476 return err; 2477 } 2478 #endif 2479 2480 static bool br_ip4_multicast_select_querier(struct net_bridge *br, 2481 struct net_bridge_port *port, 2482 __be32 saddr) 2483 { 2484 if (!timer_pending(&br->ip4_own_query.timer) && 2485 !timer_pending(&br->ip4_other_query.timer)) 2486 goto update; 2487 2488 if (!br->ip4_querier.addr.src.ip4) 2489 goto update; 2490 2491 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.src.ip4)) 2492 goto update; 2493 2494 return false; 2495 2496 update: 2497 br->ip4_querier.addr.src.ip4 = saddr; 2498 2499 /* update protected by general multicast_lock by caller */ 2500 rcu_assign_pointer(br->ip4_querier.port, port); 2501 2502 return true; 2503 } 2504 2505 #if IS_ENABLED(CONFIG_IPV6) 2506 static bool 
br_ip6_multicast_select_querier(struct net_bridge *br,
				struct net_bridge_port *port,
				struct in6_addr *saddr)
{
	/* lowest source address wins, as in the IPv4 variant */
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.src.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.src.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

/* Protocol dispatch for querier election on a received query. */
static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
#endif
	}

	return false;
}

/* (Re)arm the other-querier-present timer; delay_time is only reset when
 * the timer was not already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/* Propagate the port's multicast-router state to offloading hardware
 * via switchdev (deferred attr set).
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* already on the list — nothing to do */
	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}

/* Note the presence of a multicast router behind @port (or on the bridge
 * itself when @port is NULL) and refresh the corresponding router timer.
 * Ports configured as DISABLED or PERM routers are left untouched.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

/* Handle a query from another querier: if it wins the election, refresh
 * the other-querier timer and mark its port as a router port.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

/* Process a received IGMP query (v1/v2/v3): run querier election for
 * general queries, or lower the group/port timers toward the advertised
 * maximum response delay for group-specific queries.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* v1/v2 query: a zero code means IGMPv1 (10s default),
		 * which only ever sends general queries
		 */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (br->multicast_igmp_version == 3 && group && ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* only shorten a timer, never extend it */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query (v1/v2); IPv6 counterpart of
 * br_ip4_multicast_query().  Returns 0 or a negative errno for
 * truncated packets.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (br->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* only shorten a timer, never extend it */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Common leave handling for IGMP/MLD: fast-leave deletes the port group
 * immediately; otherwise send a last-member query (if we are querier)
 * and lower the group/port membership timers.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is responsible for sending last-member queries */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* host-originated leave: lower the host-join timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->key.port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* IGMP leave: ignore link-local groups, pick the per-port or bridge-wide
 * own-query context, and delegate to the common leave handler.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD leave; IPv6 counterpart of br_ip4_multicast_leave_group(). */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif

/* Bump the per-cpu IGMP/MLD parse-error counter for the port (or the
 * bridge itself when @p is NULL), if stats collection is enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* Mark the ingress port as a router port when a PIM hello is seen. */
static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

/* Handle an IGMP Multicast Router Discovery advertisement: mark the
 * ingress port as a router port.  Returns -ENOMSG for non-MRD packets.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

/* IPv4 snooping entry point: validate IGMP, handle PIM/MRD control
 * traffic, then dispatch reports/queries/leaves by IGMP message type.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* not IGMP — may still be PIM hello or MRD advertisement */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
						 true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Handle an ICMPv6 Multicast Router Discovery advertisement; IPv6
 * counterpart of br_ip4_multicast_mrd_rcv().
 */
static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	int ret;

	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
		return -ENOMSG;

	ret = ipv6_mc_check_icmpv6(skb);
	if (ret < 0)
		return ret;

	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

/* IPv6 snooping entry point: validate MLD, handle MRD control traffic,
 * then dispatch reports/queries/reductions by MLD message type.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Top-level multicast snooping receive hook, dispatching by L3 protocol.
 * No-op when snooping is disabled on the bridge.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer expiry: send the next (startup) query and clear the
 * cached querier port under the multicast lock.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

/* Deferred garbage collection of deleted multicast entries: detach the
 * gc list under the lock, then free outside it.
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br =
container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

/* Initialize the bridge's multicast state: protocol defaults (RFC-style
 * intervals), timers, option flags and the garbage-collection work item.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device so
 * MRD advertisements are received.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

static void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

/* Leave the IPv4 all-snoopers group; pairs with the join above. */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group; pairs with the join above. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

static void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

/* Restart the own-query state machine: reset the startup counter and
 * fire the query timer immediately (if snooping is enabled).
 */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

/* Bring multicast snooping up on the bridge (device open path). */
void br_multicast_open(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

/* Tear down all bridge-level multicast timers (device stop path). */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);
}

/* Delete all mdb entries on bridge destruction and flush the gc work;
 * the final rcu_barrier() waits for outstanding RCU frees.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}

/* Set the bridge-wide multicast router mode from user space.
 * Returns 0 on success, -EINVAL for an unknown mode.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

/* Remove a port from the router list, notify user space and hardware. */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

/* Set the per-port multicast router mode from user space.
 * Returns 0 on success, -EINVAL for an unknown mode.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

/* Restart querying on the bridge and on every active port for the given
 * address family's own-query context.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
	rcu_read_unlock();
}

/* Enable or disable multicast snooping on the bridge.  Always returns 0;
 * the value is normalized to a boolean.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		br_multicast_leave_snoopers(br);
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(br);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

/* Enable or disable acting as IGMP/MLD querier.  Enabling (re)starts the
 * queriers for both address families.  Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_igmp_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_mld_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
#endif

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:	The bridge port adjacent to which to retrieve addresses
 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* walk every other port of the bridge and collect its snooped
	 * group addresses; @dev's own port is deliberately skipped
	 */
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* GFP_ATOMIC: we are inside an RCU read section */
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}
	/* NOTE(review): on allocation failure the partial count is
	 * returned and is indistinguishable from success — callers must
	 * tolerate a truncated list.
	 */

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
3672 */ 3673 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 3674 { 3675 struct net_bridge *br; 3676 struct net_bridge_port *port; 3677 struct ethhdr eth; 3678 bool ret = false; 3679 3680 rcu_read_lock(); 3681 if (!netif_is_bridge_port(dev)) 3682 goto unlock; 3683 3684 port = br_port_get_rcu(dev); 3685 if (!port || !port->br) 3686 goto unlock; 3687 3688 br = port->br; 3689 3690 memset(ð, 0, sizeof(eth)); 3691 eth.h_proto = htons(proto); 3692 3693 ret = br_multicast_querier_exists(br, ð); 3694 3695 unlock: 3696 rcu_read_unlock(); 3697 return ret; 3698 } 3699 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 3700 3701 /** 3702 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 3703 * @dev: The bridge port adjacent to which to check for a querier 3704 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 3705 * 3706 * Checks whether the given interface has a bridge on top and if so returns 3707 * true if a selected querier is behind one of the other ports of this 3708 * bridge. Otherwise returns false. 
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		/* no foreign querier seen, or the selected querier sits
		 * behind @dev itself -> not "adjacent"
		 */
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/* Account one IGMP/MLD packet of @type, travelling in direction @dir,
 * into the given per-cpu stats block. The transport payload length is
 * used to tell apart query versions that share a message type.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* anything longer than a bare igmphdr is a v3 query */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* a zero 'code' field counts as a v1 query */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* anything longer than struct mld_msg is MLDv2 */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

/* Count one IGMP/MLD packet against the port's stats (if @p) or the
 * bridge-global stats. No-op when @type is 0 or stats are disabled.
 */
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	/* per-port stats when a port is given, bridge-global otherwise */
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

/* Allocate the bridge-global per-cpu multicast stats.
 * Returns 0 on success or -ENOMEM.
 */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Free the bridge-global per-cpu multicast stats. */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

/* Sum the per-cpu multicast counters (the port's if @p, else the
 * bridge's) into @dest. Each cpu's block is snapshotted under its
 * u64_stats sequence counter so the 64-bit counters read consistently.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry the snapshot if a writer raced with us */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}

/* Initialize both multicast rhashtables; if the second init fails the
 * first is torn down again, so the caller sees all-or-nothing.
 * Returns 0 or a negative errno from rhashtable_init().
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

/* Destroy both multicast rhashtables. */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}