// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"

/* MDB (multicast group database) hash table: entries are keyed by the whole
 * struct br_ip (protocol, group/source address and vid), so key bytes must be
 * fully initialized by lookers-up (see the memset()s before lookups below).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

/* Per-port S,G entry hash table, keyed by the (port, addr) pair. */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

/* Forward declarations for helpers defined later in this file. */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);

/* Look up a per-port S,G entry by its (port, addr) key.
 * Caller must hold the bridge's multicast_lock.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

/* MDB lookup for data-path (RCU) callers; caller must be inside an RCU
 * read-side critical section.
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* MDB lookup for control-path callers holding the multicast_lock; takes a
 * short RCU section only to satisfy the rhashtable lookup requirements.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Convenience wrapper: look up an IPv4 *,G entry for (dst, vid).
 * The memset() zeroes the unused key fields (e.g. src) since the full
 * struct br_ip is the hash key.
 */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: look up an IPv6 *,G entry for (dst, vid). */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Data-path MDB lookup for a forwarded skb: builds a br_ip key from the
 * packet's destination (and, for IGMPv3/MLDv2, first tries an S,G match
 * using the packet's source before falling back to *,G). Returns NULL when
 * snooping is disabled for this context or the skb is an IGMP/MLD control
 * packet (marked in BR_INPUT_SKB_CB by the snooping code).
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* prefer an exact S,G match, then retry as *,G */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* prefer an exact S,G match, then retry as *,G */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* non-IP traffic: match on the destination MAC address */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}

/* when snooping we need to check if the contexts should be used
 * in the following order:
 * - if pmctx is non-NULL (port), check if it should be used
 * - if pmctx is NULL (bridge), check if brmctx should be used
 */
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
			    const struct net_bridge_mcast_port *pmctx)
{
	if (!netif_running(brmctx->br->dev))
		return false;

	if (pmctx)
		return !br_multicast_port_ctx_state_disabled(pmctx);
	else
		return !br_multicast_ctx_vlan_disabled(brmctx);
}

/* Check whether a port group entry matches (port, src). The source MAC only
 * participates in the match when the port does multicast-to-unicast.
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Install a kernel-managed S,G entry (flagged STAR_EXCL) for the given port
 * group, unless one already exists for (port, sg_ip).
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

/* Remove an automatically installed (STAR_EXCL, kernel-owned) S,G entry for
 * (pg->key.port, sg_ip), if present.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	/* memset first so padding/unused key fields stay zero, then copy the
	 * *,G address and vary only the source below
	 */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
382 static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp) 383 { 384 struct net_bridge *br = star_mp->br; 385 struct net_bridge_mdb_entry *sg_mp; 386 struct net_bridge_port_group *pg; 387 struct br_ip sg_ip; 388 389 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) 390 return; 391 392 memset(&sg_ip, 0, sizeof(sg_ip)); 393 sg_ip = star_mp->addr; 394 for (pg = mlock_dereference(star_mp->ports, br); 395 pg; 396 pg = mlock_dereference(pg->next, br)) { 397 struct net_bridge_group_src *src_ent; 398 399 hlist_for_each_entry(src_ent, &pg->src_list, node) { 400 if (!(src_ent->flags & BR_SGRP_F_INSTALLED)) 401 continue; 402 sg_ip.src = src_ent->addr.src; 403 sg_mp = br_mdb_ip_get(br, &sg_ip); 404 if (!sg_mp) 405 continue; 406 sg_mp->host_joined = star_mp->host_joined; 407 } 408 } 409 } 410 411 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp) 412 { 413 struct net_bridge_port_group __rcu **pp; 414 struct net_bridge_port_group *p; 415 416 /* *,G exclude ports are only added to S,G entries */ 417 if (WARN_ON(br_multicast_is_star_g(&sgmp->addr))) 418 return; 419 420 /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports 421 * we should ignore perm entries since they're managed by user-space 422 */ 423 for (pp = &sgmp->ports; 424 (p = mlock_dereference(*pp, sgmp->br)) != NULL; 425 pp = &p->next) 426 if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL | 427 MDB_PG_FLAGS_PERMANENT))) 428 return; 429 430 /* currently the host can only have joined the *,G which means 431 * we treat it as EXCLUDE {}, so for an S,G it's considered a 432 * STAR_EXCLUDE entry and we can safely leave it 433 */ 434 sgmp->host_joined = false; 435 436 for (pp = &sgmp->ports; 437 (p = mlock_dereference(*pp, sgmp->br)) != NULL;) { 438 if (!(p->flags & MDB_PG_FLAGS_PERMANENT)) 439 br_multicast_del_pg(sgmp, p, pp); 440 else 441 pp = &p->next; 442 } 443 } 444 445 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp, 446 struct 
net_bridge_port_group *sg) 447 { 448 struct net_bridge_port_group_sg_key sg_key; 449 struct net_bridge *br = star_mp->br; 450 struct net_bridge_mcast_port *pmctx; 451 struct net_bridge_port_group *pg; 452 struct net_bridge_mcast *brmctx; 453 454 if (WARN_ON(br_multicast_is_star_g(&sg->key.addr))) 455 return; 456 if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr))) 457 return; 458 459 br_multicast_sg_host_state(star_mp, sg); 460 memset(&sg_key, 0, sizeof(sg_key)); 461 sg_key.addr = sg->key.addr; 462 /* we need to add all exclude ports to the S,G */ 463 for (pg = mlock_dereference(star_mp->ports, br); 464 pg; 465 pg = mlock_dereference(pg->next, br)) { 466 struct net_bridge_port_group *src_pg; 467 468 if (pg == sg || pg->filter_mode == MCAST_INCLUDE) 469 continue; 470 471 sg_key.port = pg->key.port; 472 if (br_sg_port_find(br, &sg_key)) 473 continue; 474 475 pmctx = br_multicast_pg_to_port_ctx(pg); 476 if (!pmctx) 477 continue; 478 brmctx = br_multicast_port_ctx_get_global(pmctx); 479 480 src_pg = __br_multicast_add_group(brmctx, pmctx, 481 &sg->key.addr, 482 sg->eth_addr, 483 MCAST_INCLUDE, false, false); 484 if (IS_ERR_OR_NULL(src_pg) || 485 src_pg->rt_protocol != RTPROT_KERNEL) 486 continue; 487 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL; 488 } 489 } 490 491 static void br_multicast_fwd_src_add(struct net_bridge_group_src *src) 492 { 493 struct net_bridge_mdb_entry *star_mp; 494 struct net_bridge_mcast_port *pmctx; 495 struct net_bridge_port_group *sg; 496 struct net_bridge_mcast *brmctx; 497 struct br_ip sg_ip; 498 499 if (src->flags & BR_SGRP_F_INSTALLED) 500 return; 501 502 memset(&sg_ip, 0, sizeof(sg_ip)); 503 pmctx = br_multicast_pg_to_port_ctx(src->pg); 504 if (!pmctx) 505 return; 506 brmctx = br_multicast_port_ctx_get_global(pmctx); 507 sg_ip = src->pg->key.addr; 508 sg_ip.src = src->addr.src; 509 510 sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip, 511 src->pg->eth_addr, MCAST_INCLUDE, false, 512 !timer_pending(&src->timer)); 513 if (IS_ERR_OR_NULL(sg)) 
514 return; 515 src->flags |= BR_SGRP_F_INSTALLED; 516 sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL; 517 518 /* if it was added by user-space as perm we can skip next steps */ 519 if (sg->rt_protocol != RTPROT_KERNEL && 520 (sg->flags & MDB_PG_FLAGS_PERMANENT)) 521 return; 522 523 /* the kernel is now responsible for removing this S,G */ 524 del_timer(&sg->timer); 525 star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr); 526 if (!star_mp) 527 return; 528 529 br_multicast_sg_add_exclude_ports(star_mp, sg); 530 } 531 532 static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src, 533 bool fastleave) 534 { 535 struct net_bridge_port_group *p, *pg = src->pg; 536 struct net_bridge_port_group __rcu **pp; 537 struct net_bridge_mdb_entry *mp; 538 struct br_ip sg_ip; 539 540 memset(&sg_ip, 0, sizeof(sg_ip)); 541 sg_ip = pg->key.addr; 542 sg_ip.src = src->addr.src; 543 544 mp = br_mdb_ip_get(src->br, &sg_ip); 545 if (!mp) 546 return; 547 548 for (pp = &mp->ports; 549 (p = mlock_dereference(*pp, src->br)) != NULL; 550 pp = &p->next) { 551 if (!br_port_group_equal(p, pg->key.port, pg->eth_addr)) 552 continue; 553 554 if (p->rt_protocol != RTPROT_KERNEL && 555 (p->flags & MDB_PG_FLAGS_PERMANENT) && 556 !(src->flags & BR_SGRP_F_USER_ADDED)) 557 break; 558 559 if (fastleave) 560 p->flags |= MDB_PG_FLAGS_FAST_LEAVE; 561 br_multicast_del_pg(mp, p, pp); 562 break; 563 } 564 src->flags &= ~BR_SGRP_F_INSTALLED; 565 } 566 567 /* install S,G and based on src's timer enable or disable forwarding */ 568 static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src) 569 { 570 struct net_bridge_port_group_sg_key sg_key; 571 struct net_bridge_port_group *sg; 572 u8 old_flags; 573 574 br_multicast_fwd_src_add(src); 575 576 memset(&sg_key, 0, sizeof(sg_key)); 577 sg_key.addr = src->pg->key.addr; 578 sg_key.addr.src = src->addr.src; 579 sg_key.port = src->pg->key.port; 580 581 sg = br_sg_port_find(src->br, &sg_key); 582 if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT)) 583 
return; 584 585 old_flags = sg->flags; 586 if (timer_pending(&src->timer)) 587 sg->flags &= ~MDB_PG_FLAGS_BLOCKED; 588 else 589 sg->flags |= MDB_PG_FLAGS_BLOCKED; 590 591 if (old_flags != sg->flags) { 592 struct net_bridge_mdb_entry *sg_mp; 593 594 sg_mp = br_mdb_ip_get(src->br, &sg_key.addr); 595 if (!sg_mp) 596 return; 597 br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB); 598 } 599 } 600 601 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc) 602 { 603 struct net_bridge_mdb_entry *mp; 604 605 mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc); 606 WARN_ON(!hlist_unhashed(&mp->mdb_node)); 607 WARN_ON(mp->ports); 608 609 timer_shutdown_sync(&mp->timer); 610 kfree_rcu(mp, rcu); 611 } 612 613 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp) 614 { 615 struct net_bridge *br = mp->br; 616 617 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode, 618 br_mdb_rht_params); 619 hlist_del_init_rcu(&mp->mdb_node); 620 hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list); 621 queue_work(system_long_wq, &br->mcast_gc_work); 622 } 623 624 static void br_multicast_group_expired(struct timer_list *t) 625 { 626 struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer); 627 struct net_bridge *br = mp->br; 628 629 spin_lock(&br->multicast_lock); 630 if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) || 631 timer_pending(&mp->timer)) 632 goto out; 633 634 br_multicast_host_leave(mp, true); 635 636 if (mp->ports) 637 goto out; 638 br_multicast_del_mdb_entry(mp); 639 out: 640 spin_unlock(&br->multicast_lock); 641 } 642 643 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc) 644 { 645 struct net_bridge_group_src *src; 646 647 src = container_of(gc, struct net_bridge_group_src, mcast_gc); 648 WARN_ON(!hlist_unhashed(&src->node)); 649 650 timer_shutdown_sync(&src->timer); 651 kfree_rcu(src, rcu); 652 } 653 654 void __br_multicast_del_group_src(struct net_bridge_group_src *src) 655 { 656 struct 
net_bridge *br = src->pg->key.port->br; 657 658 hlist_del_init_rcu(&src->node); 659 src->pg->src_ents--; 660 hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list); 661 queue_work(system_long_wq, &br->mcast_gc_work); 662 } 663 664 void br_multicast_del_group_src(struct net_bridge_group_src *src, 665 bool fastleave) 666 { 667 br_multicast_fwd_src_remove(src, fastleave); 668 __br_multicast_del_group_src(src); 669 } 670 671 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc) 672 { 673 struct net_bridge_port_group *pg; 674 675 pg = container_of(gc, struct net_bridge_port_group, mcast_gc); 676 WARN_ON(!hlist_unhashed(&pg->mglist)); 677 WARN_ON(!hlist_empty(&pg->src_list)); 678 679 timer_shutdown_sync(&pg->rexmit_timer); 680 timer_shutdown_sync(&pg->timer); 681 kfree_rcu(pg, rcu); 682 } 683 684 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp, 685 struct net_bridge_port_group *pg, 686 struct net_bridge_port_group __rcu **pp) 687 { 688 struct net_bridge *br = pg->key.port->br; 689 struct net_bridge_group_src *ent; 690 struct hlist_node *tmp; 691 692 rcu_assign_pointer(*pp, pg->next); 693 hlist_del_init(&pg->mglist); 694 br_multicast_eht_clean_sets(pg); 695 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) 696 br_multicast_del_group_src(ent, false); 697 br_mdb_notify(br->dev, mp, pg, RTM_DELMDB); 698 if (!br_multicast_is_star_g(&mp->addr)) { 699 rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode, 700 br_sg_port_rht_params); 701 br_multicast_sg_del_exclude_ports(mp); 702 } else { 703 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); 704 } 705 hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list); 706 queue_work(system_long_wq, &br->mcast_gc_work); 707 708 if (!mp->ports && !mp->host_joined && netif_running(br->dev)) 709 mod_timer(&mp->timer, jiffies); 710 } 711 712 static void br_multicast_find_del_pg(struct net_bridge *br, 713 struct net_bridge_port_group *pg) 714 { 715 struct net_bridge_port_group __rcu **pp; 716 struct 
net_bridge_mdb_entry *mp; 717 struct net_bridge_port_group *p; 718 719 mp = br_mdb_ip_get(br, &pg->key.addr); 720 if (WARN_ON(!mp)) 721 return; 722 723 for (pp = &mp->ports; 724 (p = mlock_dereference(*pp, br)) != NULL; 725 pp = &p->next) { 726 if (p != pg) 727 continue; 728 729 br_multicast_del_pg(mp, pg, pp); 730 return; 731 } 732 733 WARN_ON(1); 734 } 735 736 static void br_multicast_port_group_expired(struct timer_list *t) 737 { 738 struct net_bridge_port_group *pg = from_timer(pg, t, timer); 739 struct net_bridge_group_src *src_ent; 740 struct net_bridge *br = pg->key.port->br; 741 struct hlist_node *tmp; 742 bool changed; 743 744 spin_lock(&br->multicast_lock); 745 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 746 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) 747 goto out; 748 749 changed = !!(pg->filter_mode == MCAST_EXCLUDE); 750 pg->filter_mode = MCAST_INCLUDE; 751 hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) { 752 if (!timer_pending(&src_ent->timer)) { 753 br_multicast_del_group_src(src_ent, false); 754 changed = true; 755 } 756 } 757 758 if (hlist_empty(&pg->src_list)) { 759 br_multicast_find_del_pg(br, pg); 760 } else if (changed) { 761 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr); 762 763 if (changed && br_multicast_is_star_g(&pg->key.addr)) 764 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); 765 766 if (WARN_ON(!mp)) 767 goto out; 768 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB); 769 } 770 out: 771 spin_unlock(&br->multicast_lock); 772 } 773 774 static void br_multicast_gc(struct hlist_head *head) 775 { 776 struct net_bridge_mcast_gc *gcent; 777 struct hlist_node *tmp; 778 779 hlist_for_each_entry_safe(gcent, tmp, head, gc_node) { 780 hlist_del_init(&gcent->gc_node); 781 gcent->destroy(gcent); 782 } 783 } 784 785 static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx, 786 struct net_bridge_mcast_port *pmctx, 787 struct sk_buff *skb) 788 { 789 struct 
net_bridge_vlan *vlan = NULL;

	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	/* tag the query when the context's vlan is not untagged */
	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}

/* Build an IGMPv2/v3 query skb (Ethernet + IPv4 with Router Alert + IGMP).
 * For IGMPv3 with sources, only sources whose timers are on the requested
 * side of the LMQT and which still have retransmissions pending are
 * included; their rexmit counters are decremented and *need_rexmit is set
 * if any remain. Returns NULL if nothing needs to be sent or the query
 * doesn't fit the MTU.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* count the sources that will actually be included */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 accounts for the Router Alert IP option */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24-byte header: 20 bytes + 4-byte RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* fill the source list using the same filter as the count
		 * above; lmqt_srcs is reused as the write index
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_alloc_query(): builds an MLDv1/v2
 * query (Ethernet + IPv6 with Hop-by-Hop Router Alert + ICMPv6), applying
 * the same LLQT-based source selection for MLDv2.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* count the sources that will actually be included */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* + 8 accounts for the Hop-by-Hop options header */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		/* no usable link-local source address - remember that so
		 * other code can avoid sending IPv6 queries
		 */
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* fill the source list using the same filter as the count
		 * above; llqt_srcs is reused as the write index
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Protocol dispatcher for query allocation; defaults the destination to
 * 224.0.0.1 (all-hosts) or ff02::1 (all-nodes) when ip_dst is NULL.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Find or create the MDB entry for a group. Disables snooping entirely
 * (and returns -E2BIG) when hash_max would be exceeded. Must be called
 * with the multicast_lock held; allocates with GFP_ATOMIC for that reason.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
timer_pending(&src->timer)) 1202 goto out; 1203 1204 pg = src->pg; 1205 if (pg->filter_mode == MCAST_INCLUDE) { 1206 br_multicast_del_group_src(src, false); 1207 if (!hlist_empty(&pg->src_list)) 1208 goto out; 1209 br_multicast_find_del_pg(br, pg); 1210 } else { 1211 br_multicast_fwd_src_handle(src); 1212 } 1213 1214 out: 1215 spin_unlock(&br->multicast_lock); 1216 } 1217 1218 struct net_bridge_group_src * 1219 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip) 1220 { 1221 struct net_bridge_group_src *ent; 1222 1223 switch (ip->proto) { 1224 case htons(ETH_P_IP): 1225 hlist_for_each_entry(ent, &pg->src_list, node) 1226 if (ip->src.ip4 == ent->addr.src.ip4) 1227 return ent; 1228 break; 1229 #if IS_ENABLED(CONFIG_IPV6) 1230 case htons(ETH_P_IPV6): 1231 hlist_for_each_entry(ent, &pg->src_list, node) 1232 if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6)) 1233 return ent; 1234 break; 1235 #endif 1236 } 1237 1238 return NULL; 1239 } 1240 1241 struct net_bridge_group_src * 1242 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip) 1243 { 1244 struct net_bridge_group_src *grp_src; 1245 1246 if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT)) 1247 return NULL; 1248 1249 switch (src_ip->proto) { 1250 case htons(ETH_P_IP): 1251 if (ipv4_is_zeronet(src_ip->src.ip4) || 1252 ipv4_is_multicast(src_ip->src.ip4)) 1253 return NULL; 1254 break; 1255 #if IS_ENABLED(CONFIG_IPV6) 1256 case htons(ETH_P_IPV6): 1257 if (ipv6_addr_any(&src_ip->src.ip6) || 1258 ipv6_addr_is_multicast(&src_ip->src.ip6)) 1259 return NULL; 1260 break; 1261 #endif 1262 } 1263 1264 grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC); 1265 if (unlikely(!grp_src)) 1266 return NULL; 1267 1268 grp_src->pg = pg; 1269 grp_src->br = pg->key.port->br; 1270 grp_src->addr = *src_ip; 1271 grp_src->mcast_gc.destroy = br_multicast_destroy_group_src; 1272 timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0); 1273 1274 hlist_add_head_rcu(&grp_src->node, 
&pg->src_list); 1275 pg->src_ents++; 1276 1277 return grp_src; 1278 } 1279 1280 struct net_bridge_port_group *br_multicast_new_port_group( 1281 struct net_bridge_port *port, 1282 const struct br_ip *group, 1283 struct net_bridge_port_group __rcu *next, 1284 unsigned char flags, 1285 const unsigned char *src, 1286 u8 filter_mode, 1287 u8 rt_protocol) 1288 { 1289 struct net_bridge_port_group *p; 1290 1291 p = kzalloc(sizeof(*p), GFP_ATOMIC); 1292 if (unlikely(!p)) 1293 return NULL; 1294 1295 p->key.addr = *group; 1296 p->key.port = port; 1297 p->flags = flags; 1298 p->filter_mode = filter_mode; 1299 p->rt_protocol = rt_protocol; 1300 p->eht_host_tree = RB_ROOT; 1301 p->eht_set_tree = RB_ROOT; 1302 p->mcast_gc.destroy = br_multicast_destroy_port_group; 1303 INIT_HLIST_HEAD(&p->src_list); 1304 1305 if (!br_multicast_is_star_g(group) && 1306 rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode, 1307 br_sg_port_rht_params)) { 1308 kfree(p); 1309 return NULL; 1310 } 1311 1312 rcu_assign_pointer(p->next, next); 1313 timer_setup(&p->timer, br_multicast_port_group_expired, 0); 1314 timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0); 1315 hlist_add_head(&p->mglist, &port->mglist); 1316 1317 if (src) 1318 memcpy(p->eth_addr, src, ETH_ALEN); 1319 else 1320 eth_broadcast_addr(p->eth_addr); 1321 1322 return p; 1323 } 1324 1325 void br_multicast_host_join(const struct net_bridge_mcast *brmctx, 1326 struct net_bridge_mdb_entry *mp, bool notify) 1327 { 1328 if (!mp->host_joined) { 1329 mp->host_joined = true; 1330 if (br_multicast_is_star_g(&mp->addr)) 1331 br_multicast_star_g_host_state(mp); 1332 if (notify) 1333 br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB); 1334 } 1335 1336 if (br_group_is_l2(&mp->addr)) 1337 return; 1338 1339 mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval); 1340 } 1341 1342 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify) 1343 { 1344 if (!mp->host_joined) 1345 return; 1346 1347 
mp->host_joined = false; 1348 if (br_multicast_is_star_g(&mp->addr)) 1349 br_multicast_star_g_host_state(mp); 1350 if (notify) 1351 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB); 1352 } 1353 1354 static struct net_bridge_port_group * 1355 __br_multicast_add_group(struct net_bridge_mcast *brmctx, 1356 struct net_bridge_mcast_port *pmctx, 1357 struct br_ip *group, 1358 const unsigned char *src, 1359 u8 filter_mode, 1360 bool igmpv2_mldv1, 1361 bool blocked) 1362 { 1363 struct net_bridge_port_group __rcu **pp; 1364 struct net_bridge_port_group *p = NULL; 1365 struct net_bridge_mdb_entry *mp; 1366 unsigned long now = jiffies; 1367 1368 if (!br_multicast_ctx_should_use(brmctx, pmctx)) 1369 goto out; 1370 1371 mp = br_multicast_new_group(brmctx->br, group); 1372 if (IS_ERR(mp)) 1373 return ERR_CAST(mp); 1374 1375 if (!pmctx) { 1376 br_multicast_host_join(brmctx, mp, true); 1377 goto out; 1378 } 1379 1380 for (pp = &mp->ports; 1381 (p = mlock_dereference(*pp, brmctx->br)) != NULL; 1382 pp = &p->next) { 1383 if (br_port_group_equal(p, pmctx->port, src)) 1384 goto found; 1385 if ((unsigned long)p->key.port < (unsigned long)pmctx->port) 1386 break; 1387 } 1388 1389 p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src, 1390 filter_mode, RTPROT_KERNEL); 1391 if (unlikely(!p)) { 1392 p = ERR_PTR(-ENOMEM); 1393 goto out; 1394 } 1395 rcu_assign_pointer(*pp, p); 1396 if (blocked) 1397 p->flags |= MDB_PG_FLAGS_BLOCKED; 1398 br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB); 1399 1400 found: 1401 if (igmpv2_mldv1) 1402 mod_timer(&p->timer, 1403 now + brmctx->multicast_membership_interval); 1404 1405 out: 1406 return p; 1407 } 1408 1409 static int br_multicast_add_group(struct net_bridge_mcast *brmctx, 1410 struct net_bridge_mcast_port *pmctx, 1411 struct br_ip *group, 1412 const unsigned char *src, 1413 u8 filter_mode, 1414 bool igmpv2_mldv1) 1415 { 1416 struct net_bridge_port_group *pg; 1417 int err; 1418 1419 spin_lock(&brmctx->br->multicast_lock); 1420 pg = 
__br_multicast_add_group(brmctx, pmctx, group, src, filter_mode, 1421 igmpv2_mldv1, false); 1422 /* NULL is considered valid for host joined groups */ 1423 err = PTR_ERR_OR_ZERO(pg); 1424 spin_unlock(&brmctx->br->multicast_lock); 1425 1426 return err; 1427 } 1428 1429 static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx, 1430 struct net_bridge_mcast_port *pmctx, 1431 __be32 group, 1432 __u16 vid, 1433 const unsigned char *src, 1434 bool igmpv2) 1435 { 1436 struct br_ip br_group; 1437 u8 filter_mode; 1438 1439 if (ipv4_is_local_multicast(group)) 1440 return 0; 1441 1442 memset(&br_group, 0, sizeof(br_group)); 1443 br_group.dst.ip4 = group; 1444 br_group.proto = htons(ETH_P_IP); 1445 br_group.vid = vid; 1446 filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE; 1447 1448 return br_multicast_add_group(brmctx, pmctx, &br_group, src, 1449 filter_mode, igmpv2); 1450 } 1451 1452 #if IS_ENABLED(CONFIG_IPV6) 1453 static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx, 1454 struct net_bridge_mcast_port *pmctx, 1455 const struct in6_addr *group, 1456 __u16 vid, 1457 const unsigned char *src, 1458 bool mldv1) 1459 { 1460 struct br_ip br_group; 1461 u8 filter_mode; 1462 1463 if (ipv6_addr_is_ll_all_nodes(group)) 1464 return 0; 1465 1466 memset(&br_group, 0, sizeof(br_group)); 1467 br_group.dst.ip6 = *group; 1468 br_group.proto = htons(ETH_P_IPV6); 1469 br_group.vid = vid; 1470 filter_mode = mldv1 ? 
MCAST_EXCLUDE : MCAST_INCLUDE; 1471 1472 return br_multicast_add_group(brmctx, pmctx, &br_group, src, 1473 filter_mode, mldv1); 1474 } 1475 #endif 1476 1477 static bool br_multicast_rport_del(struct hlist_node *rlist) 1478 { 1479 if (hlist_unhashed(rlist)) 1480 return false; 1481 1482 hlist_del_init_rcu(rlist); 1483 return true; 1484 } 1485 1486 static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx) 1487 { 1488 return br_multicast_rport_del(&pmctx->ip4_rlist); 1489 } 1490 1491 static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx) 1492 { 1493 #if IS_ENABLED(CONFIG_IPV6) 1494 return br_multicast_rport_del(&pmctx->ip6_rlist); 1495 #else 1496 return false; 1497 #endif 1498 } 1499 1500 static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx, 1501 struct timer_list *t, 1502 struct hlist_node *rlist) 1503 { 1504 struct net_bridge *br = pmctx->port->br; 1505 bool del; 1506 1507 spin_lock(&br->multicast_lock); 1508 if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED || 1509 pmctx->multicast_router == MDB_RTR_TYPE_PERM || 1510 timer_pending(t)) 1511 goto out; 1512 1513 del = br_multicast_rport_del(rlist); 1514 br_multicast_rport_del_notify(pmctx, del); 1515 out: 1516 spin_unlock(&br->multicast_lock); 1517 } 1518 1519 static void br_ip4_multicast_router_expired(struct timer_list *t) 1520 { 1521 struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, 1522 ip4_mc_router_timer); 1523 1524 br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist); 1525 } 1526 1527 #if IS_ENABLED(CONFIG_IPV6) 1528 static void br_ip6_multicast_router_expired(struct timer_list *t) 1529 { 1530 struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, 1531 ip6_mc_router_timer); 1532 1533 br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist); 1534 } 1535 #endif 1536 1537 static void br_mc_router_state_change(struct net_bridge *p, 1538 bool is_mc_router) 1539 { 1540 struct switchdev_attr attr = { 1541 .orig_dev = p->dev, 1542 .id = 
SWITCHDEV_ATTR_ID_BRIDGE_MROUTER, 1543 .flags = SWITCHDEV_F_DEFER, 1544 .u.mrouter = is_mc_router, 1545 }; 1546 1547 switchdev_port_attr_set(p->dev, &attr, NULL); 1548 } 1549 1550 static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx, 1551 struct timer_list *timer) 1552 { 1553 spin_lock(&brmctx->br->multicast_lock); 1554 if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED || 1555 brmctx->multicast_router == MDB_RTR_TYPE_PERM || 1556 br_ip4_multicast_is_router(brmctx) || 1557 br_ip6_multicast_is_router(brmctx)) 1558 goto out; 1559 1560 br_mc_router_state_change(brmctx->br, false); 1561 out: 1562 spin_unlock(&brmctx->br->multicast_lock); 1563 } 1564 1565 static void br_ip4_multicast_local_router_expired(struct timer_list *t) 1566 { 1567 struct net_bridge_mcast *brmctx = from_timer(brmctx, t, 1568 ip4_mc_router_timer); 1569 1570 br_multicast_local_router_expired(brmctx, t); 1571 } 1572 1573 #if IS_ENABLED(CONFIG_IPV6) 1574 static void br_ip6_multicast_local_router_expired(struct timer_list *t) 1575 { 1576 struct net_bridge_mcast *brmctx = from_timer(brmctx, t, 1577 ip6_mc_router_timer); 1578 1579 br_multicast_local_router_expired(brmctx, t); 1580 } 1581 #endif 1582 1583 static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx, 1584 struct bridge_mcast_own_query *query) 1585 { 1586 spin_lock(&brmctx->br->multicast_lock); 1587 if (!netif_running(brmctx->br->dev) || 1588 br_multicast_ctx_vlan_global_disabled(brmctx) || 1589 !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED)) 1590 goto out; 1591 1592 br_multicast_start_querier(brmctx, query); 1593 1594 out: 1595 spin_unlock(&brmctx->br->multicast_lock); 1596 } 1597 1598 static void br_ip4_multicast_querier_expired(struct timer_list *t) 1599 { 1600 struct net_bridge_mcast *brmctx = from_timer(brmctx, t, 1601 ip4_other_query.timer); 1602 1603 br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query); 1604 } 1605 1606 #if IS_ENABLED(CONFIG_IPV6) 1607 static void 
br_ip6_multicast_querier_expired(struct timer_list *t) 1608 { 1609 struct net_bridge_mcast *brmctx = from_timer(brmctx, t, 1610 ip6_other_query.timer); 1611 1612 br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query); 1613 } 1614 #endif 1615 1616 static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx, 1617 struct br_ip *ip, 1618 struct sk_buff *skb) 1619 { 1620 if (ip->proto == htons(ETH_P_IP)) 1621 brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr; 1622 #if IS_ENABLED(CONFIG_IPV6) 1623 else 1624 brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr; 1625 #endif 1626 } 1627 1628 static void __br_multicast_send_query(struct net_bridge_mcast *brmctx, 1629 struct net_bridge_mcast_port *pmctx, 1630 struct net_bridge_port_group *pg, 1631 struct br_ip *ip_dst, 1632 struct br_ip *group, 1633 bool with_srcs, 1634 u8 sflag, 1635 bool *need_rexmit) 1636 { 1637 bool over_lmqt = !!sflag; 1638 struct sk_buff *skb; 1639 u8 igmp_type; 1640 1641 if (!br_multicast_ctx_should_use(brmctx, pmctx) || 1642 !br_multicast_ctx_matches_vlan_snooping(brmctx)) 1643 return; 1644 1645 again_under_lmqt: 1646 skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group, 1647 with_srcs, over_lmqt, sflag, &igmp_type, 1648 need_rexmit); 1649 if (!skb) 1650 return; 1651 1652 if (pmctx) { 1653 skb->dev = pmctx->port->dev; 1654 br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type, 1655 BR_MCAST_DIR_TX); 1656 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, 1657 dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev, 1658 br_dev_queue_push_xmit); 1659 1660 if (over_lmqt && with_srcs && sflag) { 1661 over_lmqt = false; 1662 goto again_under_lmqt; 1663 } 1664 } else { 1665 br_multicast_select_own_querier(brmctx, group, skb); 1666 br_multicast_count(brmctx->br, NULL, skb, igmp_type, 1667 BR_MCAST_DIR_RX); 1668 netif_rx(skb); 1669 } 1670 } 1671 1672 static void br_multicast_read_querier(const struct bridge_mcast_querier *querier, 1673 struct bridge_mcast_querier *dest) 1674 { 
1675 unsigned int seq; 1676 1677 memset(dest, 0, sizeof(*dest)); 1678 do { 1679 seq = read_seqcount_begin(&querier->seq); 1680 dest->port_ifidx = querier->port_ifidx; 1681 memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip)); 1682 } while (read_seqcount_retry(&querier->seq, seq)); 1683 } 1684 1685 static void br_multicast_update_querier(struct net_bridge_mcast *brmctx, 1686 struct bridge_mcast_querier *querier, 1687 int ifindex, 1688 struct br_ip *saddr) 1689 { 1690 write_seqcount_begin(&querier->seq); 1691 querier->port_ifidx = ifindex; 1692 memcpy(&querier->addr, saddr, sizeof(*saddr)); 1693 write_seqcount_end(&querier->seq); 1694 } 1695 1696 static void br_multicast_send_query(struct net_bridge_mcast *brmctx, 1697 struct net_bridge_mcast_port *pmctx, 1698 struct bridge_mcast_own_query *own_query) 1699 { 1700 struct bridge_mcast_other_query *other_query = NULL; 1701 struct bridge_mcast_querier *querier; 1702 struct br_ip br_group; 1703 unsigned long time; 1704 1705 if (!br_multicast_ctx_should_use(brmctx, pmctx) || 1706 !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) || 1707 !brmctx->multicast_querier) 1708 return; 1709 1710 memset(&br_group.dst, 0, sizeof(br_group.dst)); 1711 1712 if (pmctx ? 
(own_query == &pmctx->ip4_own_query) : 1713 (own_query == &brmctx->ip4_own_query)) { 1714 querier = &brmctx->ip4_querier; 1715 other_query = &brmctx->ip4_other_query; 1716 br_group.proto = htons(ETH_P_IP); 1717 #if IS_ENABLED(CONFIG_IPV6) 1718 } else { 1719 querier = &brmctx->ip6_querier; 1720 other_query = &brmctx->ip6_other_query; 1721 br_group.proto = htons(ETH_P_IPV6); 1722 #endif 1723 } 1724 1725 if (!other_query || timer_pending(&other_query->timer)) 1726 return; 1727 1728 /* we're about to select ourselves as querier */ 1729 if (!pmctx && querier->port_ifidx) { 1730 struct br_ip zeroip = {}; 1731 1732 br_multicast_update_querier(brmctx, querier, 0, &zeroip); 1733 } 1734 1735 __br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false, 1736 0, NULL); 1737 1738 time = jiffies; 1739 time += own_query->startup_sent < brmctx->multicast_startup_query_count ? 1740 brmctx->multicast_startup_query_interval : 1741 brmctx->multicast_query_interval; 1742 mod_timer(&own_query->timer, time); 1743 } 1744 1745 static void 1746 br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx, 1747 struct bridge_mcast_own_query *query) 1748 { 1749 struct net_bridge *br = pmctx->port->br; 1750 struct net_bridge_mcast *brmctx; 1751 1752 spin_lock(&br->multicast_lock); 1753 if (br_multicast_port_ctx_state_stopped(pmctx)) 1754 goto out; 1755 1756 brmctx = br_multicast_port_ctx_get_global(pmctx); 1757 if (query->startup_sent < brmctx->multicast_startup_query_count) 1758 query->startup_sent++; 1759 1760 br_multicast_send_query(brmctx, pmctx, query); 1761 1762 out: 1763 spin_unlock(&br->multicast_lock); 1764 } 1765 1766 static void br_ip4_multicast_port_query_expired(struct timer_list *t) 1767 { 1768 struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, 1769 ip4_own_query.timer); 1770 1771 br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query); 1772 } 1773 1774 #if IS_ENABLED(CONFIG_IPV6) 1775 static void br_ip6_multicast_port_query_expired(struct timer_list 
*t) 1776 { 1777 struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, 1778 ip6_own_query.timer); 1779 1780 br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query); 1781 } 1782 #endif 1783 1784 static void br_multicast_port_group_rexmit(struct timer_list *t) 1785 { 1786 struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer); 1787 struct bridge_mcast_other_query *other_query = NULL; 1788 struct net_bridge *br = pg->key.port->br; 1789 struct net_bridge_mcast_port *pmctx; 1790 struct net_bridge_mcast *brmctx; 1791 bool need_rexmit = false; 1792 1793 spin_lock(&br->multicast_lock); 1794 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) || 1795 !br_opt_get(br, BROPT_MULTICAST_ENABLED)) 1796 goto out; 1797 1798 pmctx = br_multicast_pg_to_port_ctx(pg); 1799 if (!pmctx) 1800 goto out; 1801 brmctx = br_multicast_port_ctx_get_global(pmctx); 1802 if (!brmctx->multicast_querier) 1803 goto out; 1804 1805 if (pg->key.addr.proto == htons(ETH_P_IP)) 1806 other_query = &brmctx->ip4_other_query; 1807 #if IS_ENABLED(CONFIG_IPV6) 1808 else 1809 other_query = &brmctx->ip6_other_query; 1810 #endif 1811 1812 if (!other_query || timer_pending(&other_query->timer)) 1813 goto out; 1814 1815 if (pg->grp_query_rexmit_cnt) { 1816 pg->grp_query_rexmit_cnt--; 1817 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr, 1818 &pg->key.addr, false, 1, NULL); 1819 } 1820 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr, 1821 &pg->key.addr, true, 0, &need_rexmit); 1822 1823 if (pg->grp_query_rexmit_cnt || need_rexmit) 1824 mod_timer(&pg->rexmit_timer, jiffies + 1825 brmctx->multicast_last_member_interval); 1826 out: 1827 spin_unlock(&br->multicast_lock); 1828 } 1829 1830 static int br_mc_disabled_update(struct net_device *dev, bool value, 1831 struct netlink_ext_ack *extack) 1832 { 1833 struct switchdev_attr attr = { 1834 .orig_dev = dev, 1835 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, 1836 .flags = SWITCHDEV_F_DEFER, 1837 .u.mc_disabled = !value, 1838 }; 
1839 1840 return switchdev_port_attr_set(dev, &attr, extack); 1841 } 1842 1843 void br_multicast_port_ctx_init(struct net_bridge_port *port, 1844 struct net_bridge_vlan *vlan, 1845 struct net_bridge_mcast_port *pmctx) 1846 { 1847 pmctx->port = port; 1848 pmctx->vlan = vlan; 1849 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 1850 timer_setup(&pmctx->ip4_mc_router_timer, 1851 br_ip4_multicast_router_expired, 0); 1852 timer_setup(&pmctx->ip4_own_query.timer, 1853 br_ip4_multicast_port_query_expired, 0); 1854 #if IS_ENABLED(CONFIG_IPV6) 1855 timer_setup(&pmctx->ip6_mc_router_timer, 1856 br_ip6_multicast_router_expired, 0); 1857 timer_setup(&pmctx->ip6_own_query.timer, 1858 br_ip6_multicast_port_query_expired, 0); 1859 #endif 1860 } 1861 1862 void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx) 1863 { 1864 #if IS_ENABLED(CONFIG_IPV6) 1865 del_timer_sync(&pmctx->ip6_mc_router_timer); 1866 #endif 1867 del_timer_sync(&pmctx->ip4_mc_router_timer); 1868 } 1869 1870 int br_multicast_add_port(struct net_bridge_port *port) 1871 { 1872 int err; 1873 1874 port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT; 1875 br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx); 1876 1877 err = br_mc_disabled_update(port->dev, 1878 br_opt_get(port->br, 1879 BROPT_MULTICAST_ENABLED), 1880 NULL); 1881 if (err && err != -EOPNOTSUPP) 1882 return err; 1883 1884 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 1885 if (!port->mcast_stats) 1886 return -ENOMEM; 1887 1888 return 0; 1889 } 1890 1891 void br_multicast_del_port(struct net_bridge_port *port) 1892 { 1893 struct net_bridge *br = port->br; 1894 struct net_bridge_port_group *pg; 1895 HLIST_HEAD(deleted_head); 1896 struct hlist_node *n; 1897 1898 /* Take care of the remaining groups, only perm ones should be left */ 1899 spin_lock_bh(&br->multicast_lock); 1900 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 1901 br_multicast_find_del_pg(br, pg); 1902 
hlist_move_list(&br->mcast_gc_list, &deleted_head); 1903 spin_unlock_bh(&br->multicast_lock); 1904 br_multicast_gc(&deleted_head); 1905 br_multicast_port_ctx_deinit(&port->multicast_ctx); 1906 free_percpu(port->mcast_stats); 1907 } 1908 1909 static void br_multicast_enable(struct bridge_mcast_own_query *query) 1910 { 1911 query->startup_sent = 0; 1912 1913 if (try_to_del_timer_sync(&query->timer) >= 0 || 1914 del_timer(&query->timer)) 1915 mod_timer(&query->timer, jiffies); 1916 } 1917 1918 static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx) 1919 { 1920 struct net_bridge *br = pmctx->port->br; 1921 struct net_bridge_mcast *brmctx; 1922 1923 brmctx = br_multicast_port_ctx_get_global(pmctx); 1924 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || 1925 !netif_running(br->dev)) 1926 return; 1927 1928 br_multicast_enable(&pmctx->ip4_own_query); 1929 #if IS_ENABLED(CONFIG_IPV6) 1930 br_multicast_enable(&pmctx->ip6_own_query); 1931 #endif 1932 if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) { 1933 br_ip4_multicast_add_router(brmctx, pmctx); 1934 br_ip6_multicast_add_router(brmctx, pmctx); 1935 } 1936 } 1937 1938 void br_multicast_enable_port(struct net_bridge_port *port) 1939 { 1940 struct net_bridge *br = port->br; 1941 1942 spin_lock_bh(&br->multicast_lock); 1943 __br_multicast_enable_port_ctx(&port->multicast_ctx); 1944 spin_unlock_bh(&br->multicast_lock); 1945 } 1946 1947 static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx) 1948 { 1949 struct net_bridge_port_group *pg; 1950 struct hlist_node *n; 1951 bool del = false; 1952 1953 hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist) 1954 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) && 1955 (!br_multicast_port_ctx_is_vlan(pmctx) || 1956 pg->key.addr.vid == pmctx->vlan->vid)) 1957 br_multicast_find_del_pg(pmctx->port->br, pg); 1958 1959 del |= br_ip4_multicast_rport_del(pmctx); 1960 del_timer(&pmctx->ip4_mc_router_timer); 1961 
del_timer(&pmctx->ip4_own_query.timer); 1962 del |= br_ip6_multicast_rport_del(pmctx); 1963 #if IS_ENABLED(CONFIG_IPV6) 1964 del_timer(&pmctx->ip6_mc_router_timer); 1965 del_timer(&pmctx->ip6_own_query.timer); 1966 #endif 1967 br_multicast_rport_del_notify(pmctx, del); 1968 } 1969 1970 void br_multicast_disable_port(struct net_bridge_port *port) 1971 { 1972 spin_lock_bh(&port->br->multicast_lock); 1973 __br_multicast_disable_port_ctx(&port->multicast_ctx); 1974 spin_unlock_bh(&port->br->multicast_lock); 1975 } 1976 1977 static int __grp_src_delete_marked(struct net_bridge_port_group *pg) 1978 { 1979 struct net_bridge_group_src *ent; 1980 struct hlist_node *tmp; 1981 int deleted = 0; 1982 1983 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) 1984 if (ent->flags & BR_SGRP_F_DELETE) { 1985 br_multicast_del_group_src(ent, false); 1986 deleted++; 1987 } 1988 1989 return deleted; 1990 } 1991 1992 static void __grp_src_mod_timer(struct net_bridge_group_src *src, 1993 unsigned long expires) 1994 { 1995 mod_timer(&src->timer, expires); 1996 br_multicast_fwd_src_handle(src); 1997 } 1998 1999 static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx, 2000 struct net_bridge_mcast_port *pmctx, 2001 struct net_bridge_port_group *pg) 2002 { 2003 struct bridge_mcast_other_query *other_query = NULL; 2004 u32 lmqc = brmctx->multicast_last_member_count; 2005 unsigned long lmqt, lmi, now = jiffies; 2006 struct net_bridge_group_src *ent; 2007 2008 if (!netif_running(brmctx->br->dev) || 2009 !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED)) 2010 return; 2011 2012 if (pg->key.addr.proto == htons(ETH_P_IP)) 2013 other_query = &brmctx->ip4_other_query; 2014 #if IS_ENABLED(CONFIG_IPV6) 2015 else 2016 other_query = &brmctx->ip6_other_query; 2017 #endif 2018 2019 lmqt = now + br_multicast_lmqt(brmctx); 2020 hlist_for_each_entry(ent, &pg->src_list, node) { 2021 if (ent->flags & BR_SGRP_F_SEND) { 2022 ent->flags &= ~BR_SGRP_F_SEND; 2023 if (ent->timer.expires > lmqt) 
{ 2024 if (brmctx->multicast_querier && 2025 other_query && 2026 !timer_pending(&other_query->timer)) 2027 ent->src_query_rexmit_cnt = lmqc; 2028 __grp_src_mod_timer(ent, lmqt); 2029 } 2030 } 2031 } 2032 2033 if (!brmctx->multicast_querier || 2034 !other_query || timer_pending(&other_query->timer)) 2035 return; 2036 2037 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr, 2038 &pg->key.addr, true, 1, NULL); 2039 2040 lmi = now + brmctx->multicast_last_member_interval; 2041 if (!timer_pending(&pg->rexmit_timer) || 2042 time_after(pg->rexmit_timer.expires, lmi)) 2043 mod_timer(&pg->rexmit_timer, lmi); 2044 } 2045 2046 static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx, 2047 struct net_bridge_mcast_port *pmctx, 2048 struct net_bridge_port_group *pg) 2049 { 2050 struct bridge_mcast_other_query *other_query = NULL; 2051 unsigned long now = jiffies, lmi; 2052 2053 if (!netif_running(brmctx->br->dev) || 2054 !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED)) 2055 return; 2056 2057 if (pg->key.addr.proto == htons(ETH_P_IP)) 2058 other_query = &brmctx->ip4_other_query; 2059 #if IS_ENABLED(CONFIG_IPV6) 2060 else 2061 other_query = &brmctx->ip6_other_query; 2062 #endif 2063 2064 if (brmctx->multicast_querier && 2065 other_query && !timer_pending(&other_query->timer)) { 2066 lmi = now + brmctx->multicast_last_member_interval; 2067 pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1; 2068 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr, 2069 &pg->key.addr, false, 0, NULL); 2070 if (!timer_pending(&pg->rexmit_timer) || 2071 time_after(pg->rexmit_timer.expires, lmi)) 2072 mod_timer(&pg->rexmit_timer, lmi); 2073 } 2074 2075 if (pg->filter_mode == MCAST_EXCLUDE && 2076 (!timer_pending(&pg->timer) || 2077 time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx)))) 2078 mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx)); 2079 } 2080 2081 /* State Msg type New state Actions 2082 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) 
(B)=GMI 2083 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI 2084 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI 2085 */ 2086 static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx, 2087 struct net_bridge_port_group *pg, void *h_addr, 2088 void *srcs, u32 nsrcs, size_t addr_size, 2089 int grec_type) 2090 { 2091 struct net_bridge_group_src *ent; 2092 unsigned long now = jiffies; 2093 bool changed = false; 2094 struct br_ip src_ip; 2095 u32 src_idx; 2096 2097 memset(&src_ip, 0, sizeof(src_ip)); 2098 src_ip.proto = pg->key.addr.proto; 2099 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 2100 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size); 2101 ent = br_multicast_find_group_src(pg, &src_ip); 2102 if (!ent) { 2103 ent = br_multicast_new_group_src(pg, &src_ip); 2104 if (ent) 2105 changed = true; 2106 } 2107 2108 if (ent) 2109 __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx)); 2110 } 2111 2112 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size, 2113 grec_type)) 2114 changed = true; 2115 2116 return changed; 2117 } 2118 2119 /* State Msg type New state Actions 2120 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 2121 * Delete (A-B) 2122 * Group Timer=GMI 2123 */ 2124 static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx, 2125 struct net_bridge_port_group *pg, void *h_addr, 2126 void *srcs, u32 nsrcs, size_t addr_size, 2127 int grec_type) 2128 { 2129 struct net_bridge_group_src *ent; 2130 struct br_ip src_ip; 2131 u32 src_idx; 2132 2133 hlist_for_each_entry(ent, &pg->src_list, node) 2134 ent->flags |= BR_SGRP_F_DELETE; 2135 2136 memset(&src_ip, 0, sizeof(src_ip)); 2137 src_ip.proto = pg->key.addr.proto; 2138 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 2139 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size); 2140 ent = br_multicast_find_group_src(pg, &src_ip); 2141 if (ent) 2142 ent->flags &= ~BR_SGRP_F_DELETE; 2143 else 2144 ent = br_multicast_new_group_src(pg, &src_ip); 2145 if 
(ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
/* Apply an IS_EXCLUDE record to a group already in EXCLUDE mode.
 * Existing sources not present in the report keep their DELETE mark and are
 * removed at the end; newly reported sources are created with their timer set
 * to the group membership interval (GMI).  Returns true if the source list
 * changed so the caller can send an mdb netlink notification.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* provisionally mark all current sources for deletion; the loop below
	 * clears the mark on every source that is still being reported
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y)=GMI */
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Dispatch an IS_EXCLUDE record based on the group's current filter mode.
 * Regardless of the previous mode the group ends up in EXCLUDE mode with its
 * group timer refreshed to GMI.  Returns true if anything changed.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		/* mode switch INCLUDE -> EXCLUDE is always a change */
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
/* Apply a TO_INCLUDE record while in INCLUDE mode: refresh/add the reported
 * sources and send group-and-source queries for the sources that were not
 * re-reported.  Returns true if the source list changed.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* mark everything as a query candidate (A-B); re-reported sources get
	 * the mark cleared below
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			/* (B)=GMI */
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs,
				u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only active (timer-pending, i.e. in X) sources are candidates for
	 * the group-and-source query Q(G,X-A)
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			/* (A)=GMI */
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	/* Send Q(G) */
	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Dispatch a TO_INCLUDE record based on the group's current filter mode.
 * May delete the whole port group when explicit host tracking (EHT) says no
 * hosts remain; in that case pg must not be touched afterwards, hence the
 * forced "changed = false" return.
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
/* Apply a TO_EXCLUDE record while in INCLUDE mode.  No change indication is
 * returned: the caller (br_multicast_toex) always reports a change on the
 * INCLUDE -> EXCLUDE transition.
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* mark all for deletion (A-B); re-reported sources (A*B) flip to SEND */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
/* Apply a TO_EXCLUDE record while already in EXCLUDE mode.  New sources
 * inherit the current group timer ((A-X-Y)=Group Timer); active ones are
 * queried.  Returns true if the source list changed.
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx,
bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
			  struct net_bridge_mcast_port *pmctx,
			  struct net_bridge_port_group *pg, void *h_addr,
			  void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* source is in A*B -> query it */
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 */
/* Apply a BLOCK record while in EXCLUDE mode: unknown blocked sources are
 * added with the group's remaining timer, active ones are queried.
 * Returns true if the source list changed.
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y)=Group Timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Dispatch a BLOCK record based on the current filter mode.  If the group
 * ends up with no sources in INCLUDE mode, or EHT tracking says no hosts
 * remain, the whole port group is deleted; pg must not be used afterwards,
 * hence the forced "changed = false" return in that case.
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* Find the port group entry for port p (and optional host source MAC) in an
 * mdb entry's port list.  Must be called with br->multicast_lock held
 * (mlock_dereference).  Returns NULL when not found.
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Parse and apply all group records of an IGMPv3 membership report.
 * When the bridge runs in IGMPv2 compatibility mode (or there is no port
 * context) only basic join/leave processing is done; otherwise the full
 * per-source state machines above are driven under br->multicast_lock.
 * Returns 0 on success or a negative errno on malformed packets.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* validate each record header before touching it */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* IGMPv3 sources are IPv4 addresses, 4 bytes each */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not errors */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* TO_IN/IS_IN with no sources is a leave in
			 * v2-compat mode
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		if (!pmctx || igmpv2)
			continue;

		/* full IGMPv3 per-source processing below */
		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_igmp3_report: parse and apply all
 * group records of an MLDv2 report, with MLDv1 compatibility handling.
 * Returns 0 on success or a negative errno on malformed packets.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		/* read grec_nsrcs safely before pulling the whole record */
		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not errors */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* TO_IN/IS_IN with no sources is a leave in
			 * v1-compat mode
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!pmctx || mldv1)
			continue;

		/* full MLDv2 per-source processing below */
		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif

/* Querier election: decide whether the querier that sent from saddr wins
 * against the currently selected one (numerically lowest source address
 * wins; an election also happens when neither own nor other querier timer
 * is running).  On a win the querier record is updated and true is
 * returned.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	int port_ifidx = pmctx ?
			 pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		/* no querier yet, or new address is lower -> it wins */
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* neither we nor the selected querier are active -> re-elect */
	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}

/* Resolve the currently selected querier's bridge port from its recorded
 * ifindex.  Returns NULL when no port is recorded, the device is gone, or
 * it no longer belongs to this bridge.  Caller must hold rtnl or rcu.
 */
static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
				const struct bridge_mcast_querier *querier)
{
	int port_ifidx = READ_ONCE(querier->port_ifidx);
	struct net_bridge_port *p;
	struct net_device *dev;

	if (port_ifidx == 0)
		return NULL;

	dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
	if (!dev)
		return NULL;
	p = br_port_get_rtnl_rcu(dev);
	if (!p || p->br != br)
		return NULL;

	return p;
}

/* Worst-case netlink size of the querier state dump below; must be kept in
 * sync with br_multicast_dump_querier_state().
 */
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +		/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +    /* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +		 /* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) +	 /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}

/* protected by rtnl or rcu */
/* Dump the IPv4 (and, if enabled, IPv6) querier state into a nested netlink
 * attribute; the nest is cancelled again if it ends up empty.  Returns 0 on
 * success or -EMSGSIZE when the skb ran out of room.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* don't emit an empty nest */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

/* (Re)arm the other-querier-present timer; delay_time is only refreshed
 * when the timer wasn't already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

/* Offload notification: tell switchdev drivers whether this port is now a
 * multicast router port (deferred, no error handling by design here).
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Map a router-list node back to its bridge port; the containing member
 * (ip4_rlist vs ip6_rlist) depends on which list the node came from.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

/* Find the node to insert a new router port behind, keeping the list
 * ordered by port pointer value.  Returns NULL when the new entry belongs
 * at the head.
 */
static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)

{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		/* list is kept ordered by pointer value; stop at the first
		 * entry we don't sort after
		 */
		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

/* Return true if the port has no router entry for the *other* protocol
 * family (used to decide whether a notification is needed).  Without IPv6
 * there is no other family, so always true.
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already on the list -> nothing to do */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

/* Mark the bridge (pmctx == NULL) or a port as seeing a multicast router
 * and (re)arm the corresponding router timer.  Ports configured as
 * "disabled" or "permanent" router are left alone; "temp query" bridge
 * mode only refreshes the timer.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

/* IPv4 wrapper: pick the per-port or per-bridge timer/list and mark. */
static void br_ip4_multicast_mark_router(struct net_bridge_mcast
					 *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

/* IPv6 wrapper: pick the per-port or per-bridge timer/list and mark. */
static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

/* Another IPv4 querier was heard: if it wins the election, refresh the
 * other-querier timer and mark the ingress port as a router port.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_query_received. */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

/* Process a received IGMP query (v1/v2/v3 distinguished by length).
 * General queries feed the querier election; group-specific queries lower
 * the group/port membership timers so non-responding members expire
 * faster.  Runs under br->multicast_lock.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2 query; code is in 1/10 s units */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 has no max response time; treat as a
			 * general query with a 10s default
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query -> querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): && binds tighter than ?:, so the
		 * igmp_version/filter_mode check applies only to the
		 * try_to_del_timer_sync() branch - looks intentional here,
		 * confirm before restructuring
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query (v1/v2 distinguished by length); IPv6
 * counterpart of br_ip4_multicast_query.  Returns 0 or -EINVAL on a
 * truncated packet.  Runs under br->multicast_lock.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query -> querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 =
ipv6_hdr(skb)->saddr; 3385 3386 br_ip6_multicast_query_received(brmctx, pmctx, 3387 &brmctx->ip6_other_query, 3388 &saddr, max_delay); 3389 goto out; 3390 } else if (!group) { 3391 goto out; 3392 } 3393 3394 mp = br_mdb_ip6_get(brmctx->br, group, vid); 3395 if (!mp) 3396 goto out; 3397 3398 max_delay *= brmctx->multicast_last_member_count; 3399 if (mp->host_joined && 3400 (timer_pending(&mp->timer) ? 3401 time_after(mp->timer.expires, now + max_delay) : 3402 try_to_del_timer_sync(&mp->timer) >= 0)) 3403 mod_timer(&mp->timer, now + max_delay); 3404 3405 for (pp = &mp->ports; 3406 (p = mlock_dereference(*pp, brmctx->br)) != NULL; 3407 pp = &p->next) { 3408 if (timer_pending(&p->timer) ? 3409 time_after(p->timer.expires, now + max_delay) : 3410 try_to_del_timer_sync(&p->timer) >= 0 && 3411 (brmctx->multicast_mld_version == 1 || 3412 p->filter_mode == MCAST_EXCLUDE)) 3413 mod_timer(&p->timer, now + max_delay); 3414 } 3415 3416 out: 3417 spin_unlock(&brmctx->br->multicast_lock); 3418 return err; 3419 } 3420 #endif 3421 3422 static void 3423 br_multicast_leave_group(struct net_bridge_mcast *brmctx, 3424 struct net_bridge_mcast_port *pmctx, 3425 struct br_ip *group, 3426 struct bridge_mcast_other_query *other_query, 3427 struct bridge_mcast_own_query *own_query, 3428 const unsigned char *src) 3429 { 3430 struct net_bridge_mdb_entry *mp; 3431 struct net_bridge_port_group *p; 3432 unsigned long now; 3433 unsigned long time; 3434 3435 spin_lock(&brmctx->br->multicast_lock); 3436 if (!br_multicast_ctx_should_use(brmctx, pmctx)) 3437 goto out; 3438 3439 mp = br_mdb_ip_get(brmctx->br, group); 3440 if (!mp) 3441 goto out; 3442 3443 if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) { 3444 struct net_bridge_port_group __rcu **pp; 3445 3446 for (pp = &mp->ports; 3447 (p = mlock_dereference(*pp, brmctx->br)) != NULL; 3448 pp = &p->next) { 3449 if (!br_port_group_equal(p, pmctx->port, src)) 3450 continue; 3451 3452 if (p->flags & MDB_PG_FLAGS_PERMANENT) 3453 break; 3454 
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is active; it will handle last-member queries */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		/* we are the querier: send a group-specific query and arm
		 * the last-member timers
		 */
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			/* shorten-only update, racing timer handled via
			 * try_to_del_timer_sync()
			 */
			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host-side leave: only the bridge's own membership timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IGMP leave for @group/@vid: build the br_ip key and delegate to the
 * common leave handler. Link-local groups (224.0.0.x) are never aged.
 */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD done for @group/@vid; the all-nodes link-local group is skipped. */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

/* Bump the per-cpu IGMP/MLD parse-error counter for the port (or the
 * bridge itself when @p is NULL), if stats are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIM Hello on this port implies a multicast router is attached:
 * validate the PIM header and mark the port as a router port.
 */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IPv4 Multicast Router Discovery advertisement: mark the sending port as
 * a router port. Returns -ENOMSG for packets that are not MRD adverts.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

/* Snoop one IPv4 multicast frame: validate it with ip_mc_check_igmp() and
 * dispatch IGMP reports/queries/leaves to the matching handlers. Non-IGMP
 * frames may still be flagged mrouters_only or treated as PIM/MRD.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* not IGMP: decide forwarding hints based on destination */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 Multicast Router Discovery advertisement: mark the port as a
 * router port.
 */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* Snoop one IPv6 multicast frame; MLD counterpart of
 * br_multicast_ipv4_rcv(). -ENODATA from ipv6_mc_check_mld() indicates a
 * hop-by-hop option without an MLD message (may still be an MRD advert).
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Entry point for multicast snooping on RX. May retarget *brmctx/*pmctx to
 * the per-vlan contexts when vlan snooping is enabled, then dispatches by
 * ethertype.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer expiry: send the next (startup) query for this context.
 * NOTE(review): @querier is currently unused in this body.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

/* Timer callback for the IPv4 own-query timer. */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback for the IPv6 own-query timer. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

/* Deferred garbage collection: splice out the pending gc list under the
 * lock, then free the entries outside it.
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

/* Initialize one multicast context (bridge-wide when @vlan is NULL, or
 * per-vlan): protocol defaults, intervals and all timers.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	/* interval defaults, in jiffies */
	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

/* Tear down a multicast context: stop all of its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

/* One-time bridge-level multicast initialization. */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device so
 * MRD advertisements are delivered to us.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

/* Join both all-snoopers groups; see the locking note in
 * br_multicast_toggle() about when this may be called.
 */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

/* Leave the IPv4 all-snoopers group; in_dev must exist since we joined. */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a). */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

/* Leave both all-snoopers groups. */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

/* Restart the startup-query sequence for one own-query timer, firing it
 * immediately (only when multicast is enabled).
 */
static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

/* Kick off the IPv4 (and IPv6, if enabled) own-query timers. */
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

/* Bring up multicast on the bridge: either every enabled per-vlan context
 * (vlan snooping mode) or the single bridge-wide context.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

/* Synchronously stop every timer belonging to one multicast context. */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

/* Enable/disable multicast processing for a single vlan entry (bridge
 * ("master") vlan or port vlan), flipping BR_VLFLAG_MCAST_ENABLED and
 * starting/stopping the matching context.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

/* Toggle multicast for a master vlan and every matching per-port vlan. */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}

/* Switch between per-vlan and bridge-wide multicast snooping. Requires
 * vlan filtering when enabling; swaps which contexts are active.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

/* Flip the global per-vlan multicast flag. Returns true if the state
 * actually changed.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}

/* Stop multicast processing: mirrors br_multicast_open() for shutdown. */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}

/* Bridge device teardown: flush the mdb, run gc synchronously and wait
 * for outstanding RCU callbacks.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}

/* Configure the bridge-level multicast router type; only DISABLED, PERM
 * and TEMP_QUERY are valid here, anything else returns -EINVAL.
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

/* Send the RTM_DELMDB router notification once the port stopped being a
 * router for both address families.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

/* Configure the per-port (or per-port-vlan) multicast router type;
 * setting the same TEMP value again just refreshes the router timers.
 */
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
4296 break; 4297 case MDB_RTR_TYPE_PERM: 4298 pmctx->multicast_router = MDB_RTR_TYPE_PERM; 4299 del_timer(&pmctx->ip4_mc_router_timer); 4300 br_ip4_multicast_add_router(brmctx, pmctx); 4301 #if IS_ENABLED(CONFIG_IPV6) 4302 del_timer(&pmctx->ip6_mc_router_timer); 4303 #endif 4304 br_ip6_multicast_add_router(brmctx, pmctx); 4305 break; 4306 case MDB_RTR_TYPE_TEMP: 4307 pmctx->multicast_router = MDB_RTR_TYPE_TEMP; 4308 br_ip4_multicast_mark_router(brmctx, pmctx); 4309 br_ip6_multicast_mark_router(brmctx, pmctx); 4310 break; 4311 default: 4312 goto unlock; 4313 } 4314 err = 0; 4315 unlock: 4316 spin_unlock_bh(&brmctx->br->multicast_lock); 4317 4318 return err; 4319 } 4320 4321 int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router) 4322 { 4323 int err; 4324 4325 if (br_vlan_is_master(v)) 4326 err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router); 4327 else 4328 err = br_multicast_set_port_router(&v->port_mcast_ctx, 4329 mcast_router); 4330 4331 return err; 4332 } 4333 4334 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx, 4335 struct bridge_mcast_own_query *query) 4336 { 4337 struct net_bridge_port *port; 4338 4339 if (!br_multicast_ctx_matches_vlan_snooping(brmctx)) 4340 return; 4341 4342 __br_multicast_open_query(brmctx->br, query); 4343 4344 rcu_read_lock(); 4345 list_for_each_entry_rcu(port, &brmctx->br->port_list, list) { 4346 struct bridge_mcast_own_query *ip4_own_query; 4347 #if IS_ENABLED(CONFIG_IPV6) 4348 struct bridge_mcast_own_query *ip6_own_query; 4349 #endif 4350 4351 if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx)) 4352 continue; 4353 4354 if (br_multicast_ctx_is_vlan(brmctx)) { 4355 struct net_bridge_vlan *vlan; 4356 4357 vlan = br_vlan_find(nbp_vlan_group_rcu(port), 4358 brmctx->vlan->vid); 4359 if (!vlan || 4360 br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx)) 4361 continue; 4362 4363 ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query; 4364 #if IS_ENABLED(CONFIG_IPV6) 4365 
ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query; 4366 #endif 4367 } else { 4368 ip4_own_query = &port->multicast_ctx.ip4_own_query; 4369 #if IS_ENABLED(CONFIG_IPV6) 4370 ip6_own_query = &port->multicast_ctx.ip6_own_query; 4371 #endif 4372 } 4373 4374 if (query == &brmctx->ip4_own_query) 4375 br_multicast_enable(ip4_own_query); 4376 #if IS_ENABLED(CONFIG_IPV6) 4377 else 4378 br_multicast_enable(ip6_own_query); 4379 #endif 4380 } 4381 rcu_read_unlock(); 4382 } 4383 4384 int br_multicast_toggle(struct net_bridge *br, unsigned long val, 4385 struct netlink_ext_ack *extack) 4386 { 4387 struct net_bridge_port *port; 4388 bool change_snoopers = false; 4389 int err = 0; 4390 4391 spin_lock_bh(&br->multicast_lock); 4392 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) 4393 goto unlock; 4394 4395 err = br_mc_disabled_update(br->dev, val, extack); 4396 if (err == -EOPNOTSUPP) 4397 err = 0; 4398 if (err) 4399 goto unlock; 4400 4401 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); 4402 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { 4403 change_snoopers = true; 4404 goto unlock; 4405 } 4406 4407 if (!netif_running(br->dev)) 4408 goto unlock; 4409 4410 br_multicast_open(br); 4411 list_for_each_entry(port, &br->port_list, list) 4412 __br_multicast_enable_port_ctx(&port->multicast_ctx); 4413 4414 change_snoopers = true; 4415 4416 unlock: 4417 spin_unlock_bh(&br->multicast_lock); 4418 4419 /* br_multicast_join_snoopers has the potential to cause 4420 * an MLD Report/Leave to be delivered to br_multicast_rcv, 4421 * which would in turn call br_multicast_add_group, which would 4422 * attempt to acquire multicast_lock. This function should be 4423 * called after the lock has been released to avoid deadlocks on 4424 * multicast_lock. 4425 * 4426 * br_multicast_leave_snoopers does not have the problem since 4427 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and 4428 * returns without calling br_multicast_ipv4/6_rcv if it's not 4429 * enabled. 
	 * Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}

/* Report whether multicast snooping is enabled on the bridge device */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

/* Report whether the bridge itself currently acts as a multicast router */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

/* Enable/disable the own querier for @brmctx. When enabling, arm the
 * other-querier delay windows and start own queries for both families.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	/* WRITE_ONCE pairs with lockless readers of multicast_querier */
	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

/* Set the IGMP version used by this multicast context; returns -EINVAL for
 * unsupported versions.
 */
int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_igmp_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Set the MLD version used by this multicast context; returns -EINVAL for
 * unsupported versions.
 */
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
				 unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_mld_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
#endif

/* Set the multicast query interval, clamping to the allowed minimum and
 * logging when the requested value was too small.
 */
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}

/* Set the multicast startup query interval, clamping to the allowed minimum
 * and logging when the requested value was too small.
 */
void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* skip the port we were called for */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				/* out of memory: return what we have */
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists
anywhere on the bridged link layer. 4631 * Otherwise returns false. 4632 */ 4633 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 4634 { 4635 struct net_bridge *br; 4636 struct net_bridge_port *port; 4637 struct ethhdr eth; 4638 bool ret = false; 4639 4640 rcu_read_lock(); 4641 if (!netif_is_bridge_port(dev)) 4642 goto unlock; 4643 4644 port = br_port_get_rcu(dev); 4645 if (!port || !port->br) 4646 goto unlock; 4647 4648 br = port->br; 4649 4650 memset(ð, 0, sizeof(eth)); 4651 eth.h_proto = htons(proto); 4652 4653 ret = br_multicast_querier_exists(&br->multicast_ctx, ð, NULL); 4654 4655 unlock: 4656 rcu_read_unlock(); 4657 return ret; 4658 } 4659 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 4660 4661 /** 4662 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 4663 * @dev: The bridge port adjacent to which to check for a querier 4664 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 4665 * 4666 * Checks whether the given interface has a bridge on top and if so returns 4667 * true if a selected querier is behind one of the other ports of this 4668 * bridge. Otherwise returns false. 
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		/* A querier is "adjacent" only if the other-querier timer is
		 * still running and the selected querier is not @dev itself.
		 */
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		/* unknown protocol family */
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			/* only routers behind *other* ports count */
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);

/* Account one IGMP/MLD packet of @type in the per-cpu stats, bucketed by
 * direction (@dir).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				/* longer than a plain igmphdr: v3 query */
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* max-resp code of 0 means a v1 query */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* ICMPv6 length = payload + fixed header - extension hdrs */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* longer than a plain mld_msg: MLDv2 query */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

/* Count an IGMP/MLD packet against the port stats when @p is set, else
 * against the bridge-wide stats. No-op if stats are disabled or @type is 0.
 */
void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

/* Allocate the bridge-wide per-cpu multicast statistics */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void
br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

/* Sum the per-cpu multicast stats (port stats if @p, else bridge stats)
 * into @dest, using the u64_stats seqcount for a consistent per-cpu read.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry until we read a consistent per-cpu snapshot */
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports,
				    temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}

/* Initialize the S,G-port and mdb rhashtables. On failure nothing is left
 * allocated (the first table is destroyed if the second init fails).
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

/* Tear down both multicast rhashtables */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}