// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"

/* MDB hash table: net_bridge_mdb_entry keyed by its group address
 * (struct br_ip, which includes protocol, vid and optional source).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

/* S,G port group hash table: net_bridge_port_group keyed by its
 * {port, address} pair (struct net_bridge_port_group_sg_key).
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

/* forward declarations for helpers defined later in this file */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);

/* Look up the S,G port group entry for the given {port, address} key.
 * Caller must hold br->multicast_lock.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

/* MDB lookup for RCU contexts (e.g. the rx fast path); caller must be
 * inside an RCU read-side critical section.
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* MDB lookup under br->multicast_lock; takes a short RCU read lock
 * internally only to satisfy the rhashtable API.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Convenience wrapper: build an IPv4 *,G br_ip key and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: build an IPv6 *,G br_ip key and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Fast-path MDB lookup for a received skb. When IGMPv3/MLDv2 is in use,
 * first try an S,G match on the packet's source address and fall back to
 * the *,G entry; non-IP protocols match on the destination MAC address.
 * Returns NULL if snooping is disabled or the skb is an IGMP/MLD packet.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* try the S,G entry first, then fall back to *,G */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* try the S,G entry first, then fall back to *,G */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g.
 timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}

/* when snooping we need to check if the contexts should be used
 * in the following order:
 * - if pmctx is non-NULL (port), check if it should be used
 * - if pmctx is NULL (bridge), check if brmctx should be used
 */
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
			    const struct net_bridge_mcast_port *pmctx)
{
	if (!netif_running(brmctx->br->dev))
		return false;

	if (pmctx)
		return !br_multicast_port_ctx_state_disabled(pmctx);
	else
		return !br_multicast_ctx_vlan_disabled(brmctx);
}

/* Match a port group against {port, src}; the source MAC is only
 * significant when the port does multicast-to-unicast replication.
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Install an automatic (kernel-managed) S,G entry mirroring a *,G EXCLUDE
 * port group, unless one already exists for this {port, S,G}.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

/* Remove an automatically installed (STAR_EXCL, kernel-managed) S,G entry;
 * user-managed entries are left alone.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk every other port group of this *,G and sync its installed
	 * sources with pg according to the new filter mode
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	/* propagate the *,G host join to the matching S,G entry */
	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	/* for every installed source of every port group of this *,G,
	 * mirror the *,G host_joined state into the matching S,G entry
	 */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

/* Delete the automatically installed (STAR_EXCL) port groups of an S,G
 * entry once only such (or permanent) entries remain on it.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* drop all non-permanent port groups from this S,G */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

/* Add the S,G entry 'sg' to every EXCLUDE-mode port of the matching *,G
 * entry that doesn't already have one, marking the new entries STAR_EXCL.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}

/* Install the S,G forwarding entry for a group source (if not already
 * installed) and hook it up to the *,G exclude ports.
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	/* blocked when the source timer isn't running */
	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

/* Remove the S,G forwarding entry belonging to a group source, unless it
 * is a user-installed permanent entry; clears BR_SGRP_F_INSTALLED.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if
 (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only if the blocked state actually changed */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

/* GC callback: final destruction of an mdb entry after its RCU grace
 * period machinery has been queued (see br_multicast_del_mdb_entry).
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

/* Unlink an mdb entry from the hash table and lists, then hand it to the
 * deferred GC work for destruction.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* Timer handler: the mdb entry's membership timed out - drop the host
 * join and, if no ports remain, delete the whole entry.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

/* GC callback: final destruction of a group source entry. */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}

/* Remove a group source: tear down its S,G forwarding entry, unlink it
 * from its port group and queue it for deferred destruction.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src, fastleave);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* GC callback: final destruction of a port group. */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

/* Delete port group 'pg' of mdb entry 'mp'; 'pp' points to the link that
 * references pg in mp's port list. Also cleans up all of pg's sources,
 * its S,G/STAR_EXCL bookkeeping, notifies user-space and queues the pg
 * for deferred destruction.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	/* last reference gone: let the group expiry timer clean up mp */
	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

/* Look up pg's mdb entry and the link pointing at pg, then delete it via
 * br_multicast_del_pg(); warns if pg is not found where expected.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

/* Timer handler: a port group's membership timed out. Expired sources are
 * removed, the group falls back to INCLUDE mode, and the pg is deleted
 * entirely when no sources remain.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent, false);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* Run all queued GC entries, invoking each entry's destroy callback. */
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}

/* Tag a locally generated query skb with the context's vlan, unless the
 * vlan is egress-untagged.
 */
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan =
 brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}

/* Build an IGMP query skb (v2 general/group query or v3 group-and-source
 * query). For v3 with sources, only sources whose timer is on the
 * requested side of the last-member-query time (over_lmqt) and which
 * still have retransmissions pending are included; returns NULL when
 * nothing needs to be sent or the packet would exceed the MTU.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* count the sources that will be included */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + ip (with 4-byte router alert option) + igmp */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;		/* 24 bytes: header + router alert option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* router alert option directly after the header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* fill in the selected sources, consuming one rexmit each */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_alloc_query(): build an MLDv1 or
 * MLDv2 query skb, with the same over_llqt source-selection semantics.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ?
pg->key.port : NULL; 956 struct net_bridge_group_src *ent; 957 size_t pkt_size, mld_hdr_size; 958 unsigned long now = jiffies; 959 struct mld2_query *mld2q; 960 void *csum_start = NULL; 961 unsigned long interval; 962 __sum16 *csum = NULL; 963 struct ipv6hdr *ip6h; 964 struct mld_msg *mldq; 965 struct sk_buff *skb; 966 unsigned long llqt; 967 struct ethhdr *eth; 968 u16 llqt_srcs = 0; 969 u8 *hopopt; 970 971 mld_hdr_size = sizeof(*mldq); 972 if (brmctx->multicast_mld_version == 2) { 973 mld_hdr_size = sizeof(*mld2q); 974 if (pg && with_srcs) { 975 llqt = now + (brmctx->multicast_last_member_interval * 976 brmctx->multicast_last_member_count); 977 hlist_for_each_entry(ent, &pg->src_list, node) { 978 if (over_llqt == time_after(ent->timer.expires, 979 llqt) && 980 ent->src_query_rexmit_cnt > 0) 981 llqt_srcs++; 982 } 983 984 if (!llqt_srcs) 985 return NULL; 986 mld_hdr_size += llqt_srcs * sizeof(struct in6_addr); 987 } 988 } 989 990 pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size; 991 if ((p && pkt_size > p->dev->mtu) || 992 pkt_size > brmctx->br->dev->mtu) 993 return NULL; 994 995 skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size); 996 if (!skb) 997 goto out; 998 999 __br_multicast_query_handle_vlan(brmctx, pmctx, skb); 1000 skb->protocol = htons(ETH_P_IPV6); 1001 1002 /* Ethernet header */ 1003 skb_reset_mac_header(skb); 1004 eth = eth_hdr(skb); 1005 1006 ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr); 1007 eth->h_proto = htons(ETH_P_IPV6); 1008 skb_put(skb, sizeof(*eth)); 1009 1010 /* IPv6 header + HbH option */ 1011 skb_set_network_header(skb, skb->len); 1012 ip6h = ipv6_hdr(skb); 1013 1014 *(__force __be32 *)ip6h = htonl(0x60000000); 1015 ip6h->payload_len = htons(8 + mld_hdr_size); 1016 ip6h->nexthdr = IPPROTO_HOPOPTS; 1017 ip6h->hop_limit = 1; 1018 ip6h->daddr = *ip6_dst; 1019 if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev, 1020 &ip6h->daddr, 0, &ip6h->saddr)) { 1021 kfree_skb(skb); 1022 br_opt_toggle(brmctx->br, 
BROPT_HAS_IPV6_ADDR, false); 1023 return NULL; 1024 } 1025 1026 br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true); 1027 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 1028 1029 hopopt = (u8 *)(ip6h + 1); 1030 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 1031 hopopt[1] = 0; /* length of HbH */ 1032 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 1033 hopopt[3] = 2; /* Length of RA Option */ 1034 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 1035 hopopt[5] = 0; 1036 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ 1037 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ 1038 1039 skb_put(skb, sizeof(*ip6h) + 8); 1040 1041 /* ICMPv6 */ 1042 skb_set_transport_header(skb, skb->len); 1043 interval = ipv6_addr_any(group) ? 1044 brmctx->multicast_query_response_interval : 1045 brmctx->multicast_last_member_interval; 1046 *igmp_type = ICMPV6_MGM_QUERY; 1047 switch (brmctx->multicast_mld_version) { 1048 case 1: 1049 mldq = (struct mld_msg *)icmp6_hdr(skb); 1050 mldq->mld_type = ICMPV6_MGM_QUERY; 1051 mldq->mld_code = 0; 1052 mldq->mld_cksum = 0; 1053 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 1054 mldq->mld_reserved = 0; 1055 mldq->mld_mca = *group; 1056 csum = &mldq->mld_cksum; 1057 csum_start = (void *)mldq; 1058 break; 1059 case 2: 1060 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1061 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval)); 1062 mld2q->mld2q_type = ICMPV6_MGM_QUERY; 1063 mld2q->mld2q_code = 0; 1064 mld2q->mld2q_cksum = 0; 1065 mld2q->mld2q_resv1 = 0; 1066 mld2q->mld2q_resv2 = 0; 1067 mld2q->mld2q_suppress = sflag; 1068 mld2q->mld2q_qrv = 2; 1069 mld2q->mld2q_nsrcs = htons(llqt_srcs); 1070 mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ; 1071 mld2q->mld2q_mca = *group; 1072 csum = &mld2q->mld2q_cksum; 1073 csum_start = (void *)mld2q; 1074 if (!pg || !with_srcs) 1075 break; 1076 1077 llqt_srcs = 0; 1078 hlist_for_each_entry(ent, &pg->src_list, node) { 1079 if (over_llqt == time_after(ent->timer.expires, 1080 llqt) && 1081 ent->src_query_rexmit_cnt > 0) 
{
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* sanity: the sources we just filled in must match the count
		 * we advertised in the MLDv2 header earlier
		 */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Allocate a membership query skb for @group, dispatching on the group's
 * protocol: IGMP for IPv4 groups, MLD for IPv6 groups (when enabled).
 * When @ip_dst is NULL the all-hosts/all-nodes destination is used.
 * Returns NULL for unknown protocols or on allocation failure.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* ff02::1 - all-nodes link-local address */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Look up the mdb entry for @group, creating it if it does not exist yet.
 * On hash table overflow (hash_max reached) multicast snooping is turned
 * off for the bridge and ERR_PTR(-E2BIG) is returned.  Uses GFP_ATOMIC,
 * callers hold br->multicast_lock (see br_multicast_add_group below).
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Timer callback: a (port group, source) entry's timer fired.  In INCLUDE
 * mode the source is removed (and the port group too if it was the last
 * source); in EXCLUDE mode forwarding state is just refreshed.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

/* Find the source entry with address @ip in @pg's source list, or NULL. */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate and link a new source entry for @pg.  Returns NULL when the
 * per-group source limit is hit, when @src_ip is not a valid unicast
 * source (zeronet/multicast for IPv4, any/multicast for IPv6), or on
 * allocation failure.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

/* Allocate a new port group entry for (@port, @group) and link it into the
 * port's mglist.  Non-star (S,G) groups are also inserted into the bridge's
 * sg_port rhashtable; a duplicate there fails the whole operation.
 * @src, when non-NULL, restricts the entry to a specific host MAC.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}

/* Mark @mp as joined by the bridge device itself (the local host) and
 * refresh its membership timer; L2 groups get no timer.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}

/* Clear the local host's membership of @mp, notifying userspace if asked. */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Core join handler: find or create the mdb entry for @group and attach
 * a port group for @pmctx's port (or do a host join when @pmctx is NULL).
 * Port groups are kept sorted by descending port pointer; an existing
 * matching entry just gets its timer refreshed (IGMPv2/MLDv1 only).
 * Returns the port group, NULL for host joins, or an ERR_PTR.
 * Called with br->multicast_lock held (see br_multicast_add_group).
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}

/* Locked wrapper around __br_multicast_add_group(); returns 0 or -errno. */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}

/* IPv4 join entry point: ignore link-local groups, then hand off to the
 * generic add path.  IGMPv2 reports imply EXCLUDE{} (any-source) mode.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 join entry point: ignore the all-nodes group, then hand off to the
 * generic add path.  MLDv1 reports imply EXCLUDE{} (any-source) mode.
 */
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ?
		      MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif

/* Unlink a router port list entry; returns true if it was actually linked. */
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}

/* Common handler for a per-port router timer expiry: remove the port from
 * the router list unless the router type is administratively fixed
 * (disabled/permanent) or the timer was re-armed meanwhile.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

/* Propagate the bridge's own mrouter state to offloading drivers via
 * switchdev (deferred, best effort - return value is ignored).
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* The bridge itself stopped seeing queries; clear the mrouter state unless
 * it is administratively fixed or the other family still sees a router.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif

/* The foreign querier's presence timer expired - take over as querier
 * ourselves (unless the context is down or snooping is disabled).
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

/* Record our own source address (taken from the query skb we are about to
 * send to ourselves) as the context's querier address.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and emit one query.  With @pmctx set the query is transmitted out
 * of that port; otherwise it is looped back into the bridge (we act as the
 * querier ourselves).  When sending a group-and-source query with the
 * suppress flag set, a second query is sent for the sources still under
 * the last-member-query-time (the over/under split of the source list).
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Consistent snapshot of a querier's state using its seqcount. */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}

/* Publish a new querier (ifindex + address) under the seqcount write side. */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}

/* Send a general query for the family that @own_query belongs to, unless a
 * foreign querier is still alive, then re-arm the own-query timer (with a
 * shorter interval during the startup phase).
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (pmctx ?
	    (own_query == &pmctx->ip4_own_query) :
	    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Per-port own-query timer handler: count a startup query if still in the
 * startup phase and (re)send a general query for this port.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif

/* Retransmit timer for a port group: while we are the active querier,
 * resend pending group queries (grp_query_rexmit_cnt) and group-and-source
 * queries, re-arming the timer while either still has work left.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

/* Tell offloading drivers about the snooping enabled/disabled state; note
 * the attribute carries the inverted value (mc_disabled = !enabled).
 */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}

/* Initialize a per-port (or per-port-vlan when @vlan is set) multicast
 * context: default router type and the router/own-query timers.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}

/* Tear down a port context: synchronously stop its router timers. */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}

/* Set up multicast state when @port is added to a bridge.  An -EOPNOTSUPP
 * from switchdev just means no offload - it is not an error.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Tear down all multicast state when @port leaves the bridge: delete its
 * remaining port groups, run the GC synchronously, then free resources.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}

/* Restart an own-query cycle: reset the startup counter and fire the timer
 * immediately if it was (or could be) stopped.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Enable querying on a port context and, for a permanently configured
 * router port, re-add it to the router lists.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}

/* Disable a port context: drop its non-permanent groups (restricted to the
 * vlan for per-vlan contexts), remove it from the router lists and stop
 * its timers, notifying about the router-port removal if one happened.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}

/* Delete all source entries flagged BR_SGRP_F_DELETE; returns the count. */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

/* Re-arm a source timer and refresh its forwarding state together. */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

/* Lower the timers of all sources flagged BR_SGRP_F_SEND to the
 * last-member query time and, when we are the active querier, send a
 * group-and-source specific query and schedule its retransmissions.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

/* Send a group-specific query (when we are the active querier), schedule
 * its retransmissions, and lower the group timer to the last-member query
 * time for EXCLUDE-mode groups.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)
 *                                                (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	/* add any new sources and refresh every listed source's timer to
	 * the group membership interval (GMI)
	 */
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark sources also present in
	 * the report - leftovers (A-B) are deleted at the end
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Handle an IS_EX record: apply the mode-specific source update, switch the
 * port group to EXCLUDE mode and refresh the group timer to GMI.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	/* IS_EX always moves the group to EXCLUDE and refreshes the
	 * group timer (Group Timer=GMI).
	 */
	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Mark all known sources for querying; sources present in the
	 * report are unmarked below (only A-B gets queried).
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		/* (B)=GMI: refresh the timer of every reported source */
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs,
u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* In EXCLUDE mode only sources with a running timer (set X) are
	 * candidates for Send Q(G,X-A).
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		/* (A)=GMI */
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	/* Send Q(G) */
	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Process a TO_INCLUDE group record, dispatching on the current filter
 * mode of the port group.
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* Default every known source to "delete, don't query"; reported
	 * sources flip to "keep and query" below.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx,
to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* Mark all known sources for deletion; reported sources are
	 * unmarked below (Delete (X-A), Delete (Y-A)).
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			/* (A-X-Y)=Group Timer: new sources inherit the
			 * group timer's expiry
			 */
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* Send Q(G,A-Y): only sources with a running timer */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Process a TO_EXCLUDE group record; the group always ends up in
 * EXCLUDE mode with a refreshed group timer.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	/* Group Timer=GMI */
	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
static
bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
			  struct net_bridge_mcast_port *pmctx,
			  struct net_bridge_port_group *pg, void *h_addr,
			  void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* Query only the intersection A*B: clear the flag everywhere,
	 * then set it for sources that appear in the report.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			/* (A-X-Y)=Group Timer: new sources inherit the
			 * group timer's expiry
			 */
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
pg->timer.expires);
				changed = true;
			}
		}
		/* Send Q(G,A-Y): only sources with a running timer */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Process a BLOCK_OLD_SOURCES group record, dispatching on the current
 * filter mode; may delete the whole port group when it ends up empty.
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* Find the port group entry of mdb entry @mp that matches port @p and
 * source MAC @src, or NULL if none.
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	/* Walk each group record, validating that the record header and
	 * its source list are actually present in the skb before use.
	 */
	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* each IGMPv3 source address is 4 bytes */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* an empty INCLUDE record acts as a leave when
			 * snooping in IGMPv2 mode or on the host itself
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		if (!pmctx || igmpv2)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	/* Walk each group record; the source count is read via
	 * skb_header_pointer() before the full record is pulled.
	 */
	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* an empty INCLUDE record acts as a leave when
			 * snooping in MLDv1 mode or on the host itself
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!pmctx || mldv1)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif

/* Decide whether @saddr should become the selected querier; lower
 * addresses win, and an empty/zero current querier is always replaced.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	int port_ifidx = pmctx ?
pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* no election in progress on either side - take over */
	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}

/* Resolve the bridge port the selected querier was seen on, or NULL if
 * the ifindex is unset, gone, or no longer belongs to this bridge.
 */
static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
				const struct bridge_mcast_querier *querier)
{
	int port_ifidx = READ_ONCE(querier->port_ifidx);
	struct net_bridge_port *p;
	struct net_device *dev;

	if (port_ifidx == 0)
		return NULL;

	dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
	if (!dev)
		return NULL;
	p = br_port_get_rtnl_rcu(dev);
	if (!p || p->br != br)
		return NULL;

	return p;
}

/* Worst-case netlink size of the querier state dump. */
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +	/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +	/* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +	/* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}

/* protected by rtnl or rcu */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	/* timer and port are only dumped while another querier is active */
	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* drop the nest entirely if nothing was dumped into it */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

/* (Re)arm the other-querier-present timer after a query was received. */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

/* Propagate the port's multicast-router state to switchdev offload. */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Map a router-list node back to its bridge port; which hlist member to
 * use depends on whether the list is the IPv6 or the IPv4 router list.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

/* Find the node to insert @port behind so the router list stays ordered
 * by (descending) port pointer value; NULL means insert at the head.
 */
static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)

{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

/* True when the port is not on the other protocol family's router list
 * (i.e. this insertion is the first of the two); without IPv6 support
 * there is no other list, so always true.
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already on the list - nothing to do */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

/* Mark the bridge itself (pmctx == NULL) or a port as a multicast
 * router and (re)arm the corresponding router timer.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		/* bridge-level router state, only for the temp-query mode */
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	/* ports pinned to disabled/permanent are not updated dynamically */
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

static void br_ip4_multicast_mark_router(struct net_bridge_mcast
*brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

/* An IGMP query was seen: run querier election and, on success, refresh
 * the other-querier timer and mark the source as an IPv4 mrouter.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query_received(). */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len =
ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv2: max resp time in units of 1/10 s */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 (code == 0): fixed 10 s response time,
			 * always a general query
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* skip group-and-source queries and suppressed v3
		 * group-specific queries
		 */
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: run querier election / refresh state */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	/* shorten membership timers, but never extend a pending one */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query().  Returns 0, or -EINVAL if
 * the query is too short to pull from the skb.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* skip suppressed MLDv2 group-specific queries */
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 =
ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	/* shorten membership timers, but never extend a pending one */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

/* Common IGMP/MLD leave handling: with fast-leave the port group is
 * deleted immediately; otherwise the membership timers are lowered (and,
 * when we act as querier, a group-specific query is sent first).
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			/* permanent (static) entries survive fast-leave */
			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is elected - let it handle the leave */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
		       brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
	       brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host leave: lower the host-joined entry timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

/* Handle an IGMP leave for @group on @vid; link-local groups are ignored */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_leave_group() */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ?
&pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

/* Bump the per-cpu IGMP/MLD parse-error counter for @proto when multicast
 * stats are enabled.  @p may be NULL for bridge-level (host) traffic.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIMv2 Hello on a port implies a multicast router behind that port */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IPv4 Multicast Router Discovery advertisement: mark the ingress port as
 * a router port.  Returns -ENOMSG for non-MRD-advertisement packets.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

/* Parse and dispatch an IPv4 multicast control packet (IGMP/PIM/MRD) */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* not IGMP: non-link-local traffic goes to mrouter ports
		 * only; PIM hellos and MRD advertisements mark routers
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 Multicast Router Discovery advertisement: mark a router port */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* Parse and dispatch an IPv6 multicast control packet (MLD/MRD) */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* RX entry point for multicast control traffic.  May retarget *brmctx and
 * *pmctx to the per-vlan contexts when vlan multicast snooping is enabled.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer expiry: send the next (startup) query unless the vlan
 * context was disabled in the meantime.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

/* Deferred destruction of entries collected on the gc list under the lock */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

/* Initialize a multicast context with protocol defaults and its timers.
 * @vlan is NULL for the bridge-wide context, non-NULL for per-vlan ones.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

/* Bridge-level multicast init: defaults, bridge context, gc infrastructure */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

/* Join the IPv4 all-snoopers group (INADDR_ALLSNOOPERS_GROUP) on br->dev */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Join ff02::6a (IPv6 all-snoopers) on br->dev */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* Add all-snoopers multicast address */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

/* Leave the IPv4 all-snoopers group joined above */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Leave ff02::6a (IPv6 all-snoopers) on br->dev */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* Remove all-snoopers multicast address */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

/* Restart the startup-query sequence for @query if snooping is enabled */
static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

/* Bridge device brought up: kick own queries on the active context(s) */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

/* Synchronously stop all timers of a multicast context */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

/* Enable/disable multicast processing for a single vlan context */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

/* Toggle a master vlan and every per-port instance of the same vid */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}

/* Switch between per-bridge and per-vlan multicast snooping contexts */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

/* Returns true iff the global vlan multicast state actually changed */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}

/* Bridge device brought down: stop timers on the active context(s) */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}

/* Bridge device destruction: flush the mdb, run gc, wait for pending work */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}

/* Set the bridge-level multicast router type (from netlink/sysfs).
 * Returns 0 on success, -EINVAL for unsupported values (MDB_RTR_TYPE_TEMP
 * is not accepted at bridge level).
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

/* Notify userspace when a port stopped being a multicast router port */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if
(!deleted) 4231 return; 4232 4233 /* For backwards compatibility for now, only notify if there is 4234 * no multicast router anymore for both IPv4 and IPv6. 4235 */ 4236 if (!hlist_unhashed(&pmctx->ip4_rlist)) 4237 return; 4238 #if IS_ENABLED(CONFIG_IPV6) 4239 if (!hlist_unhashed(&pmctx->ip6_rlist)) 4240 return; 4241 #endif 4242 4243 br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB); 4244 br_port_mc_router_state_change(pmctx->port, false); 4245 4246 /* don't allow timer refresh */ 4247 if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) 4248 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 4249 } 4250 4251 int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx, 4252 unsigned long val) 4253 { 4254 struct net_bridge_mcast *brmctx; 4255 unsigned long now = jiffies; 4256 int err = -EINVAL; 4257 bool del = false; 4258 4259 brmctx = br_multicast_port_ctx_get_global(pmctx); 4260 spin_lock_bh(&brmctx->br->multicast_lock); 4261 if (pmctx->multicast_router == val) { 4262 /* Refresh the temp router port timer */ 4263 if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) { 4264 mod_timer(&pmctx->ip4_mc_router_timer, 4265 now + brmctx->multicast_querier_interval); 4266 #if IS_ENABLED(CONFIG_IPV6) 4267 mod_timer(&pmctx->ip6_mc_router_timer, 4268 now + brmctx->multicast_querier_interval); 4269 #endif 4270 } 4271 err = 0; 4272 goto unlock; 4273 } 4274 switch (val) { 4275 case MDB_RTR_TYPE_DISABLED: 4276 pmctx->multicast_router = MDB_RTR_TYPE_DISABLED; 4277 del |= br_ip4_multicast_rport_del(pmctx); 4278 del_timer(&pmctx->ip4_mc_router_timer); 4279 del |= br_ip6_multicast_rport_del(pmctx); 4280 #if IS_ENABLED(CONFIG_IPV6) 4281 del_timer(&pmctx->ip6_mc_router_timer); 4282 #endif 4283 br_multicast_rport_del_notify(pmctx, del); 4284 break; 4285 case MDB_RTR_TYPE_TEMP_QUERY: 4286 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 4287 del |= br_ip4_multicast_rport_del(pmctx); 4288 del |= br_ip6_multicast_rport_del(pmctx); 4289 br_multicast_rport_del_notify(pmctx, del); 
4290 break; 4291 case MDB_RTR_TYPE_PERM: 4292 pmctx->multicast_router = MDB_RTR_TYPE_PERM; 4293 del_timer(&pmctx->ip4_mc_router_timer); 4294 br_ip4_multicast_add_router(brmctx, pmctx); 4295 #if IS_ENABLED(CONFIG_IPV6) 4296 del_timer(&pmctx->ip6_mc_router_timer); 4297 #endif 4298 br_ip6_multicast_add_router(brmctx, pmctx); 4299 break; 4300 case MDB_RTR_TYPE_TEMP: 4301 pmctx->multicast_router = MDB_RTR_TYPE_TEMP; 4302 br_ip4_multicast_mark_router(brmctx, pmctx); 4303 br_ip6_multicast_mark_router(brmctx, pmctx); 4304 break; 4305 default: 4306 goto unlock; 4307 } 4308 err = 0; 4309 unlock: 4310 spin_unlock_bh(&brmctx->br->multicast_lock); 4311 4312 return err; 4313 } 4314 4315 int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router) 4316 { 4317 int err; 4318 4319 if (br_vlan_is_master(v)) 4320 err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router); 4321 else 4322 err = br_multicast_set_port_router(&v->port_mcast_ctx, 4323 mcast_router); 4324 4325 return err; 4326 } 4327 4328 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx, 4329 struct bridge_mcast_own_query *query) 4330 { 4331 struct net_bridge_port *port; 4332 4333 if (!br_multicast_ctx_matches_vlan_snooping(brmctx)) 4334 return; 4335 4336 __br_multicast_open_query(brmctx->br, query); 4337 4338 rcu_read_lock(); 4339 list_for_each_entry_rcu(port, &brmctx->br->port_list, list) { 4340 struct bridge_mcast_own_query *ip4_own_query; 4341 #if IS_ENABLED(CONFIG_IPV6) 4342 struct bridge_mcast_own_query *ip6_own_query; 4343 #endif 4344 4345 if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx)) 4346 continue; 4347 4348 if (br_multicast_ctx_is_vlan(brmctx)) { 4349 struct net_bridge_vlan *vlan; 4350 4351 vlan = br_vlan_find(nbp_vlan_group_rcu(port), 4352 brmctx->vlan->vid); 4353 if (!vlan || 4354 br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx)) 4355 continue; 4356 4357 ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query; 4358 #if IS_ENABLED(CONFIG_IPV6) 4359 
ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query; 4360 #endif 4361 } else { 4362 ip4_own_query = &port->multicast_ctx.ip4_own_query; 4363 #if IS_ENABLED(CONFIG_IPV6) 4364 ip6_own_query = &port->multicast_ctx.ip6_own_query; 4365 #endif 4366 } 4367 4368 if (query == &brmctx->ip4_own_query) 4369 br_multicast_enable(ip4_own_query); 4370 #if IS_ENABLED(CONFIG_IPV6) 4371 else 4372 br_multicast_enable(ip6_own_query); 4373 #endif 4374 } 4375 rcu_read_unlock(); 4376 } 4377 4378 int br_multicast_toggle(struct net_bridge *br, unsigned long val, 4379 struct netlink_ext_ack *extack) 4380 { 4381 struct net_bridge_port *port; 4382 bool change_snoopers = false; 4383 int err = 0; 4384 4385 spin_lock_bh(&br->multicast_lock); 4386 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) 4387 goto unlock; 4388 4389 err = br_mc_disabled_update(br->dev, val, extack); 4390 if (err == -EOPNOTSUPP) 4391 err = 0; 4392 if (err) 4393 goto unlock; 4394 4395 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); 4396 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { 4397 change_snoopers = true; 4398 goto unlock; 4399 } 4400 4401 if (!netif_running(br->dev)) 4402 goto unlock; 4403 4404 br_multicast_open(br); 4405 list_for_each_entry(port, &br->port_list, list) 4406 __br_multicast_enable_port_ctx(&port->multicast_ctx); 4407 4408 change_snoopers = true; 4409 4410 unlock: 4411 spin_unlock_bh(&br->multicast_lock); 4412 4413 /* br_multicast_join_snoopers has the potential to cause 4414 * an MLD Report/Leave to be delivered to br_multicast_rcv, 4415 * which would in turn call br_multicast_add_group, which would 4416 * attempt to acquire multicast_lock. This function should be 4417 * called after the lock has been released to avoid deadlocks on 4418 * multicast_lock. 4419 * 4420 * br_multicast_leave_snoopers does not have the problem since 4421 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and 4422 * returns without calling br_multicast_ipv4/6_rcv if it's not 4423 * enabled. 
	 * Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}

/* Returns true if multicast snooping is enabled on @dev (a bridge device). */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

/* Returns true if the bridge itself currently acts as a multicast router. */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

/* Enable/disable our own querier for @brmctx. When enabling, arm the
 * other-querier "delay window" for both families (unless a foreign querier
 * timer is already pending) and start sending our own queries.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	/* WRITE_ONCE pairs with lockless readers of multicast_querier */
	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

/* Set the IGMP protocol version used by this multicast context. */
int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_igmp_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Set the MLD protocol version used by this multicast context. */
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
				 unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_mld_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
#endif

/* Set the general query interval, clamping user input (@val in clock_t
 * units) to the allowed minimum and logging when it had to be raised.
 */
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}

/* Same as above for the startup query interval (used right after a querier
 * starts), with its own, separate minimum.
 */
void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* walk all other ports of dev's bridge and collect their groups */
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* GFP_ATOMIC: we are inside an RCU read-side section */
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				/* OOM: return what was collected so far */
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists
anywhere on the bridged link layer. 4625 * Otherwise returns false. 4626 */ 4627 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 4628 { 4629 struct net_bridge *br; 4630 struct net_bridge_port *port; 4631 struct ethhdr eth; 4632 bool ret = false; 4633 4634 rcu_read_lock(); 4635 if (!netif_is_bridge_port(dev)) 4636 goto unlock; 4637 4638 port = br_port_get_rcu(dev); 4639 if (!port || !port->br) 4640 goto unlock; 4641 4642 br = port->br; 4643 4644 memset(ð, 0, sizeof(eth)); 4645 eth.h_proto = htons(proto); 4646 4647 ret = br_multicast_querier_exists(&br->multicast_ctx, ð, NULL); 4648 4649 unlock: 4650 rcu_read_unlock(); 4651 return ret; 4652 } 4653 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 4654 4655 /** 4656 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 4657 * @dev: The bridge port adjacent to which to check for a querier 4658 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 4659 * 4660 * Checks whether the given interface has a bridge on top and if so returns 4661 * true if a selected querier is behind one of the other ports of this 4662 * bridge. Otherwise returns false. 
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		/* a pending other-query timer means a foreign querier was
		 * seen recently; it must not sit on @dev itself
		 */
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		/* unknown family (incl. IPv6 w/o CONFIG_IPV6): no querier */
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		/* any router-list entry on a port other than @dev counts */
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);

/* Account one IGMP/MLD packet of @type in the per-cpu stats for direction
 * @dir (RX/TX). For queries the exact version is derived from the payload
 * length (and, for IGMP, the "code" field distinguishing v1 from v2).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* IGMP payload length = total length minus the IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* v3 queries are longer than the basic igmphdr */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* v1 queries carry a zero Max Resp Code */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MLD payload length = IPv6 payload minus extension headers */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* MLDv2 queries are longer than the basic mld_msg */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

/* Count one multicast packet for the bridge (@p == NULL) or for port @p,
 * if stats are enabled and the packet was recognized (@type != 0).
 */
void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

/* Allocate the bridge-level per-cpu multicast stats. */
int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void
br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

/* Sum the per-cpu multicast stats of the bridge (@p == NULL) or of port @p
 * into @dest, using the u64_stats seqcount to get a consistent snapshot of
 * each cpu's counters.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry until the snapshot was not torn by a writer */
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports,
				    temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}

/* Initialize the bridge's two MDB rhashtables; on failure of the second,
 * tear the first one down again so nothing is left half-initialized.
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

/* Destroy both MDB rhashtables (counterpart of br_mdb_hash_init). */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}