// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"

/* mdb hash table: keyed by the full struct br_ip address (group + vid) */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

/* S,G per-port hash table: keyed by (port, addr) so a specific port's
 * S,G entry can be found without walking the mdb entry's port list
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

/* Look up a specific port's S,G entry by (port, addr) key.
 * Caller must hold br->multicast_lock.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

/* mdb entry lookup for RCU callers (no multicast_lock required) */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* mdb entry lookup for callers holding br->multicast_lock; the short
 * internal RCU read section only satisfies rhashtable_lookup()'s
 * requirements — the returned entry stays valid under multicast_lock
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Convenience wrapper: build an IPv4 *,G br_ip key and look it up */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: build an IPv6 *,G br_ip key and look it up */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Forwarding fast-path lookup: map an ingress skb to its mdb entry.
 * Tries an S,G match first when IGMPv3/MLDv2 is in use, then falls back
 * to *,G; unknown protocols fall back to a MAC-address keyed lookup.
 * Runs under RCU (uses br_mdb_ip_get_rcu()); returns NULL for control
 * traffic (skb marked igmp) or when snooping is disabled.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			/* no S,G match — retry as *,G below */
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			/* no S,G match — retry as *,G below */
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option.
 * At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}

/* Does port group @p match (@port, @src)?  The source MAC only matters
 * when the port does unicast-to-multicast translation.
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Install an automatic (kernel-managed) S,G entry for @sg_ip on @pg's port,
 * flagged MDB_PG_FLAGS_STAR_EXCL, unless the port already has one.
 * Used when a *,G port is in EXCLUDE mode and must replicate S,G traffic.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	/* only mark entries we created ourselves (RTPROT_KERNEL) */
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

/* Remove an automatically installed STAR_EXCL S,G entry for @sg_ip on
 * @pg's port; user-added (non-RTPROT_KERNEL) entries are left alone.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = &pg->key.port->multicast_ctx;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk every other port of this *,G and mirror its installed
	 * sources onto @pg as S,G entries (EXCLUDE) or drop the automatic
	 * ones (INCLUDE)
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	/* the host joined the *,G, so it implicitly joins the new S,G too */
	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

/* Delete an S,G entry's kernel-installed (STAR_EXCL) ports once no
 * "real" (non-STAR_EXCL, non-permanent) port remains on it.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

/* Mirror a freshly added S,G entry @sg onto all EXCLUDE-mode ports of its
 * *,G @star_mp, installing kernel-managed STAR_EXCL entries per port.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	brmctx = &br->multicast_ctx;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = &pg->key.port->multicast_ctx;
		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}

/* Install the S,G forwarding entry for group source @src (if not already
 * installed) and hook it into the *,G's exclude-port replication.
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = &src->pg->key.port->multicast_ctx;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	/* blocked (last arg) when the source timer isn't running */
	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

/* Uninstall the S,G forwarding entry for group source @src; permanent
 * user-space entries are preserved. @fastleave marks the deletion reason
 * for the netlink notification.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* forward only while the source timer is running */
	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only on an actual state change */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

static void
br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	/* final free, runs from the GC work after an RCU grace period */
	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

/* Unlink an mdb entry from the hash table and list and queue it for GC;
 * the actual free happens in br_multicast_destroy_mdb_entry().
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* mdb entry timer: drop host membership and delete the entry once no
 * ports remain
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	/* bail if already unlinked, bridge is down, or timer was re-armed */
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

/* final free of a group source entry, runs from the GC work */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}

/* Remove a group source: uninstall its S,G forwarding entry, unlink it
 * from its port group and queue it for GC.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src, fastleave);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* final free of a port group, runs from the GC work */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

/* Unlink port group @pg from mdb entry @mp (@pp points at the link to
 * @pg), drop its sources, notify user-space and queue @pg for GC; arms
 * @mp's timer for immediate expiry if the entry became empty.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

/* Locate @pg in its mdb entry's port list and delete it; WARNs if the
 * entry or the port group cannot be found (inconsistent state).
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

/* port group timer: fall back to INCLUDE mode, drop expired sources and
 * delete the group if no sources remain
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent, false);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* run the queued destructors collected on a bridge's GC list */
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}

/* Build an IGMPv2/v3 query skb addressed to @ip_dst for group @group.
 * With @with_srcs a group-and-source-specific v3 query is built, listing
 * only sources whose timer is over/under the LMQT per @over_lmqt; their
 * rexmit counters are decremented and @need_rexmit set if any remain.
 * Returns NULL if nothing needs to be sent or allocation fails.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* first pass: count matching sources to size the skb */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 for the Router Alert IP option */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;		/* 24-byte header: 20 + 4 bytes RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option right after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in the sources counted above */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
895 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx, 896 struct net_bridge_port_group *pg, 897 const struct in6_addr *ip6_dst, 898 const struct in6_addr *group, 899 bool with_srcs, bool over_llqt, 900 u8 sflag, u8 *igmp_type, 901 bool *need_rexmit) 902 { 903 struct net_bridge_port *p = pg ? pg->key.port : NULL; 904 struct net_bridge_group_src *ent; 905 size_t pkt_size, mld_hdr_size; 906 unsigned long now = jiffies; 907 struct mld2_query *mld2q; 908 void *csum_start = NULL; 909 unsigned long interval; 910 __sum16 *csum = NULL; 911 struct ipv6hdr *ip6h; 912 struct mld_msg *mldq; 913 struct sk_buff *skb; 914 unsigned long llqt; 915 struct ethhdr *eth; 916 u16 llqt_srcs = 0; 917 u8 *hopopt; 918 919 mld_hdr_size = sizeof(*mldq); 920 if (brmctx->multicast_mld_version == 2) { 921 mld_hdr_size = sizeof(*mld2q); 922 if (pg && with_srcs) { 923 llqt = now + (brmctx->multicast_last_member_interval * 924 brmctx->multicast_last_member_count); 925 hlist_for_each_entry(ent, &pg->src_list, node) { 926 if (over_llqt == time_after(ent->timer.expires, 927 llqt) && 928 ent->src_query_rexmit_cnt > 0) 929 llqt_srcs++; 930 } 931 932 if (!llqt_srcs) 933 return NULL; 934 mld_hdr_size += llqt_srcs * sizeof(struct in6_addr); 935 } 936 } 937 938 pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size; 939 if ((p && pkt_size > p->dev->mtu) || 940 pkt_size > brmctx->br->dev->mtu) 941 return NULL; 942 943 skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size); 944 if (!skb) 945 goto out; 946 947 skb->protocol = htons(ETH_P_IPV6); 948 949 /* Ethernet header */ 950 skb_reset_mac_header(skb); 951 eth = eth_hdr(skb); 952 953 ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr); 954 eth->h_proto = htons(ETH_P_IPV6); 955 skb_put(skb, sizeof(*eth)); 956 957 /* IPv6 header + HbH option */ 958 skb_set_network_header(skb, skb->len); 959 ip6h = ipv6_hdr(skb); 960 961 *(__force __be32 *)ip6h = htonl(0x60000000); 962 ip6h->payload_len = htons(8 + mld_hdr_size); 
963 ip6h->nexthdr = IPPROTO_HOPOPTS; 964 ip6h->hop_limit = 1; 965 ip6h->daddr = *ip6_dst; 966 if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev, 967 &ip6h->daddr, 0, &ip6h->saddr)) { 968 kfree_skb(skb); 969 br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false); 970 return NULL; 971 } 972 973 br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true); 974 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 975 976 hopopt = (u8 *)(ip6h + 1); 977 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 978 hopopt[1] = 0; /* length of HbH */ 979 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 980 hopopt[3] = 2; /* Length of RA Option */ 981 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 982 hopopt[5] = 0; 983 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ 984 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ 985 986 skb_put(skb, sizeof(*ip6h) + 8); 987 988 /* ICMPv6 */ 989 skb_set_transport_header(skb, skb->len); 990 interval = ipv6_addr_any(group) ? 991 brmctx->multicast_query_response_interval : 992 brmctx->multicast_last_member_interval; 993 *igmp_type = ICMPV6_MGM_QUERY; 994 switch (brmctx->multicast_mld_version) { 995 case 1: 996 mldq = (struct mld_msg *)icmp6_hdr(skb); 997 mldq->mld_type = ICMPV6_MGM_QUERY; 998 mldq->mld_code = 0; 999 mldq->mld_cksum = 0; 1000 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 1001 mldq->mld_reserved = 0; 1002 mldq->mld_mca = *group; 1003 csum = &mldq->mld_cksum; 1004 csum_start = (void *)mldq; 1005 break; 1006 case 2: 1007 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1008 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval)); 1009 mld2q->mld2q_type = ICMPV6_MGM_QUERY; 1010 mld2q->mld2q_code = 0; 1011 mld2q->mld2q_cksum = 0; 1012 mld2q->mld2q_resv1 = 0; 1013 mld2q->mld2q_resv2 = 0; 1014 mld2q->mld2q_suppress = sflag; 1015 mld2q->mld2q_qrv = 2; 1016 mld2q->mld2q_nsrcs = htons(llqt_srcs); 1017 mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ; 1018 mld2q->mld2q_mca = *group; 1019 csum = &mld2q->mld2q_cksum; 1020 csum_start = (void *)mld2q; 1021 
if (!pg || !with_srcs) 1022 break; 1023 1024 llqt_srcs = 0; 1025 hlist_for_each_entry(ent, &pg->src_list, node) { 1026 if (over_llqt == time_after(ent->timer.expires, 1027 llqt) && 1028 ent->src_query_rexmit_cnt > 0) { 1029 mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6; 1030 ent->src_query_rexmit_cnt--; 1031 if (need_rexmit && ent->src_query_rexmit_cnt) 1032 *need_rexmit = true; 1033 } 1034 } 1035 if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) { 1036 kfree_skb(skb); 1037 return NULL; 1038 } 1039 break; 1040 } 1041 1042 if (WARN_ON(!csum || !csum_start)) { 1043 kfree_skb(skb); 1044 return NULL; 1045 } 1046 1047 *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size, 1048 IPPROTO_ICMPV6, 1049 csum_partial(csum_start, mld_hdr_size, 0)); 1050 skb_put(skb, mld_hdr_size); 1051 __skb_pull(skb, sizeof(*eth)); 1052 1053 out: 1054 return skb; 1055 } 1056 #endif 1057 1058 static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx, 1059 struct net_bridge_port_group *pg, 1060 struct br_ip *ip_dst, 1061 struct br_ip *group, 1062 bool with_srcs, bool over_lmqt, 1063 u8 sflag, u8 *igmp_type, 1064 bool *need_rexmit) 1065 { 1066 __be32 ip4_dst; 1067 1068 switch (group->proto) { 1069 case htons(ETH_P_IP): 1070 ip4_dst = ip_dst ? 
			  ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* ff02::1 - the link-local all-nodes address */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Find or create the mdb entry for @group.  Disables snooping and
 * returns -E2BIG when the configured hash maximum is reached.
 * Called with multicast_lock held.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Source entry timer expiry: in INCLUDE mode the expired source is
 * deleted (and the whole port group with it if it was the last source);
 * in EXCLUDE mode only the source's forwarding state is updated.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

/* Look up a source address in @pg's source list; NULL if not found. */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate and link a new source entry for @pg.  Rejects invalid source
 * addresses (zeronet/multicast for IPv4, any/multicast for IPv6) and
 * enforces the per-group PG_SRC_ENT_LIMIT.  Returns NULL on failure.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
/* Allocate a new port group entry for (@port, @group) and link it into
 * the port's mglist; (S,G) entries are additionally inserted into the
 * per-bridge sg_port hash.  Returns NULL on allocation/insert failure.
 * Called with multicast_lock held.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}

/* Mark the bridge itself (host) as joined to @mp and refresh the
 * membership timer (L2 entries have no timer and are skipped).
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer,
		  jiffies + mp->br->multicast_ctx.multicast_membership_interval);
}

/* Clear the host-joined state of @mp, optionally notifying userspace. */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Core add-group handler: finds or creates the mdb entry and the
 * per-port group entry.  @pmctx == NULL means the host itself joins.
 * For IGMPv2/MLDv1 style joins the group membership timer is refreshed.
 * Returns the port group, NULL for host joins, or an ERR_PTR.
 * Called with multicast_lock held.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!netif_running(brmctx->br->dev) ||
	    (pmctx && pmctx->port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(mp, true);
		goto out;
	}

	/* Walk the ordered port list; bail out to insert once we pass the
	 * slot for this port, or reuse an existing matching entry.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}

/* Locked wrapper around __br_multicast_add_group() returning an errno. */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}

/* IGMP report handler: build the br_ip key and add the group.  Local
 * (224.0.0.0/24) groups are never snooped.  IGMPv2 joins carry no
 * source list and map to EXCLUDE mode.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD report handler, IPv6 counterpart of br_ip4_multicast_add_group().
 * The link-local all-nodes group is never snooped.
 */
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ?
		      MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif

/* Unlink a router-list node if it is hashed; returns true when an entry
 * was actually removed so callers know whether to notify.
 */
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}

/* Port router timer expiry: drop the port from the router list unless
 * its router type is forced (disabled/permanent) or the timer has been
 * re-armed in the meantime.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

/* Propagate the bridge's own multicast-router state to switchdev. */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Bridge-local router timer expiry: clear the switchdev mrouter state
 * once neither IPv4 nor IPv6 considers the bridge a router anymore.
 * @timer is unused; both address-family timers share this handler.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif

/* The other-querier-present timer ran out: start acting as the querier
 * ourselves if the bridge is up and snooping is enabled.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct
	net_bridge_mcast *brmctx = from_timer(brmctx, t,
					      ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

/* Record the source address of the query we are sending as our own
 * querier address for the respective address family.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and emit one query.  With a port context the query goes out of
 * that port; otherwise it is looped back into the bridge itself.  When
 * an over-LMQT source query was sent with the suppress flag set, a
 * second pass sends the under-LMQT sources without it.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Periodic general query transmission for one own-query context.  Does
 * nothing while another querier is active on the segment.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER))
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* Pick the address family from which own-query context fired. */
	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* General query: NULL destination, no source list. */
	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	/* Startup queries go out at the faster startup interval a fixed
	 * number of times before switching to the regular interval.
	 */
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Per-port own-query timer body shared by both address families. */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;

	spin_lock(&br->multicast_lock);
	if (pmctx->port->state == BR_STATE_DISABLED ||
	    pmctx->port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_ctx.multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(&br->multicast_ctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);
	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif

/* Retransmit pending group and group-and-source specific queries for a
 * port group, re-arming the rexmit timer while retransmissions remain.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	brmctx = &br->multicast_ctx;
	pmctx = &pg->key.port->multicast_ctx;
	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* Only retransmit while we are the active querier. */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

/* Tell switchdev whether multicast snooping is disabled on @dev. */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}

/* Initialize a per-port (or per-port-vlan when @vlan != NULL) multicast
 * context and its timers.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}

/* Tear down a port multicast context, stopping its router timers. */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}

/* Set up multicast state when @port is being added to a bridge. */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Release all multicast state when @port leaves the bridge. */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}

/* Restart own queries: reset the startup counter and fire right away. */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Enable multicast processing on a port context: kick own queries and,
 * for permanent router ports, re-add the port to the router lists.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}

/* Disable multicast on a port context: flush its non-permanent groups,
 * drop it from the router lists and stop its timers.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}

/* Delete all sources marked BR_SGRP_F_DELETE; returns the count. */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

/* Re-arm a source timer and update its forwarding state. */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

/* Send group-and-source specific queries for all sources marked
 * BR_SGRP_F_SEND, lowering their timers to the last member query time.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				/* Schedule per-source retransmissions only
				 * while we are the active querier.
				 */
				if (br_opt_get(brmctx->br,
					       BROPT_MULTICAST_QUERIER) &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER) ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

/* Send a group specific query for @pg and, when in EXCLUDE mode, lower
 * its group timer to the last member query time.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER) &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* Mark everything for deletion, then unmark what the report keeps. */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Handle an IS_EX report for @pg: dispatch on the current filter mode
 * and switch the group to EXCLUDE with a refreshed group timer.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Mark everything to be queried, unmark what the report refreshes. */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	/* Query the sources absent from the report: Q(G,A-B). */
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Only active sources (timer running) are candidates for Q(G,X-A). */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a TO_IN report for @pg; the port group may be deleted entirely
 * via EHT fast-leave, in which case false is returned and @pg must not
 * be touched anymore.
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
2252 } 2253 2254 /* State Msg type New state Actions 2255 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 2256 * Delete (A-B) 2257 * Send Q(G,A*B) 2258 * Group Timer=GMI 2259 */ 2260 static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx, 2261 struct net_bridge_mcast_port *pmctx, 2262 struct net_bridge_port_group *pg, void *h_addr, 2263 void *srcs, u32 nsrcs, size_t addr_size, 2264 int grec_type) 2265 { 2266 struct net_bridge_group_src *ent; 2267 u32 src_idx, to_send = 0; 2268 struct br_ip src_ip; 2269 2270 hlist_for_each_entry(ent, &pg->src_list, node) 2271 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE; 2272 2273 memset(&src_ip, 0, sizeof(src_ip)); 2274 src_ip.proto = pg->key.addr.proto; 2275 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 2276 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size); 2277 ent = br_multicast_find_group_src(pg, &src_ip); 2278 if (ent) { 2279 ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) | 2280 BR_SGRP_F_SEND; 2281 to_send++; 2282 } else { 2283 ent = br_multicast_new_group_src(pg, &src_ip); 2284 } 2285 if (ent) 2286 br_multicast_fwd_src_handle(ent); 2287 } 2288 2289 br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size, 2290 grec_type); 2291 2292 __grp_src_delete_marked(pg); 2293 if (to_send) 2294 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg); 2295 } 2296 2297 /* State Msg type New state Actions 2298 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer 2299 * Delete (X-A) 2300 * Delete (Y-A) 2301 * Send Q(G,A-Y) 2302 * Group Timer=GMI 2303 */ 2304 static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx, 2305 struct net_bridge_mcast_port *pmctx, 2306 struct net_bridge_port_group *pg, void *h_addr, 2307 void *srcs, u32 nsrcs, size_t addr_size, 2308 int grec_type) 2309 { 2310 struct net_bridge_group_src *ent; 2311 u32 src_idx, to_send = 0; 2312 bool changed = false; 2313 struct br_ip src_ip; 2314 2315 hlist_for_each_entry(ent, &pg->src_list, node) 2316 ent->flags = 
(ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE; 2317 2318 memset(&src_ip, 0, sizeof(src_ip)); 2319 src_ip.proto = pg->key.addr.proto; 2320 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 2321 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size); 2322 ent = br_multicast_find_group_src(pg, &src_ip); 2323 if (ent) { 2324 ent->flags &= ~BR_SGRP_F_DELETE; 2325 } else { 2326 ent = br_multicast_new_group_src(pg, &src_ip); 2327 if (ent) { 2328 __grp_src_mod_timer(ent, pg->timer.expires); 2329 changed = true; 2330 } 2331 } 2332 if (ent && timer_pending(&ent->timer)) { 2333 ent->flags |= BR_SGRP_F_SEND; 2334 to_send++; 2335 } 2336 } 2337 2338 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size, 2339 grec_type)) 2340 changed = true; 2341 2342 if (__grp_src_delete_marked(pg)) 2343 changed = true; 2344 if (to_send) 2345 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg); 2346 2347 return changed; 2348 } 2349 2350 static bool br_multicast_toex(struct net_bridge_mcast *brmctx, 2351 struct net_bridge_mcast_port *pmctx, 2352 struct net_bridge_port_group *pg, void *h_addr, 2353 void *srcs, u32 nsrcs, size_t addr_size, 2354 int grec_type) 2355 { 2356 bool changed = false; 2357 2358 switch (pg->filter_mode) { 2359 case MCAST_INCLUDE: 2360 __grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs, 2361 addr_size, grec_type); 2362 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE); 2363 changed = true; 2364 break; 2365 case MCAST_EXCLUDE: 2366 changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs, 2367 nsrcs, addr_size, grec_type); 2368 break; 2369 } 2370 2371 pg->filter_mode = MCAST_EXCLUDE; 2372 mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx)); 2373 2374 return changed; 2375 } 2376 2377 /* State Msg type New state Actions 2378 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B) 2379 */ 2380 static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx, 2381 struct net_bridge_mcast_port *pmctx, 2382 struct net_bridge_port_group *pg, 
void *h_addr, 2383 void *srcs, u32 nsrcs, size_t addr_size, int grec_type) 2384 { 2385 struct net_bridge_group_src *ent; 2386 u32 src_idx, to_send = 0; 2387 bool changed = false; 2388 struct br_ip src_ip; 2389 2390 hlist_for_each_entry(ent, &pg->src_list, node) 2391 ent->flags &= ~BR_SGRP_F_SEND; 2392 2393 memset(&src_ip, 0, sizeof(src_ip)); 2394 src_ip.proto = pg->key.addr.proto; 2395 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 2396 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size); 2397 ent = br_multicast_find_group_src(pg, &src_ip); 2398 if (ent) { 2399 ent->flags |= BR_SGRP_F_SEND; 2400 to_send++; 2401 } 2402 } 2403 2404 if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size, 2405 grec_type)) 2406 changed = true; 2407 2408 if (to_send) 2409 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg); 2410 2411 return changed; 2412 } 2413 2414 /* State Msg type New state Actions 2415 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer 2416 * Send Q(G,A-Y) 2417 */ 2418 static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx, 2419 struct net_bridge_mcast_port *pmctx, 2420 struct net_bridge_port_group *pg, void *h_addr, 2421 void *srcs, u32 nsrcs, size_t addr_size, int grec_type) 2422 { 2423 struct net_bridge_group_src *ent; 2424 u32 src_idx, to_send = 0; 2425 bool changed = false; 2426 struct br_ip src_ip; 2427 2428 hlist_for_each_entry(ent, &pg->src_list, node) 2429 ent->flags &= ~BR_SGRP_F_SEND; 2430 2431 memset(&src_ip, 0, sizeof(src_ip)); 2432 src_ip.proto = pg->key.addr.proto; 2433 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 2434 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size); 2435 ent = br_multicast_find_group_src(pg, &src_ip); 2436 if (!ent) { 2437 ent = br_multicast_new_group_src(pg, &src_ip); 2438 if (ent) { 2439 __grp_src_mod_timer(ent, pg->timer.expires); 2440 changed = true; 2441 } 2442 } 2443 if (ent && timer_pending(&ent->timer)) { 2444 ent->flags |= BR_SGRP_F_SEND; 2445 to_send++; 2446 
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	/* Send Q(G,A-Y) */
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Dispatch a BLOCK record based on the current filter mode and delete
 * the port group if it ends up with no sources in INCLUDE mode (or if
 * EHT decides it should go).
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* Find the port group entry of @mp that matches port @p and host source
 * MAC @src, or NULL.  Walks the mdb entry's port list under
 * multicast_lock (mlock_dereference).
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Parse an IGMPv3 report and apply each group record to the matching
 * port group's source-filter state machine.  Returns 0 or -EINVAL on a
 * truncated packet.  When the bridge is configured for IGMPv2
 * (@igmpv2) or there is no port context, records are reduced to plain
 * join/leave handling.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* validate each record header before touching it */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* 4 bytes per IPv4 source address */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* TO_IN/IS_IN with an empty source list is a leave
			 * when running in IGMPv2 mode or host-joined.
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		/* full v3 source-list processing only for port contexts */
		if (!pmctx || igmpv2)
			continue;

		spin_lock_bh(&brmctx->br->multicast_lock);
		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLDv2 counterpart of br_ip4_multicast_igmp3_report(): parse an MLDv2
 * report and apply each group record to the port group's source-filter
 * state machine.  Returns 0 or -EINVAL on a truncated packet.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16
nsrcs;

		/* read grec_nsrcs without assuming the record is linear */
		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* TO_IN/IS_IN with an empty source list is a leave
			 * when running in MLDv1 mode or host-joined.
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* full v2 source-list processing only for port contexts */
		if (!pmctx || mldv1)
			continue;

		spin_lock_bh(&brmctx->br->multicast_lock);
		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif

/* Decide whether @saddr should become the selected IGMP querier.
 * A querier with a lower (or equal) source address takes over; if no
 * query timers are running or no querier is known yet, accept
 * unconditionally.  Returns true when the querier was updated.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge_mcast *brmctx,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&brmctx->ip4_own_query.timer) &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto update;

	if (!brmctx->ip4_querier.addr.src.ip4)
		goto update;

	/* lowest address wins the querier election */
	if (ntohl(saddr) <= ntohl(brmctx->ip4_querier.addr.src.ip4))
		goto update;

	return false;

update:
	brmctx->ip4_querier.addr.src.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(brmctx->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_select_querier(): lowest source
 * address wins.  Returns true when the querier was updated.
 */
static bool br_ip6_multicast_select_querier(struct net_bridge_mcast *brmctx,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&brmctx->ip6_own_query.timer) &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &brmctx->ip6_querier.addr.src.ip6) <= 0)
		goto update;

	return false;

update:
	brmctx->ip6_querier.addr.src.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(brmctx->ip6_querier.port, port);

	return true;
}
#endif

/* (Re)arm the "other querier present" timer after hearing a query from
 * another querier; delay_time is only reset when the timer was idle.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

/* Offload the port's multicast-router state change to switchdev
 * (deferred attr set).
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Map a router-list node back to its bridge port; the list identity
 * (ip4 vs ip6) determines which hlist_node member @rlist is.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

/* Find the node to insert @port behind so the router list stays sorted
 * by descending port pointer value; NULL means insert at the head.
 */
static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)

{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

/* True if the port is not on the router list of the *other* protocol
 * family (i.e. adding it to this family's list makes it a new router).
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already on the list - nothing to do */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx->port, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

/* Mark a port (or, with pmctx == NULL, the bridge itself) as a
 * multicast router and (re)arm the corresponding router timer.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!pmctx) {
		/* bridge-level (host) router state */
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	/* ports with a fixed router configuration are left alone */
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

/* IPv4 wrapper: pick the bridge-level or per-port timer/list node. */
static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

/* IPv6 wrapper: pick the bridge-level or per-port timer/list node. */
static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

/* A general query was heard from another IGMP querier: run the querier
 * election, restart the other-querier timer and mark the ingress port
 * as a router port.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_ip4_multicast_select_querier(brmctx, pmctx->port, saddr->src.ip4))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query_received(). */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_ip6_multicast_select_querier(brmctx, pmctx->port, &saddr->src.ip6))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

/* Process a received IGMP query: handle general queries via the querier
 * election and, for group-specific queries, shorten the matching
 * group/port timers so membership expires unless reports arrive.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const
struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    (pmctx && pmctx->port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2-sized query */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 has no max-response code; treat as a
			 * general query with a 10s response time.
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query - run the querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	/* group-specific query: shorten the relevant timers */
	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query(); additionally returns
 * -EINVAL when the query is too short to parse.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    (pmctx && pmctx->port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1-sized query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query - run the querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	/* group-specific query: shorten the relevant timers */
	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

/* Common leave handling for IGMP/MLD: fast-leave ports delete their
 * port group immediately; otherwise, if we are the querier, send a
 * group query and shorten the group/port timers to last-member timing.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    (pmctx && pmctx->port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is active - leave the timers to it */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(brmctx->br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		/* NOTE(review): this loop dereferences pmctx->port, but
		 * callers can pass pmctx == NULL (host leave) - confirm
		 * this path is unreachable in that case.
		 */
		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host leave - only the bridge-level timer is affected */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IGMP leave wrapper: skip link-local groups and hand off to the
 * common leave handler with the right own-query context.
 */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD leave wrapper: skip the all-nodes group and hand off to the
 * common leave handler with the right own-query context.
 */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

/* Bump the per-cpu IGMP/MLD parse-error counter for the port (or the
 * bridge itself when @p is NULL), if stats are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIM Hello on a port implies a multicast router is attached there;
 * mark the ingress port as a router port.
 */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;
3419 3420 spin_lock(&brmctx->br->multicast_lock); 3421 br_ip4_multicast_mark_router(brmctx, pmctx); 3422 spin_unlock(&brmctx->br->multicast_lock); 3423 3424 return 0; 3425 } 3426 3427 static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx, 3428 struct net_bridge_mcast_port *pmctx, 3429 struct sk_buff *skb, 3430 u16 vid) 3431 { 3432 struct net_bridge_port *p = pmctx ? pmctx->port : NULL; 3433 const unsigned char *src; 3434 struct igmphdr *ih; 3435 int err; 3436 3437 err = ip_mc_check_igmp(skb); 3438 3439 if (err == -ENOMSG) { 3440 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) { 3441 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 3442 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) { 3443 if (ip_hdr(skb)->protocol == IPPROTO_PIM) 3444 br_multicast_pim(brmctx, pmctx, skb); 3445 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) { 3446 br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb); 3447 } 3448 3449 return 0; 3450 } else if (err < 0) { 3451 br_multicast_err_count(brmctx->br, p, skb->protocol); 3452 return err; 3453 } 3454 3455 ih = igmp_hdr(skb); 3456 src = eth_hdr(skb)->h_source; 3457 BR_INPUT_SKB_CB(skb)->igmp = ih->type; 3458 3459 switch (ih->type) { 3460 case IGMP_HOST_MEMBERSHIP_REPORT: 3461 case IGMPV2_HOST_MEMBERSHIP_REPORT: 3462 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 3463 err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid, 3464 src, true); 3465 break; 3466 case IGMPV3_HOST_MEMBERSHIP_REPORT: 3467 err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid); 3468 break; 3469 case IGMP_HOST_MEMBERSHIP_QUERY: 3470 br_ip4_multicast_query(brmctx, pmctx, skb, vid); 3471 break; 3472 case IGMP_HOST_LEAVE_MESSAGE: 3473 br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src); 3474 break; 3475 } 3476 3477 br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp, 3478 BR_MCAST_DIR_RX); 3479 3480 return err; 3481 } 3482 3483 #if IS_ENABLED(CONFIG_IPV6) 3484 static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx, 
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	/* IPv6 Multicast Router Discovery advertisement: mark router port */
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* Snoop an IPv6 multicast packet: dispatch MLD reports/queries/dones.
 * ipv6_mc_check_mld() returns -ENOMSG/-ENODATA for valid packets that
 * simply are not MLD.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Snooping entry point, called from the bridge rx/tx paths.  May
 * redirect *brmctx / *pmctx to the per-vlan multicast contexts when
 * vlan snooping is enabled (then *pmctx becomes NULL for traffic
 * transmitted through the bridge device itself).
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer fired: clear the selected querier port and send the
 * next (startup) general query.  Runs in timer context under the lock.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(brmctx, NULL, query);
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
br_ip6_multicast_query_expired(struct timer_list *t) 3622 { 3623 struct net_bridge_mcast *brmctx = from_timer(brmctx, t, 3624 ip6_own_query.timer); 3625 3626 br_multicast_query_expired(brmctx, &brmctx->ip6_own_query, 3627 &brmctx->ip6_querier); 3628 } 3629 #endif 3630 3631 static void br_multicast_gc_work(struct work_struct *work) 3632 { 3633 struct net_bridge *br = container_of(work, struct net_bridge, 3634 mcast_gc_work); 3635 HLIST_HEAD(deleted_head); 3636 3637 spin_lock_bh(&br->multicast_lock); 3638 hlist_move_list(&br->mcast_gc_list, &deleted_head); 3639 spin_unlock_bh(&br->multicast_lock); 3640 3641 br_multicast_gc(&deleted_head); 3642 } 3643 3644 void br_multicast_ctx_init(struct net_bridge *br, 3645 struct net_bridge_vlan *vlan, 3646 struct net_bridge_mcast *brmctx) 3647 { 3648 brmctx->br = br; 3649 brmctx->vlan = vlan; 3650 brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 3651 brmctx->multicast_last_member_count = 2; 3652 brmctx->multicast_startup_query_count = 2; 3653 3654 brmctx->multicast_last_member_interval = HZ; 3655 brmctx->multicast_query_response_interval = 10 * HZ; 3656 brmctx->multicast_startup_query_interval = 125 * HZ / 4; 3657 brmctx->multicast_query_interval = 125 * HZ; 3658 brmctx->multicast_querier_interval = 255 * HZ; 3659 brmctx->multicast_membership_interval = 260 * HZ; 3660 3661 brmctx->ip4_other_query.delay_time = 0; 3662 brmctx->ip4_querier.port = NULL; 3663 brmctx->multicast_igmp_version = 2; 3664 #if IS_ENABLED(CONFIG_IPV6) 3665 brmctx->multicast_mld_version = 1; 3666 brmctx->ip6_other_query.delay_time = 0; 3667 brmctx->ip6_querier.port = NULL; 3668 #endif 3669 3670 timer_setup(&brmctx->ip4_mc_router_timer, 3671 br_ip4_multicast_local_router_expired, 0); 3672 timer_setup(&brmctx->ip4_other_query.timer, 3673 br_ip4_multicast_querier_expired, 0); 3674 timer_setup(&brmctx->ip4_own_query.timer, 3675 br_ip4_multicast_query_expired, 0); 3676 #if IS_ENABLED(CONFIG_IPV6) 3677 timer_setup(&brmctx->ip6_mc_router_timer, 3678 
br_ip6_multicast_local_router_expired, 0); 3679 timer_setup(&brmctx->ip6_other_query.timer, 3680 br_ip6_multicast_querier_expired, 0); 3681 timer_setup(&brmctx->ip6_own_query.timer, 3682 br_ip6_multicast_query_expired, 0); 3683 #endif 3684 } 3685 3686 void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx) 3687 { 3688 __br_multicast_stop(brmctx); 3689 } 3690 3691 void br_multicast_init(struct net_bridge *br) 3692 { 3693 br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX; 3694 3695 br_multicast_ctx_init(br, NULL, &br->multicast_ctx); 3696 3697 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true); 3698 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true); 3699 3700 spin_lock_init(&br->multicast_lock); 3701 INIT_HLIST_HEAD(&br->mdb_list); 3702 INIT_HLIST_HEAD(&br->mcast_gc_list); 3703 INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work); 3704 } 3705 3706 static void br_ip4_multicast_join_snoopers(struct net_bridge *br) 3707 { 3708 struct in_device *in_dev = in_dev_get(br->dev); 3709 3710 if (!in_dev) 3711 return; 3712 3713 __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC); 3714 in_dev_put(in_dev); 3715 } 3716 3717 #if IS_ENABLED(CONFIG_IPV6) 3718 static void br_ip6_multicast_join_snoopers(struct net_bridge *br) 3719 { 3720 struct in6_addr addr; 3721 3722 ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a)); 3723 ipv6_dev_mc_inc(br->dev, &addr); 3724 } 3725 #else 3726 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br) 3727 { 3728 } 3729 #endif 3730 3731 void br_multicast_join_snoopers(struct net_bridge *br) 3732 { 3733 br_ip4_multicast_join_snoopers(br); 3734 br_ip6_multicast_join_snoopers(br); 3735 } 3736 3737 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br) 3738 { 3739 struct in_device *in_dev = in_dev_get(br->dev); 3740 3741 if (WARN_ON(!in_dev)) 3742 return; 3743 3744 __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC); 3745 in_dev_put(in_dev); 3746 } 3747 3748 #if IS_ENABLED(CONFIG_IPV6) 3749 
/* Leave ff02::6a (IPv6 all-snoopers). */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

/* Restart the startup-query sequence and fire the own-query timer
 * immediately; no-op when multicast snooping is disabled.
 */
static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

/* Bridge device brought up: start querying on all enabled contexts. */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	}

	__br_multicast_open(&br->multicast_ctx);
}

/* Quiesce all timers of a context, waiting for running handlers. */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

/* Enable/disable multicast snooping on a single (master or per-port)
 * vlan; flips BR_VLFLAG_MCAST_ENABLED and opens/stops the matching
 * context.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

/* Propagate a master vlan's toggle to the matching per-port vlans. */
void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}
}

/* Switch between the global and the per-vlan snooping contexts;
 * requires vlan filtering to be enabled when turning vlan snooping on.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

/* Bridge device going down: stop timers on all enabled contexts. */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	}

	__br_multicast_stop(&br->multicast_ctx);
}

/* Bridge device deletion: flush the mdb, run pending gc and wait for
 * in-flight RCU frees (rcu_barrier) before the bridge memory goes away.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}

/* Set the bridge's own multicast router mode (from netlink/sysfs);
 * returns -EINVAL for unsupported modes.
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx->port, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

/* Set a port's multicast router mode (from netlink/sysfs).  Setting the
 * current mode again only refreshes the temp-router timers.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &p->br->multicast_ctx;
	struct net_bridge_mcast_port *pmctx = &p->multicast_ctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	spin_lock(&p->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&p->br->multicast_lock);

	return err;
}

/* Restart own queries and re-enable per-port own queries on all ports
 * that can forward traffic.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(&port->multicast_ctx.ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->multicast_ctx.ip6_own_query);
#endif
	}
	rcu_read_unlock();
}

/* Enable/disable multicast snooping on the bridge; joins/leaves the
 * all-snoopers groups after dropping the lock (see comment below).
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}

bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

/* Enable/disable acting as IGMP/MLD querier on the bridge context. */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_ctx.multicast_igmp_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_ctx.multicast_mld_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
#endif

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* build a minimal fake ethernet header so the generic
	 * querier-exists helper can dispatch on the protocol
	 */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    rcu_dereference(brmctx->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    rcu_dereference(brmctx->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
4411 */ 4412 bool br_multicast_has_router_adjacent(struct net_device *dev, int proto) 4413 { 4414 struct net_bridge_mcast_port *pmctx; 4415 struct net_bridge_mcast *brmctx; 4416 struct net_bridge_port *port; 4417 bool ret = false; 4418 4419 rcu_read_lock(); 4420 port = br_port_get_check_rcu(dev); 4421 if (!port) 4422 goto unlock; 4423 4424 brmctx = &port->br->multicast_ctx; 4425 switch (proto) { 4426 case ETH_P_IP: 4427 hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list, 4428 ip4_rlist) { 4429 if (pmctx->port == port) 4430 continue; 4431 4432 ret = true; 4433 goto unlock; 4434 } 4435 break; 4436 #if IS_ENABLED(CONFIG_IPV6) 4437 case ETH_P_IPV6: 4438 hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list, 4439 ip6_rlist) { 4440 if (pmctx->port == port) 4441 continue; 4442 4443 ret = true; 4444 goto unlock; 4445 } 4446 break; 4447 #endif 4448 default: 4449 /* when compiled without IPv6 support, be conservative and 4450 * always assume presence of an IPv6 multicast router 4451 */ 4452 ret = true; 4453 } 4454 4455 unlock: 4456 rcu_read_unlock(); 4457 return ret; 4458 } 4459 EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent); 4460 4461 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats, 4462 const struct sk_buff *skb, u8 type, u8 dir) 4463 { 4464 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats); 4465 __be16 proto = skb->protocol; 4466 unsigned int t_len; 4467 4468 u64_stats_update_begin(&pstats->syncp); 4469 switch (proto) { 4470 case htons(ETH_P_IP): 4471 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 4472 switch (type) { 4473 case IGMP_HOST_MEMBERSHIP_REPORT: 4474 pstats->mstats.igmp_v1reports[dir]++; 4475 break; 4476 case IGMPV2_HOST_MEMBERSHIP_REPORT: 4477 pstats->mstats.igmp_v2reports[dir]++; 4478 break; 4479 case IGMPV3_HOST_MEMBERSHIP_REPORT: 4480 pstats->mstats.igmp_v3reports[dir]++; 4481 break; 4482 case IGMP_HOST_MEMBERSHIP_QUERY: 4483 if (t_len != sizeof(struct igmphdr)) { 4484 
pstats->mstats.igmp_v3queries[dir]++; 4485 } else { 4486 unsigned int offset = skb_transport_offset(skb); 4487 struct igmphdr *ih, _ihdr; 4488 4489 ih = skb_header_pointer(skb, offset, 4490 sizeof(_ihdr), &_ihdr); 4491 if (!ih) 4492 break; 4493 if (!ih->code) 4494 pstats->mstats.igmp_v1queries[dir]++; 4495 else 4496 pstats->mstats.igmp_v2queries[dir]++; 4497 } 4498 break; 4499 case IGMP_HOST_LEAVE_MESSAGE: 4500 pstats->mstats.igmp_leaves[dir]++; 4501 break; 4502 } 4503 break; 4504 #if IS_ENABLED(CONFIG_IPV6) 4505 case htons(ETH_P_IPV6): 4506 t_len = ntohs(ipv6_hdr(skb)->payload_len) + 4507 sizeof(struct ipv6hdr); 4508 t_len -= skb_network_header_len(skb); 4509 switch (type) { 4510 case ICMPV6_MGM_REPORT: 4511 pstats->mstats.mld_v1reports[dir]++; 4512 break; 4513 case ICMPV6_MLD2_REPORT: 4514 pstats->mstats.mld_v2reports[dir]++; 4515 break; 4516 case ICMPV6_MGM_QUERY: 4517 if (t_len != sizeof(struct mld_msg)) 4518 pstats->mstats.mld_v2queries[dir]++; 4519 else 4520 pstats->mstats.mld_v1queries[dir]++; 4521 break; 4522 case ICMPV6_MGM_REDUCTION: 4523 pstats->mstats.mld_leaves[dir]++; 4524 break; 4525 } 4526 break; 4527 #endif /* CONFIG_IPV6 */ 4528 } 4529 u64_stats_update_end(&pstats->syncp); 4530 } 4531 4532 void br_multicast_count(struct net_bridge *br, 4533 const struct net_bridge_port *p, 4534 const struct sk_buff *skb, u8 type, u8 dir) 4535 { 4536 struct bridge_mcast_stats __percpu *stats; 4537 4538 /* if multicast_disabled is true then igmp type can't be set */ 4539 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) 4540 return; 4541 4542 if (p) 4543 stats = p->mcast_stats; 4544 else 4545 stats = br->mcast_stats; 4546 if (WARN_ON(!stats)) 4547 return; 4548 4549 br_mcast_stats_add(stats, skb, type, dir); 4550 } 4551 4552 int br_multicast_init_stats(struct net_bridge *br) 4553 { 4554 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 4555 if (!br->mcast_stats) 4556 return -ENOMEM; 4557 4558 return 0; 4559 } 4560 4561 void 
br_multicast_uninit_stats(struct net_bridge *br) 4562 { 4563 free_percpu(br->mcast_stats); 4564 } 4565 4566 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 4567 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 4568 { 4569 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 4570 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX]; 4571 } 4572 4573 void br_multicast_get_stats(const struct net_bridge *br, 4574 const struct net_bridge_port *p, 4575 struct br_mcast_stats *dest) 4576 { 4577 struct bridge_mcast_stats __percpu *stats; 4578 struct br_mcast_stats tdst; 4579 int i; 4580 4581 memset(dest, 0, sizeof(*dest)); 4582 if (p) 4583 stats = p->mcast_stats; 4584 else 4585 stats = br->mcast_stats; 4586 if (WARN_ON(!stats)) 4587 return; 4588 4589 memset(&tdst, 0, sizeof(tdst)); 4590 for_each_possible_cpu(i) { 4591 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i); 4592 struct br_mcast_stats temp; 4593 unsigned int start; 4594 4595 do { 4596 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 4597 memcpy(&temp, &cpu_stats->mstats, sizeof(temp)); 4598 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 4599 4600 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries); 4601 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries); 4602 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries); 4603 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves); 4604 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports); 4605 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports); 4606 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports); 4607 tdst.igmp_parse_errors += temp.igmp_parse_errors; 4608 4609 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries); 4610 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries); 4611 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves); 4612 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports); 4613 mcast_stats_add_dir(tdst.mld_v2reports, 
temp.mld_v2reports); 4614 tdst.mld_parse_errors += temp.mld_parse_errors; 4615 } 4616 memcpy(dest, &tdst, sizeof(*dest)); 4617 } 4618 4619 int br_mdb_hash_init(struct net_bridge *br) 4620 { 4621 int err; 4622 4623 err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params); 4624 if (err) 4625 return err; 4626 4627 err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params); 4628 if (err) { 4629 rhashtable_destroy(&br->sg_port_tbl); 4630 return err; 4631 } 4632 4633 return 0; 4634 } 4635 4636 void br_mdb_hash_fini(struct net_bridge *br) 4637 { 4638 rhashtable_destroy(&br->sg_port_tbl); 4639 rhashtable_destroy(&br->mdb_hash_tbl); 4640 } 4641