// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
#include <trace/events/bridge.h>

#include "br_private.h"
#include "br_private_mcast_eht.h"

/* MDB hash table: entries are keyed by the full group address
 * (struct br_ip, i.e. proto + dst + src + vid).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

/* S,G per-port hash table: entries are keyed by the (port, addr)
 * pair (struct net_bridge_port_group_sg_key).
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

/* Forward declarations for helpers defined later in this file. */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);

/* Find the S,G entry installed on a specific port.
 * Caller must hold br->multicast_lock (asserted below).
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

/* MDB lookup for callers already inside an RCU read-side section. */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* MDB lookup under br->multicast_lock (asserted); the RCU read lock is
 * taken internally only for the duration of the hash walk.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Look up the zero-source (*,G) IPv4 entry for @dst/@vid. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;
	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Look up the zero-source (*,G) IPv6 entry for @dst/@vid. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Find the MDB entry an skb should be forwarded according to, keyed on the
 * packet's destination. For IGMPv3/MLDv2 an S,G lookup using the packet's
 * source is tried first, falling back to the *,G entry. Returns NULL when
 * snooping is disabled for this context or the skb was classified as an
 * IGMP/MLD control packet (BR_INPUT_SKB_CB(skb)->igmp).
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* try the S,G entry first, then fall back to *,G */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* try the S,G entry first, then fall back to *,G */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* non-IP multicast: match on the destination MAC address */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}

/* Resolve the per-vlan port multicast context for @vid. Returns NULL when
 * vlan snooping is globally disabled, the vlan doesn't exist, or snooping
 * is disabled on that vlan. Caller must hold br->multicast_lock (asserted).
 */
static struct net_bridge_mcast_port *
br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx = NULL;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		return NULL;

	/* Take RCU to access the vlan. */
	rcu_read_lock();

	vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;

	rcu_read_unlock();

	return pmctx;
}

/* when snooping we need to check if the contexts should be used
 * in the following order:
 * - if pmctx is non-NULL (port), check if it should be used
 * - if pmctx is NULL (bridge), check if brmctx should be used
 */
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
			    const struct net_bridge_mcast_port *pmctx)
{
	if (!netif_running(brmctx->br->dev))
		return false;

	if (pmctx)
		return !br_multicast_port_ctx_state_disabled(pmctx);
	else
		return !br_multicast_ctx_vlan_disabled(brmctx);
}

/* Match a port group against (port, src). The source MAC only matters when
 * the port is in multicast-to-unicast mode.
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Install a kernel-managed S,G entry (flagged STAR_EXCL) on @pg's port for
 * @sg_ip, unless one already exists. Used when a *,G group is in EXCLUDE
 * mode and needs per-source replication entries.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

/* Remove a kernel-managed STAR_EXCL S,G entry previously installed by
 * __fwd_add_star_excl(); entries added by other means are left alone.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	/* start from the *,G address and substitute each installed source */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

/* Drop the automatically-installed STAR_EXCL ports from an S,G entry once
 * every remaining port is either STAR_EXCL or permanent.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

/* Propagate a new S,G entry to all EXCLUDE-mode ports of its *,G group:
 * each such port gets a kernel-managed STAR_EXCL S,G port group, and the
 * host_joined state is inherited from the *,G entry.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}

/* Install the S,G forwarding entry for a group source, unless already
 * installed (BR_SGRP_F_INSTALLED). Continues on the next line.
 */
static void
br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	/* add as blocked when the source timer isn't running */
	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

/* Remove the S,G forwarding entry installed for @src on its port.
 * User-added permanent entries are left in place unless the source itself
 * was user-added. @fastleave marks the deleted port group so the deletion
 * reason is reflected in the notification.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT) &&
		    !(src->flags & BR_SGRP_F_USER_ADDED))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* only notify user-space when the blocked state actually changed */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

/* GC destroy callback: final teardown of an MDB entry after it has been
 * unhashed and queued on the gc list; frees it after an RCU grace period.
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	timer_shutdown_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

/* Unlink an MDB entry from the hash table and list, then queue it for
 * deferred destruction via the bridge's gc work.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* mp->timer handler: drop the host join and delete the MDB entry when no
 * ports remain. Bails out if the entry was already unhashed, the device
 * stopped, or the timer was re-armed meanwhile.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

/* GC destroy callback for a group source entry. */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	timer_shutdown_sync(&src->timer);
	kfree_rcu(src, rcu);
}

/* Unlink @src from its port group and queue it for deferred destruction;
 * does not touch the S,G forwarding entry (see br_multicast_del_group_src).
 */
void __br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->key.port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* Remove a group source: first its S,G forwarding entry, then the source
 * entry itself.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	br_multicast_fwd_src_remove(src, fastleave);
	__br_multicast_del_group_src(src);
}

/* Enforce the per-context mcast_max_groups limit and bump the group count.
 * Returns -E2BIG (with extack message) when the limit is already reached.
 */
static int
br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
				  struct netlink_ext_ack *extack,
				  const char *what)
{
	u32 max = READ_ONCE(pmctx->mdb_max_entries);
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	if (max && n >= max) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
				       what, n, max);
		return -E2BIG;
	}

	WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
	return 0;
}

/* Decrement the per-context group count; warns on underflow. */
static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
{
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	WARN_ON_ONCE(n == 0);
	WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
}

/* Account a new group on the port context and, when applicable, on the
 * per-vlan context; rolls back the port count if the vlan limit trips.
 */
static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
					 const struct br_ip *group,
					 struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast_port *pmctx;
	int err;

	lockdep_assert_held_once(&port->br->multicast_lock);

	/* Always count on the port context. */
	err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
						"Port");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		return err;
	}

	/* Only count on the VLAN context if VID is given, and if snooping on
	 * that VLAN is enabled.
	 */
	if (!group->vid)
		return 0;

	pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
	if (!pmctx)
		return 0;

	err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		goto dec_one_out;
	}

	return 0;

dec_one_out:
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
	return err;
}

/* Undo br_multicast_port_ngroups_inc(): per-vlan context first (if it still
 * resolves), then the port context.
 */
static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (vid) {
		pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
		if (pmctx)
			br_multicast_port_ngroups_dec_one(pmctx);
	}
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
}

/* Read the current number of MDB entries on a context. */
u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_n_entries);
}

/* Set the mcast_max_groups limit for a context (0 = unlimited, see
 * br_multicast_port_ngroups_inc_one()).
 */
void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
{
	WRITE_ONCE(pmctx->mdb_max_entries, max);
}

/* Read the mcast_max_groups limit of a context. */
u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_max_entries);
}

/* GC destroy callback: final teardown of a port group after it has been
 * unlinked and queued on the gc list; frees it after an RCU grace period.
 */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	timer_shutdown_sync(&pg->rexmit_timer);
	timer_shutdown_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

/* Unlink @pg from @mp's port list (via @pp), tear down its sources and
 * S,G/star-exclude state, notify user-space, adjust group accounting and
 * queue the port group for deferred destruction. If the MDB entry is left
 * with no ports and no host join, fire its timer immediately to reap it.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

/* Locate @pg in its MDB entry's port list and delete it; WARNs if either
 * the MDB entry or the port group's list position cannot be found.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

/* pg->timer handler: expire a non-permanent port group. The group drops to
 * INCLUDE mode, sources whose timers ran out are removed, and the whole
 * group is deleted once its source list is empty; otherwise a notification
 * is sent if anything changed.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent, false);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* Run all queued GC destroy callbacks from the bridge's gc work. */
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}

/* Tag a locally-generated query skb with the context's vlan, unless that
 * vlan egresses untagged.
 */
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}

/* Build an IGMP (v2/v3) membership query skb for @ip_dst/@group.
 * NOTE(review): definition continues beyond this chunk of the file.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs,
						    bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* pre-count the sources on the requested side of the
			 * last member query time so nsrcs and the header size
			 * can be fixed before the skb is allocated
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + ipv4 (incl. 4 option bytes for Router Alert) + igmp */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24 bytes: 20-byte header + 4-byte RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* max resp time: LMQI for group-specific queries, otherwise
		 * the general query response interval, in 1/10 s units
		 */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill the sources counted above, consuming one
		 * rexmit credit per source included in the query
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* count mismatch vs. the first pass means the list changed
		 * under us - drop the inconsistent packet
		 */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));
out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLDv1/v2 query skb, the IPv6 counterpart of the IGMP builder
 * above: general, group-specific or group-and-source-specific query with
 * a Hop-by-Hop Router Alert option.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* pre-count sources on the requested side of the last
			 * listener query time to size the header up front
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* eth + ipv6 + 8-byte Hop-by-Hop option + mld */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* version 6, traffic class 0, flow label 0 */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* no usable link-local source address: remember that and give up */
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill the sources counted above, consuming one
		 * rexmit credit per source included in the query
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* mismatch vs. the first pass: list changed under us, drop */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Protocol dispatcher for query allocation (continues on the next chunk). */
static struct sk_buff
*br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
			  struct net_bridge_mcast_port *pmctx,
			  struct net_bridge_port_group *pg,
			  struct br_ip *ip_dst,
			  struct br_ip *group,
			  bool with_srcs, bool over_lmqt,
			  u8 sflag, u8 *igmp_type,
			  bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		/* no explicit destination: send to all-hosts (224.0.0.1) */
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* all-nodes link-local address ff02::1 */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Find or create the mdb entry for @group. May disable multicast snooping
 * on the bridge entirely when the configured hash_max limit is hit.
 * Returns the entry or an ERR_PTR; caller holds br->multicast_lock.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		trace_br_mdb_full(br->dev, group);
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	/* GFP_ATOMIC: called under the multicast spinlock */
	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Source entry timer: in INCLUDE mode an expired source is removed (and
 * the port group too if it was the last one); in EXCLUDE mode the source
 * is handed to the forwarding-state handler instead.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

/* Linear search of @pg's source list for the address in @ip. */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate and link a new source entry on @pg. Returns NULL when the
 * per-group source limit is reached, the address is invalid as a source
 * (zeronet/multicast for IPv4, any/multicast for IPv6), or on OOM.
 */
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return
				NULL;
		break;
#endif
	}

	/* GFP_ATOMIC: called under the multicast spinlock */
	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

/* Allocate a new port group entry and link it into the port's group list
 * (and, for (S,G) groups, the bridge S,G rhashtable). @next becomes the
 * RCU successor in the mdb entry's port list. Returns NULL on failure
 * with the reason in @extack; the per-port group accounting is undone on
 * every error path.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
	struct net_bridge_port *port,
	const struct br_ip *group,
	struct net_bridge_port_group __rcu *next,
	unsigned char flags,
	const unsigned char *src,
	u8 filter_mode,
	u8 rt_protocol,
	struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *p;
	int err;

	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		goto dec_out;
	}

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* only (S,G) entries are indexed in the S,G port rhashtable */
	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
		goto free_out;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;

free_out:
	kfree(p);
dec_out:
	br_multicast_port_ngroups_dec(port, group->vid);
	return NULL;
}

/* Immediate (non-deferred) teardown of a port group that was never made
 * visible via the mdb entry's port list; counterpart of the function above.
 */
void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
	struct net_bridge_port *port = p->key.port;
	__u16 vid = p->key.addr.vid;

	hlist_del_init(&p->mglist);
	if (!br_multicast_is_star_g(&p->key.addr))
		rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
				       br_sg_port_rht_params);
	kfree(p);
	br_multicast_port_ngroups_dec(port, vid);
}

/* Mark @mp as joined by the bridge (host) itself and refresh its
 * membership timer; L2 groups are not timer-driven.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}

/* Clear the host-joined state on @mp, optionally notifying userspace. */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Core join handling: find/create the mdb entry and attach the reporting
 * port (or mark a host join when @pmctx is NULL).
 * Called with br->multicast_lock held; continues on the next chunk.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br,
				    group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	/* the port list is kept sorted by descending port pointer value;
	 * stop at the insertion point or at an existing matching entry
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	/* v2/v1 reports carry no source lists: refresh the group timer */
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}

/* Locked wrapper around __br_multicast_add_group() returning an errno. */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}

/* IGMP join entry point: build the br_ip key for an IPv4 group and add
 * it. Link-local groups (224.0.0.x) are always flooded, never tracked.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	/* v2 reports have no source filtering and imply EXCLUDE {} */
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD join entry point, IPv6 counterpart of the function above. */
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif

/* Unhash a router-port list node; returns true if it was actually linked. */
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}

/* Router-port timer: drop the port from the router list unless its router
 * mode is fixed by configuration or the timer was re-armed meanwhile.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}

static void
br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

/* Propagate the bridge's own multicast-router state to switchdev. */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Bridge-level router timer: clear the switchdev mrouter state once the
 * bridge is a router for neither IPv4 nor IPv6 (and the mode allows it).
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif

/* Other-querier-present timer ran out: take over as querier ourselves,
 * provided this context is running and multicast snooping is enabled.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

/* Intentionally empty: the delay timer only needs to exist/expire. */
static void br_multicast_query_delay_expired(struct timer_list *t)
{
}

/* Record our own source address as the active querier address. */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and send one query (continues on the next chunk). When sent on a
 * port with sflag set, a second pass re-sends for sources under the last
 * member query time.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag,
				       &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		/* transmit out of the specific port through netfilter */
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* with suppression set, also send the under-LMQT variant */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		/* bridge-originated query: loop it back into the rx path */
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Seqcount-protected snapshot of the current querier information. */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}

/* Seqcount-protected update of the current querier information. */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}

/* Send a general query for the protocol family that @own_query belongs to
 * and re-arm the own-query timer (startup interval while still within the
 * startup query count, regular interval afterwards). Does nothing while a
 * foreign querier is present or the querier role is disabled.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* figure out the family by which own_query was passed in */
	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Per-port own-query timer body: send the next (startup) query unless the
 * port context has been stopped.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list
						*t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif

/* Retransmission timer for group/group-and-source specific queries sent
 * on leave: keeps re-sending while either the group rexmit counter or any
 * per-source rexmit counter is non-zero, but only while we are the active
 * querier for that family.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* a foreign querier is responsible for rexmits - stay quiet */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		/* group-specific query with suppress-router-processing set */
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	/* group-and-source specific query; sets need_rexmit if any source
	 * still has rexmit credit left
	 */
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

/* Tell switchdev whether multicast snooping is disabled on @dev.
 * Note @value is the "enabled" state; it is inverted for the attribute.
 */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}

/* Initialize a per-port (or per-port-vlan, when @vlan != NULL) multicast
 * context: default router mode and all the timers it owns.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}

/* Synchronously stop the router timers of a port multicast context. */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}

/* Set up multicast state when @port is added to a bridge. An -EOPNOTSUPP
 * from switchdev is tolerated (no offload available).
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Tear down multicast state when @port leaves the bridge; the queued gc
 * entries are flushed synchronously here (continues on the next chunk).
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
hlist_move_list(&br->mcast_gc_list, &deleted_head); 2055 spin_unlock_bh(&br->multicast_lock); 2056 br_multicast_gc(&deleted_head); 2057 br_multicast_port_ctx_deinit(&port->multicast_ctx); 2058 free_percpu(port->mcast_stats); 2059 } 2060 2061 static void br_multicast_enable(struct bridge_mcast_own_query *query) 2062 { 2063 query->startup_sent = 0; 2064 2065 if (try_to_del_timer_sync(&query->timer) >= 0 || 2066 del_timer(&query->timer)) 2067 mod_timer(&query->timer, jiffies); 2068 } 2069 2070 static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx) 2071 { 2072 struct net_bridge *br = pmctx->port->br; 2073 struct net_bridge_mcast *brmctx; 2074 2075 brmctx = br_multicast_port_ctx_get_global(pmctx); 2076 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || 2077 !netif_running(br->dev)) 2078 return; 2079 2080 br_multicast_enable(&pmctx->ip4_own_query); 2081 #if IS_ENABLED(CONFIG_IPV6) 2082 br_multicast_enable(&pmctx->ip6_own_query); 2083 #endif 2084 if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) { 2085 br_ip4_multicast_add_router(brmctx, pmctx); 2086 br_ip6_multicast_add_router(brmctx, pmctx); 2087 } 2088 2089 if (br_multicast_port_ctx_is_vlan(pmctx)) { 2090 struct net_bridge_port_group *pg; 2091 u32 n = 0; 2092 2093 /* The mcast_n_groups counter might be wrong. First, 2094 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries 2095 * are flushed, thus mcast_n_groups after the toggle does not 2096 * reflect the true values. And second, permanent entries added 2097 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected 2098 * either. Thus we have to refresh the counter. 
2099 */ 2100 2101 hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) { 2102 if (pg->key.addr.vid == pmctx->vlan->vid) 2103 n++; 2104 } 2105 WRITE_ONCE(pmctx->mdb_n_entries, n); 2106 } 2107 } 2108 2109 void br_multicast_enable_port(struct net_bridge_port *port) 2110 { 2111 struct net_bridge *br = port->br; 2112 2113 spin_lock_bh(&br->multicast_lock); 2114 __br_multicast_enable_port_ctx(&port->multicast_ctx); 2115 spin_unlock_bh(&br->multicast_lock); 2116 } 2117 2118 static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx) 2119 { 2120 struct net_bridge_port_group *pg; 2121 struct hlist_node *n; 2122 bool del = false; 2123 2124 hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist) 2125 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) && 2126 (!br_multicast_port_ctx_is_vlan(pmctx) || 2127 pg->key.addr.vid == pmctx->vlan->vid)) 2128 br_multicast_find_del_pg(pmctx->port->br, pg); 2129 2130 del |= br_ip4_multicast_rport_del(pmctx); 2131 del_timer(&pmctx->ip4_mc_router_timer); 2132 del_timer(&pmctx->ip4_own_query.timer); 2133 del |= br_ip6_multicast_rport_del(pmctx); 2134 #if IS_ENABLED(CONFIG_IPV6) 2135 del_timer(&pmctx->ip6_mc_router_timer); 2136 del_timer(&pmctx->ip6_own_query.timer); 2137 #endif 2138 br_multicast_rport_del_notify(pmctx, del); 2139 } 2140 2141 void br_multicast_disable_port(struct net_bridge_port *port) 2142 { 2143 spin_lock_bh(&port->br->multicast_lock); 2144 __br_multicast_disable_port_ctx(&port->multicast_ctx); 2145 spin_unlock_bh(&port->br->multicast_lock); 2146 } 2147 2148 static int __grp_src_delete_marked(struct net_bridge_port_group *pg) 2149 { 2150 struct net_bridge_group_src *ent; 2151 struct hlist_node *tmp; 2152 int deleted = 0; 2153 2154 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) 2155 if (ent->flags & BR_SGRP_F_DELETE) { 2156 br_multicast_del_group_src(ent, false); 2157 deleted++; 2158 } 2159 2160 return deleted; 2161 } 2162 2163 static void __grp_src_mod_timer(struct net_bridge_group_src *src, 
unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

/* Send group-and-source-specific queries for all sources in @pg marked
 * BR_SGRP_F_SEND, lowering their timers towards LMQT, and arm the group's
 * retransmit timer — but only while we are the active querier.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

/* Send a group-specific query for @pg and arm its retransmissions; also
 * lower the group timer towards LMQT when the group is in EXCLUDE mode.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *
 * Returns true if the source set of @pg changed.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark the reported sources */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 *
 * Returns true if the source set of @pg changed.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Handle an IS_EX record: dispatch on the current filter mode and switch
 * the group to EXCLUDE with a refreshed group timer.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* mark everything for querying, then unmark the reported sources */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip,
0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only sources with a running timer (set X) are candidates to query */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a TO_IN record: dispatch on the current filter mode and remove
 * the group if EHT tracking says no hosts remain.
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a TO_EX record: dispatch on the current filter mode and switch
 * the group to EXCLUDE with a refreshed group timer.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if
(to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a BLOCK record: dispatch on the current filter mode, and delete
 * the group when it ends up as INCLUDE with an empty source list or when
 * EHT tracking says no hosts remain.
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* Find @mp's port-group entry matching port @p and (optionally) host
 * source MAC @src; returns NULL if none exists.
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Process an IGMPv3 membership report: validate each group record, update
 * group membership and, unless running in IGMPv2 compatibility mode, apply
 * the per-record source-list state transitions. Returns 0 or -EINVAL on a
 * malformed packet.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* an empty INCLUDE record is a leave */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		if (!pmctx || igmpv2)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Process an MLDv2 report: the IPv6 counterpart of
 * br_ip4_multicast_igmp3_report(). Returns 0 or -EINVAL on a malformed
 * packet.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
case MLD2_CHANGE_TO_INCLUDE: 2957 case MLD2_CHANGE_TO_EXCLUDE: 2958 case MLD2_ALLOW_NEW_SOURCES: 2959 case MLD2_BLOCK_OLD_SOURCES: 2960 break; 2961 2962 default: 2963 continue; 2964 } 2965 2966 src = eth_hdr(skb)->h_source; 2967 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 2968 grec->grec_type == MLD2_MODE_IS_INCLUDE) && 2969 nsrcs == 0) { 2970 if (!pmctx || mldv1) { 2971 br_ip6_multicast_leave_group(brmctx, pmctx, 2972 &grec->grec_mca, 2973 vid, src); 2974 continue; 2975 } 2976 } else { 2977 err = br_ip6_multicast_add_group(brmctx, pmctx, 2978 &grec->grec_mca, vid, 2979 src, mldv1); 2980 if (err) 2981 break; 2982 } 2983 2984 if (!pmctx || mldv1) 2985 continue; 2986 2987 spin_lock(&brmctx->br->multicast_lock); 2988 if (!br_multicast_ctx_should_use(brmctx, pmctx)) 2989 goto unlock_continue; 2990 2991 mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid); 2992 if (!mdst) 2993 goto unlock_continue; 2994 pg = br_multicast_find_port(mdst, pmctx->port, src); 2995 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT)) 2996 goto unlock_continue; 2997 h_addr = &ipv6_hdr(skb)->saddr; 2998 switch (grec->grec_type) { 2999 case MLD2_ALLOW_NEW_SOURCES: 3000 changed = br_multicast_isinc_allow(brmctx, pg, h_addr, 3001 grec->grec_src, nsrcs, 3002 sizeof(struct in6_addr), 3003 grec->grec_type); 3004 break; 3005 case MLD2_MODE_IS_INCLUDE: 3006 changed = br_multicast_isinc_allow(brmctx, pg, h_addr, 3007 grec->grec_src, nsrcs, 3008 sizeof(struct in6_addr), 3009 grec->grec_type); 3010 break; 3011 case MLD2_MODE_IS_EXCLUDE: 3012 changed = br_multicast_isexc(brmctx, pg, h_addr, 3013 grec->grec_src, nsrcs, 3014 sizeof(struct in6_addr), 3015 grec->grec_type); 3016 break; 3017 case MLD2_CHANGE_TO_INCLUDE: 3018 changed = br_multicast_toin(brmctx, pmctx, pg, h_addr, 3019 grec->grec_src, nsrcs, 3020 sizeof(struct in6_addr), 3021 grec->grec_type); 3022 break; 3023 case MLD2_CHANGE_TO_EXCLUDE: 3024 changed = br_multicast_toex(brmctx, pmctx, pg, h_addr, 3025 grec->grec_src, nsrcs, 3026 
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif

/* Querier election: decide whether the query source @saddr should become
 * (or stay) the selected querier for its protocol.  An unset current
 * address or a numerically lower/equal source address wins; if both the
 * own-query and other-querier timers have expired the new source is
 * accepted unconditionally.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	/* ifindex 0 means the query came from the bridge device itself */
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		/* no querier recorded yet, or lower address wins election */
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* neither our own nor the other querier's timer is running:
	 * no active querier, accept the new one
	 */
	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}

/* Resolve the recorded querier port ifindex back to a bridge port.
 * Returns NULL when the index is unset, the device is gone, or the
 * device is not a port of @br.  Caller must hold rcu or rtnl.
 */
static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
				const struct bridge_mcast_querier *querier)
{
	int port_ifidx = READ_ONCE(querier->port_ifidx);
	struct net_bridge_port *p;
	struct net_device *dev;

	if (port_ifidx == 0)
		return NULL;

	dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
	if (!dev)
		return NULL;
	p = br_port_get_rtnl_rcu(dev);
	if (!p || p->br != br)
		return NULL;

	return p;
}

/* Worst-case netlink size of the querier state dumped below. */
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +		/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +	/* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +		 /* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) +	 /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}

/* protected by rtnl or rcu */
/* Dump the current IPv4/IPv6 querier state for @brmctx into a nested
 * netlink attribute @nest_attr.  The nest is cancelled again when it
 * ends up empty.  Returns 0 or -EMSGSIZE when the skb has no room.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	/* nothing to report for IPv4: we are not querier and no other
	 * querier is known to be present
	 */
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	/* timer value and port are only meaningful while the other-querier
	 * timer is running
	 */
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* drop the nest entirely if no attribute was added */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

/* (Re)arm the other-querier-present timer for a foreign querier.  If it
 * was not already running, also arm the delay timer with the advertised
 * @max_delay (presumably to postpone our own queries until the foreign
 * querier had a chance to answer — see br_multicast_query_delay_expired;
 * verify against the timer callback, which is outside this chunk).
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		mod_timer(&query->delay_timer, jiffies + max_delay);

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

/* Propagate the per-port multicast router state to switchdev drivers
 * (deferred attr set, so callable under the multicast lock).
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Map a router-list node back to the bridge port it belongs to; works
 * for both the IPv4 and (when enabled) IPv6 router lists.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;
#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

/* Find the node to insert a new router port behind, keeping the list
 * ordered by descending port pointer value (the loop stops at the first
 * entry whose port pointer is <= @port).  Returns NULL to insert at the
 * head.
 */
static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)

{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

/* Return true if the port is not linked into the router list of the
 * *other* protocol family than the one @rnode belongs to (always true
 * without IPv6 support).
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already linked in, nothing to do */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

/* Mark the bridge itself (@pmctx == NULL) or a port as having a
 * multicast router present and (re)arm the corresponding router timer.
 * Ports configured as DISABLED or PERM router are left alone.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* notify only on the no-router -> router transition */
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

/* A foreign IGMP querier was heard: run the election and, if it wins,
 * restart the other-querier timer and mark the ingress as router port.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query_received(). */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

/* Process a received IGMP query: run querier election for general
 * queries, and for group-specific queries shorten the membership timers
 * of the matching mdb entry and its ports so non-responding members age
 * out after last_member_count * max_delay.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv2 query; code is in 1/10 s units */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		/* IGMPv1 query (code 0): fixed delay, treat as general */
		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* ignore queries with sources, or suppressed
		 * group-specific queries when running IGMPv3
		 */
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query().  Returns 0 or -EINVAL
 * on a truncated packet.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* ignore suppressed group-specific queries when MLDv2 */
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

/* Common IGMP/MLD leave handling: with fast-leave enabled delete the
 * port group immediately; otherwise, if we are querier, send a
 * group-specific query and shorten the relevant membership timers to
 * last_member_count * last_member_interval.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			/* permanent entries survive fast-leave */
			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is active; let it handle the leave */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		/* send a group-specific query and rearm our own query
		 * timer for the last-member interval
		 */
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			/* only shorten the timer, never extend it */
			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host-side leave: shorten the mdb entry timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

/* Handle an IGMP leave for @group/@vid; link-local groups are ignored. */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Handle an MLD done/leave for @group/@vid; the all-nodes group is
 * ignored.
 */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

/* Account an IGMP/MLD parse error against the per-port (or bridge)
 * multicast statistics, if statistics are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIM Hello on a port indicates a multicast router: mark it as such. */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* Handle an IPv4 Multicast Router Discovery advertisement; anything
 * else returns -ENOMSG.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

/* Validate and dispatch an IPv4 multicast control packet (IGMP, PIM
 * hello, MRD).  Sets BR_INPUT_SKB_CB fields consumed by the forwarding
 * path and updates the multicast statistics.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* not IGMP: non-link-local data goes to mrouter ports
		 * only; PIM/MRD control packets mark router ports
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Handle an IPv6 Multicast Router Discovery advertisement. */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IPv6 counterpart of br_multicast_ipv4_rcv(): validate and dispatch
 * MLD/MRD control packets and update statistics.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Entry point from the bridge input path: select the multicast context
 * (global or per-vlan, possibly rewriting *brmctx/*pmctx) and dispatch
 * by protocol.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer expired: send the next (startup) query unless the
 * vlan context is disabled.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

/* Deferred destruction of mdb/port-group entries queued on
 * br->mcast_gc_list; runs without the multicast lock held.
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

/* Initialize a multicast context (global bridge context or per-vlan
 * context) with the protocol default intervals and timers.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

/* Tear down a multicast context: stop all of its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

/* One-time bridge-level multicast initialization. */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device
 * so MRD advertisements are received.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

/* Leave the IPv4 all-snoopers group joined above. */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group joined above. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

/* Restart the startup query cycle for one own-query instance. */
static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

/* Bridge device brought up: start querying in the active context(s) —
 * either every enabled per-vlan context or the global one.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

/* Stop all timers of a multicast context, waiting for running handlers. */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_other_query.delay_timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_other_query.delay_timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

/* Enable/disable multicast processing for a single vlan (master entry
 * or port vlan), flipping BR_VLFLAG_MCAST_ENABLED under the multicast
 * lock and starting/stopping the matching context.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

/* Toggle a master vlan and all of its per-port instances. */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}

/* Switch between global and per-vlan multicast snooping.  Requires vlan
 * filtering to be enabled; flips every existing vlan and moves the
 * non-vlan contexts/ports to the opposite state.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

/* Toggle the global (bridge-wide) multicast state of a master vlan.
 * Returns true if the state changed.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

return true; 4333 } 4334 4335 void br_multicast_stop(struct net_bridge *br) 4336 { 4337 ASSERT_RTNL(); 4338 4339 if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) { 4340 struct net_bridge_vlan_group *vg; 4341 struct net_bridge_vlan *vlan; 4342 4343 vg = br_vlan_group(br); 4344 if (vg) { 4345 list_for_each_entry(vlan, &vg->vlan_list, vlist) { 4346 struct net_bridge_mcast *brmctx; 4347 4348 brmctx = &vlan->br_mcast_ctx; 4349 if (br_vlan_is_brentry(vlan) && 4350 !br_multicast_ctx_vlan_disabled(brmctx)) 4351 __br_multicast_stop(&vlan->br_mcast_ctx); 4352 } 4353 } 4354 } else { 4355 __br_multicast_stop(&br->multicast_ctx); 4356 } 4357 } 4358 4359 void br_multicast_dev_del(struct net_bridge *br) 4360 { 4361 struct net_bridge_mdb_entry *mp; 4362 HLIST_HEAD(deleted_head); 4363 struct hlist_node *tmp; 4364 4365 spin_lock_bh(&br->multicast_lock); 4366 hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) 4367 br_multicast_del_mdb_entry(mp); 4368 hlist_move_list(&br->mcast_gc_list, &deleted_head); 4369 spin_unlock_bh(&br->multicast_lock); 4370 4371 br_multicast_ctx_deinit(&br->multicast_ctx); 4372 br_multicast_gc(&deleted_head); 4373 cancel_work_sync(&br->mcast_gc_work); 4374 4375 rcu_barrier(); 4376 } 4377 4378 int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val) 4379 { 4380 int err = -EINVAL; 4381 4382 spin_lock_bh(&brmctx->br->multicast_lock); 4383 4384 switch (val) { 4385 case MDB_RTR_TYPE_DISABLED: 4386 case MDB_RTR_TYPE_PERM: 4387 br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM); 4388 del_timer(&brmctx->ip4_mc_router_timer); 4389 #if IS_ENABLED(CONFIG_IPV6) 4390 del_timer(&brmctx->ip6_mc_router_timer); 4391 #endif 4392 brmctx->multicast_router = val; 4393 err = 0; 4394 break; 4395 case MDB_RTR_TYPE_TEMP_QUERY: 4396 if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY) 4397 br_mc_router_state_change(brmctx->br, false); 4398 brmctx->multicast_router = val; 4399 err = 0; 4400 break; 4401 } 4402 4403 
spin_unlock_bh(&brmctx->br->multicast_lock); 4404 4405 return err; 4406 } 4407 4408 static void 4409 br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted) 4410 { 4411 if (!deleted) 4412 return; 4413 4414 /* For backwards compatibility for now, only notify if there is 4415 * no multicast router anymore for both IPv4 and IPv6. 4416 */ 4417 if (!hlist_unhashed(&pmctx->ip4_rlist)) 4418 return; 4419 #if IS_ENABLED(CONFIG_IPV6) 4420 if (!hlist_unhashed(&pmctx->ip6_rlist)) 4421 return; 4422 #endif 4423 4424 br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB); 4425 br_port_mc_router_state_change(pmctx->port, false); 4426 4427 /* don't allow timer refresh */ 4428 if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) 4429 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 4430 } 4431 4432 int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx, 4433 unsigned long val) 4434 { 4435 struct net_bridge_mcast *brmctx; 4436 unsigned long now = jiffies; 4437 int err = -EINVAL; 4438 bool del = false; 4439 4440 brmctx = br_multicast_port_ctx_get_global(pmctx); 4441 spin_lock_bh(&brmctx->br->multicast_lock); 4442 if (pmctx->multicast_router == val) { 4443 /* Refresh the temp router port timer */ 4444 if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) { 4445 mod_timer(&pmctx->ip4_mc_router_timer, 4446 now + brmctx->multicast_querier_interval); 4447 #if IS_ENABLED(CONFIG_IPV6) 4448 mod_timer(&pmctx->ip6_mc_router_timer, 4449 now + brmctx->multicast_querier_interval); 4450 #endif 4451 } 4452 err = 0; 4453 goto unlock; 4454 } 4455 switch (val) { 4456 case MDB_RTR_TYPE_DISABLED: 4457 pmctx->multicast_router = MDB_RTR_TYPE_DISABLED; 4458 del |= br_ip4_multicast_rport_del(pmctx); 4459 del_timer(&pmctx->ip4_mc_router_timer); 4460 del |= br_ip6_multicast_rport_del(pmctx); 4461 #if IS_ENABLED(CONFIG_IPV6) 4462 del_timer(&pmctx->ip6_mc_router_timer); 4463 #endif 4464 br_multicast_rport_del_notify(pmctx, del); 4465 break; 4466 case MDB_RTR_TYPE_TEMP_QUERY: 
4467 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 4468 del |= br_ip4_multicast_rport_del(pmctx); 4469 del |= br_ip6_multicast_rport_del(pmctx); 4470 br_multicast_rport_del_notify(pmctx, del); 4471 break; 4472 case MDB_RTR_TYPE_PERM: 4473 pmctx->multicast_router = MDB_RTR_TYPE_PERM; 4474 del_timer(&pmctx->ip4_mc_router_timer); 4475 br_ip4_multicast_add_router(brmctx, pmctx); 4476 #if IS_ENABLED(CONFIG_IPV6) 4477 del_timer(&pmctx->ip6_mc_router_timer); 4478 #endif 4479 br_ip6_multicast_add_router(brmctx, pmctx); 4480 break; 4481 case MDB_RTR_TYPE_TEMP: 4482 pmctx->multicast_router = MDB_RTR_TYPE_TEMP; 4483 br_ip4_multicast_mark_router(brmctx, pmctx); 4484 br_ip6_multicast_mark_router(brmctx, pmctx); 4485 break; 4486 default: 4487 goto unlock; 4488 } 4489 err = 0; 4490 unlock: 4491 spin_unlock_bh(&brmctx->br->multicast_lock); 4492 4493 return err; 4494 } 4495 4496 int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router) 4497 { 4498 int err; 4499 4500 if (br_vlan_is_master(v)) 4501 err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router); 4502 else 4503 err = br_multicast_set_port_router(&v->port_mcast_ctx, 4504 mcast_router); 4505 4506 return err; 4507 } 4508 4509 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx, 4510 struct bridge_mcast_own_query *query) 4511 { 4512 struct net_bridge_port *port; 4513 4514 if (!br_multicast_ctx_matches_vlan_snooping(brmctx)) 4515 return; 4516 4517 __br_multicast_open_query(brmctx->br, query); 4518 4519 rcu_read_lock(); 4520 list_for_each_entry_rcu(port, &brmctx->br->port_list, list) { 4521 struct bridge_mcast_own_query *ip4_own_query; 4522 #if IS_ENABLED(CONFIG_IPV6) 4523 struct bridge_mcast_own_query *ip6_own_query; 4524 #endif 4525 4526 if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx)) 4527 continue; 4528 4529 if (br_multicast_ctx_is_vlan(brmctx)) { 4530 struct net_bridge_vlan *vlan; 4531 4532 vlan = br_vlan_find(nbp_vlan_group_rcu(port), 4533 brmctx->vlan->vid); 4534 
if (!vlan || 4535 br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx)) 4536 continue; 4537 4538 ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query; 4539 #if IS_ENABLED(CONFIG_IPV6) 4540 ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query; 4541 #endif 4542 } else { 4543 ip4_own_query = &port->multicast_ctx.ip4_own_query; 4544 #if IS_ENABLED(CONFIG_IPV6) 4545 ip6_own_query = &port->multicast_ctx.ip6_own_query; 4546 #endif 4547 } 4548 4549 if (query == &brmctx->ip4_own_query) 4550 br_multicast_enable(ip4_own_query); 4551 #if IS_ENABLED(CONFIG_IPV6) 4552 else 4553 br_multicast_enable(ip6_own_query); 4554 #endif 4555 } 4556 rcu_read_unlock(); 4557 } 4558 4559 int br_multicast_toggle(struct net_bridge *br, unsigned long val, 4560 struct netlink_ext_ack *extack) 4561 { 4562 struct net_bridge_port *port; 4563 bool change_snoopers = false; 4564 int err = 0; 4565 4566 spin_lock_bh(&br->multicast_lock); 4567 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) 4568 goto unlock; 4569 4570 err = br_mc_disabled_update(br->dev, val, extack); 4571 if (err == -EOPNOTSUPP) 4572 err = 0; 4573 if (err) 4574 goto unlock; 4575 4576 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); 4577 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { 4578 change_snoopers = true; 4579 goto unlock; 4580 } 4581 4582 if (!netif_running(br->dev)) 4583 goto unlock; 4584 4585 br_multicast_open(br); 4586 list_for_each_entry(port, &br->port_list, list) 4587 __br_multicast_enable_port_ctx(&port->multicast_ctx); 4588 4589 change_snoopers = true; 4590 4591 unlock: 4592 spin_unlock_bh(&br->multicast_lock); 4593 4594 /* br_multicast_join_snoopers has the potential to cause 4595 * an MLD Report/Leave to be delivered to br_multicast_rcv, 4596 * which would in turn call br_multicast_add_group, which would 4597 * attempt to acquire multicast_lock. This function should be 4598 * called after the lock has been released to avoid deadlocks on 4599 * multicast_lock. 
4600 * 4601 * br_multicast_leave_snoopers does not have the problem since 4602 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and 4603 * returns without calling br_multicast_ipv4/6_rcv if it's not 4604 * enabled. Moved both functions out just for symmetry. 4605 */ 4606 if (change_snoopers) { 4607 if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 4608 br_multicast_join_snoopers(br); 4609 else 4610 br_multicast_leave_snoopers(br); 4611 } 4612 4613 return err; 4614 } 4615 4616 bool br_multicast_enabled(const struct net_device *dev) 4617 { 4618 struct net_bridge *br = netdev_priv(dev); 4619 4620 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED); 4621 } 4622 EXPORT_SYMBOL_GPL(br_multicast_enabled); 4623 4624 bool br_multicast_router(const struct net_device *dev) 4625 { 4626 struct net_bridge *br = netdev_priv(dev); 4627 bool is_router; 4628 4629 spin_lock_bh(&br->multicast_lock); 4630 is_router = br_multicast_is_router(&br->multicast_ctx, NULL); 4631 spin_unlock_bh(&br->multicast_lock); 4632 return is_router; 4633 } 4634 EXPORT_SYMBOL_GPL(br_multicast_router); 4635 4636 int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val) 4637 { 4638 unsigned long max_delay; 4639 4640 val = !!val; 4641 4642 spin_lock_bh(&brmctx->br->multicast_lock); 4643 if (brmctx->multicast_querier == val) 4644 goto unlock; 4645 4646 WRITE_ONCE(brmctx->multicast_querier, val); 4647 if (!val) 4648 goto unlock; 4649 4650 max_delay = brmctx->multicast_query_response_interval; 4651 4652 if (!timer_pending(&brmctx->ip4_other_query.timer)) 4653 mod_timer(&brmctx->ip4_other_query.delay_timer, 4654 jiffies + max_delay); 4655 4656 br_multicast_start_querier(brmctx, &brmctx->ip4_own_query); 4657 4658 #if IS_ENABLED(CONFIG_IPV6) 4659 if (!timer_pending(&brmctx->ip6_other_query.timer)) 4660 mod_timer(&brmctx->ip6_other_query.delay_timer, 4661 jiffies + max_delay); 4662 4663 br_multicast_start_querier(brmctx, &brmctx->ip6_own_query); 4664 #endif 4665 4666 unlock: 4667 
spin_unlock_bh(&brmctx->br->multicast_lock); 4668 4669 return 0; 4670 } 4671 4672 int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx, 4673 unsigned long val) 4674 { 4675 /* Currently we support only version 2 and 3 */ 4676 switch (val) { 4677 case 2: 4678 case 3: 4679 break; 4680 default: 4681 return -EINVAL; 4682 } 4683 4684 spin_lock_bh(&brmctx->br->multicast_lock); 4685 brmctx->multicast_igmp_version = val; 4686 spin_unlock_bh(&brmctx->br->multicast_lock); 4687 4688 return 0; 4689 } 4690 4691 #if IS_ENABLED(CONFIG_IPV6) 4692 int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx, 4693 unsigned long val) 4694 { 4695 /* Currently we support version 1 and 2 */ 4696 switch (val) { 4697 case 1: 4698 case 2: 4699 break; 4700 default: 4701 return -EINVAL; 4702 } 4703 4704 spin_lock_bh(&brmctx->br->multicast_lock); 4705 brmctx->multicast_mld_version = val; 4706 spin_unlock_bh(&brmctx->br->multicast_lock); 4707 4708 return 0; 4709 } 4710 #endif 4711 4712 void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, 4713 unsigned long val) 4714 { 4715 unsigned long intvl_jiffies = clock_t_to_jiffies(val); 4716 4717 if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) { 4718 br_info(brmctx->br, 4719 "trying to set multicast query interval below minimum, setting to %lu (%ums)\n", 4720 jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN), 4721 jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN)); 4722 intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN; 4723 } 4724 4725 brmctx->multicast_query_interval = intvl_jiffies; 4726 } 4727 4728 void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, 4729 unsigned long val) 4730 { 4731 unsigned long intvl_jiffies = clock_t_to_jiffies(val); 4732 4733 if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) { 4734 br_info(brmctx->br, 4735 "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n", 4736 jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN), 4737 
jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN)); 4738 intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN; 4739 } 4740 4741 brmctx->multicast_startup_query_interval = intvl_jiffies; 4742 } 4743 4744 /** 4745 * br_multicast_list_adjacent - Returns snooped multicast addresses 4746 * @dev: The bridge port adjacent to which to retrieve addresses 4747 * @br_ip_list: The list to store found, snooped multicast IP addresses in 4748 * 4749 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast 4750 * snooping feature on all bridge ports of dev's bridge device, excluding 4751 * the addresses from dev itself. 4752 * 4753 * Returns the number of items added to br_ip_list. 4754 * 4755 * Notes: 4756 * - br_ip_list needs to be initialized by caller 4757 * - br_ip_list might contain duplicates in the end 4758 * (needs to be taken care of by caller) 4759 * - br_ip_list needs to be freed by caller 4760 */ 4761 int br_multicast_list_adjacent(struct net_device *dev, 4762 struct list_head *br_ip_list) 4763 { 4764 struct net_bridge *br; 4765 struct net_bridge_port *port; 4766 struct net_bridge_port_group *group; 4767 struct br_ip_list *entry; 4768 int count = 0; 4769 4770 rcu_read_lock(); 4771 if (!br_ip_list || !netif_is_bridge_port(dev)) 4772 goto unlock; 4773 4774 port = br_port_get_rcu(dev); 4775 if (!port || !port->br) 4776 goto unlock; 4777 4778 br = port->br; 4779 4780 list_for_each_entry_rcu(port, &br->port_list, list) { 4781 if (!port->dev || port->dev == dev) 4782 continue; 4783 4784 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 4785 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 4786 if (!entry) 4787 goto unlock; 4788 4789 entry->addr = group->key.addr; 4790 list_add(&entry->list, br_ip_list); 4791 count++; 4792 } 4793 } 4794 4795 unlock: 4796 rcu_read_unlock(); 4797 return count; 4798 } 4799 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); 4800 4801 /** 4802 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge 4803 * 
@dev: The bridge port providing the bridge on which to check for a querier 4804 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 4805 * 4806 * Checks whether the given interface has a bridge on top and if so returns 4807 * true if a valid querier exists anywhere on the bridged link layer. 4808 * Otherwise returns false. 4809 */ 4810 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 4811 { 4812 struct net_bridge *br; 4813 struct net_bridge_port *port; 4814 struct ethhdr eth; 4815 bool ret = false; 4816 4817 rcu_read_lock(); 4818 if (!netif_is_bridge_port(dev)) 4819 goto unlock; 4820 4821 port = br_port_get_rcu(dev); 4822 if (!port || !port->br) 4823 goto unlock; 4824 4825 br = port->br; 4826 4827 memset(ð, 0, sizeof(eth)); 4828 eth.h_proto = htons(proto); 4829 4830 ret = br_multicast_querier_exists(&br->multicast_ctx, ð, NULL); 4831 4832 unlock: 4833 rcu_read_unlock(); 4834 return ret; 4835 } 4836 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 4837 4838 /** 4839 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 4840 * @dev: The bridge port adjacent to which to check for a querier 4841 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 4842 * 4843 * Checks whether the given interface has a bridge on top and if so returns 4844 * true if a selected querier is behind one of the other ports of this 4845 * bridge. Otherwise returns false. 
4846 */ 4847 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) 4848 { 4849 struct net_bridge_mcast *brmctx; 4850 struct net_bridge *br; 4851 struct net_bridge_port *port; 4852 bool ret = false; 4853 int port_ifidx; 4854 4855 rcu_read_lock(); 4856 if (!netif_is_bridge_port(dev)) 4857 goto unlock; 4858 4859 port = br_port_get_rcu(dev); 4860 if (!port || !port->br) 4861 goto unlock; 4862 4863 br = port->br; 4864 brmctx = &br->multicast_ctx; 4865 4866 switch (proto) { 4867 case ETH_P_IP: 4868 port_ifidx = brmctx->ip4_querier.port_ifidx; 4869 if (!timer_pending(&brmctx->ip4_other_query.timer) || 4870 port_ifidx == port->dev->ifindex) 4871 goto unlock; 4872 break; 4873 #if IS_ENABLED(CONFIG_IPV6) 4874 case ETH_P_IPV6: 4875 port_ifidx = brmctx->ip6_querier.port_ifidx; 4876 if (!timer_pending(&brmctx->ip6_other_query.timer) || 4877 port_ifidx == port->dev->ifindex) 4878 goto unlock; 4879 break; 4880 #endif 4881 default: 4882 goto unlock; 4883 } 4884 4885 ret = true; 4886 unlock: 4887 rcu_read_unlock(); 4888 return ret; 4889 } 4890 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 4891 4892 /** 4893 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port 4894 * @dev: The bridge port adjacent to which to check for a multicast router 4895 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 4896 * 4897 * Checks whether the given interface has a bridge on top and if so returns 4898 * true if a multicast router is behind one of the other ports of this 4899 * bridge. Otherwise returns false. 
4900 */ 4901 bool br_multicast_has_router_adjacent(struct net_device *dev, int proto) 4902 { 4903 struct net_bridge_mcast_port *pmctx; 4904 struct net_bridge_mcast *brmctx; 4905 struct net_bridge_port *port; 4906 bool ret = false; 4907 4908 rcu_read_lock(); 4909 port = br_port_get_check_rcu(dev); 4910 if (!port) 4911 goto unlock; 4912 4913 brmctx = &port->br->multicast_ctx; 4914 switch (proto) { 4915 case ETH_P_IP: 4916 hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list, 4917 ip4_rlist) { 4918 if (pmctx->port == port) 4919 continue; 4920 4921 ret = true; 4922 goto unlock; 4923 } 4924 break; 4925 #if IS_ENABLED(CONFIG_IPV6) 4926 case ETH_P_IPV6: 4927 hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list, 4928 ip6_rlist) { 4929 if (pmctx->port == port) 4930 continue; 4931 4932 ret = true; 4933 goto unlock; 4934 } 4935 break; 4936 #endif 4937 default: 4938 /* when compiled without IPv6 support, be conservative and 4939 * always assume presence of an IPv6 multicast router 4940 */ 4941 ret = true; 4942 } 4943 4944 unlock: 4945 rcu_read_unlock(); 4946 return ret; 4947 } 4948 EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent); 4949 4950 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats, 4951 const struct sk_buff *skb, u8 type, u8 dir) 4952 { 4953 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats); 4954 __be16 proto = skb->protocol; 4955 unsigned int t_len; 4956 4957 u64_stats_update_begin(&pstats->syncp); 4958 switch (proto) { 4959 case htons(ETH_P_IP): 4960 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 4961 switch (type) { 4962 case IGMP_HOST_MEMBERSHIP_REPORT: 4963 pstats->mstats.igmp_v1reports[dir]++; 4964 break; 4965 case IGMPV2_HOST_MEMBERSHIP_REPORT: 4966 pstats->mstats.igmp_v2reports[dir]++; 4967 break; 4968 case IGMPV3_HOST_MEMBERSHIP_REPORT: 4969 pstats->mstats.igmp_v3reports[dir]++; 4970 break; 4971 case IGMP_HOST_MEMBERSHIP_QUERY: 4972 if (t_len != sizeof(struct igmphdr)) { 4973 
pstats->mstats.igmp_v3queries[dir]++; 4974 } else { 4975 unsigned int offset = skb_transport_offset(skb); 4976 struct igmphdr *ih, _ihdr; 4977 4978 ih = skb_header_pointer(skb, offset, 4979 sizeof(_ihdr), &_ihdr); 4980 if (!ih) 4981 break; 4982 if (!ih->code) 4983 pstats->mstats.igmp_v1queries[dir]++; 4984 else 4985 pstats->mstats.igmp_v2queries[dir]++; 4986 } 4987 break; 4988 case IGMP_HOST_LEAVE_MESSAGE: 4989 pstats->mstats.igmp_leaves[dir]++; 4990 break; 4991 } 4992 break; 4993 #if IS_ENABLED(CONFIG_IPV6) 4994 case htons(ETH_P_IPV6): 4995 t_len = ntohs(ipv6_hdr(skb)->payload_len) + 4996 sizeof(struct ipv6hdr); 4997 t_len -= skb_network_header_len(skb); 4998 switch (type) { 4999 case ICMPV6_MGM_REPORT: 5000 pstats->mstats.mld_v1reports[dir]++; 5001 break; 5002 case ICMPV6_MLD2_REPORT: 5003 pstats->mstats.mld_v2reports[dir]++; 5004 break; 5005 case ICMPV6_MGM_QUERY: 5006 if (t_len != sizeof(struct mld_msg)) 5007 pstats->mstats.mld_v2queries[dir]++; 5008 else 5009 pstats->mstats.mld_v1queries[dir]++; 5010 break; 5011 case ICMPV6_MGM_REDUCTION: 5012 pstats->mstats.mld_leaves[dir]++; 5013 break; 5014 } 5015 break; 5016 #endif /* CONFIG_IPV6 */ 5017 } 5018 u64_stats_update_end(&pstats->syncp); 5019 } 5020 5021 void br_multicast_count(struct net_bridge *br, 5022 const struct net_bridge_port *p, 5023 const struct sk_buff *skb, u8 type, u8 dir) 5024 { 5025 struct bridge_mcast_stats __percpu *stats; 5026 5027 /* if multicast_disabled is true then igmp type can't be set */ 5028 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) 5029 return; 5030 5031 if (p) 5032 stats = p->mcast_stats; 5033 else 5034 stats = br->mcast_stats; 5035 if (WARN_ON(!stats)) 5036 return; 5037 5038 br_mcast_stats_add(stats, skb, type, dir); 5039 } 5040 5041 int br_multicast_init_stats(struct net_bridge *br) 5042 { 5043 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 5044 if (!br->mcast_stats) 5045 return -ENOMEM; 5046 5047 return 0; 5048 } 5049 5050 void 
br_multicast_uninit_stats(struct net_bridge *br) 5051 { 5052 free_percpu(br->mcast_stats); 5053 } 5054 5055 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 5056 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 5057 { 5058 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 5059 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX]; 5060 } 5061 5062 void br_multicast_get_stats(const struct net_bridge *br, 5063 const struct net_bridge_port *p, 5064 struct br_mcast_stats *dest) 5065 { 5066 struct bridge_mcast_stats __percpu *stats; 5067 struct br_mcast_stats tdst; 5068 int i; 5069 5070 memset(dest, 0, sizeof(*dest)); 5071 if (p) 5072 stats = p->mcast_stats; 5073 else 5074 stats = br->mcast_stats; 5075 if (WARN_ON(!stats)) 5076 return; 5077 5078 memset(&tdst, 0, sizeof(tdst)); 5079 for_each_possible_cpu(i) { 5080 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i); 5081 struct br_mcast_stats temp; 5082 unsigned int start; 5083 5084 do { 5085 start = u64_stats_fetch_begin(&cpu_stats->syncp); 5086 memcpy(&temp, &cpu_stats->mstats, sizeof(temp)); 5087 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); 5088 5089 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries); 5090 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries); 5091 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries); 5092 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves); 5093 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports); 5094 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports); 5095 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports); 5096 tdst.igmp_parse_errors += temp.igmp_parse_errors; 5097 5098 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries); 5099 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries); 5100 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves); 5101 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports); 5102 mcast_stats_add_dir(tdst.mld_v2reports, 
temp.mld_v2reports); 5103 tdst.mld_parse_errors += temp.mld_parse_errors; 5104 } 5105 memcpy(dest, &tdst, sizeof(*dest)); 5106 } 5107 5108 int br_mdb_hash_init(struct net_bridge *br) 5109 { 5110 int err; 5111 5112 err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params); 5113 if (err) 5114 return err; 5115 5116 err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params); 5117 if (err) { 5118 rhashtable_destroy(&br->sg_port_tbl); 5119 return err; 5120 } 5121 5122 return 0; 5123 } 5124 5125 void br_mdb_hash_fini(struct net_bridge *br) 5126 { 5127 rhashtable_destroy(&br->sg_port_tbl); 5128 rhashtable_destroy(&br->mdb_hash_tbl); 5129 } 5130