// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
#include <trace/events/bridge.h>

#include "br_private.h"
#include "br_private_mcast_eht.h"

/* MDB hash table parameters: entries are keyed by the full struct br_ip
 * (protocol, destination, source and vid), so IGMPv3/MLDv2 S,G entries and
 * *,G entries live in the same table under distinct keys.
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

/* Per-port S,G table parameters: keyed by (port, address) pair so a given
 * S,G can be found for a specific bridge port directly.
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);

/* Look up the port group for an S,G + port key. Caller must hold
 * br->multicast_lock.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

/* MDB lookup for RCU readers (e.g. the data path); the caller must be inside
 * an RCU read-side critical section.
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* MDB lookup for control-path callers holding br->multicast_lock; takes a
 * short RCU read section only to satisfy the rhashtable API.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Convenience wrapper: build a *,G IPv4 br_ip key and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: build a *,G IPv6 br_ip key and look it up. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Data-path MDB lookup for a received skb: tries an S,G match first when
 * IGMPv3/MLDv2 is in use, then falls back to *,G; unknown protocols are
 * looked up by destination MAC. Returns NULL when snooping is disabled for
 * this context or the skb is an IGMP/MLD control packet.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* try the S,G entry first, fall back to *,G below */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* try the S,G entry first, fall back to *,G below */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* non-IP multicast: match on destination MAC address */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g.
timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}

/* Resolve the per-port vlan multicast context for @vid, or NULL when vlan
 * snooping is globally disabled or that vlan's context is disabled.
 * Caller must hold br->multicast_lock.
 */
static struct net_bridge_mcast_port *
br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx = NULL;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		return NULL;

	/* Take RCU to access the vlan. */
	rcu_read_lock();

	vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;

	rcu_read_unlock();

	return pmctx;
}

/* when snooping we need to check if the contexts should be used
 * in the following order:
 * - if pmctx is non-NULL (port), check if it should be used
 * - if pmctx is NULL (bridge), check if brmctx should be used
 */
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
			    const struct net_bridge_mcast_port *pmctx)
{
	if (!netif_running(brmctx->br->dev))
		return false;

	if (pmctx)
		return !br_multicast_port_ctx_state_disabled(pmctx);
	else
		return !br_multicast_ctx_vlan_disabled(brmctx);
}

/* Test whether port group @p matches @port (and @src when the port is in
 * multicast-to-unicast mode, where groups are kept per source MAC).
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Install an automatic (kernel-managed) S,G entry mirroring a *,G EXCLUDE
 * port group, unless an S,G entry for that port already exists.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	/* only tag kernel-created entries; user/protocol entries are left alone */
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

/* Remove an automatically installed (STAR_EXCL, kernel-owned) S,G entry for
 * this port; entries without the STAR_EXCL flag or not owned by the kernel
 * are left untouched.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk all other port groups of the *,G entry and mirror/remove the
	 * S,G entries derived from their installed sources
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

/* Drop the automatically added STAR_EXCL port groups of an S,G entry once
 * no other (non-STAR_EXCL, non-permanent) port group remains on it.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* delete everything except the user-space managed (permanent) entries */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

/* Add @sg to every EXCLUDE-mode port of the *,G entry @star_mp (as a
 * kernel-owned STAR_EXCL S,G port group), and propagate host_joined state.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}

static void
br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	/* Install the S,G port group corresponding to source entry @src and
	 * mark the source as BR_SGRP_F_INSTALLED; for kernel-owned entries
	 * also mirror it onto all EXCLUDE ports of the parent *,G.
	 */
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	/* block forwarding (last arg) when the source timer isn't running */
	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

/* Remove the installed S,G port group backing source entry @src and clear
 * BR_SGRP_F_INSTALLED; permanent entries not added by the user for this
 * source are preserved.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		/* keep user-managed permanent entries unless user-added src */
		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT) &&
		    !(src->flags & BR_SGRP_F_USER_ADDED))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only when the blocked state actually changed */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

/* GC callback: final destruction of an mdb entry after it was unlinked. */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	timer_shutdown_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

/* Unlink an mdb entry from the hash table and list, then defer its
 * destruction to the GC workqueue.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* Group membership timer: drop host join state and delete the mdb entry
 * once no port groups remain.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

/* GC callback: final destruction of a group source entry. */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	timer_shutdown_sync(&src->timer);
	kfree_rcu(src, rcu);
}

/* Unlink a source entry from its port group and defer destruction to GC. */
void __br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->key.port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* Remove a source entry together with its installed S,G forwarding state. */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	br_multicast_fwd_src_remove(src, fastleave);
	__br_multicast_del_group_src(src);
}

/* Bump the group count of one multicast context, enforcing its configured
 * mcast_max_groups limit. Returns -E2BIG when the limit is hit.
 */
static int
br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
				  struct netlink_ext_ack *extack,
				  const char *what)
{
	u32 max = READ_ONCE(pmctx->mdb_max_entries);
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	if (max && n >= max) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
				       what, n, max);
		return -E2BIG;
	}

	WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
	return 0;
}

/* Decrement the group count of one multicast context. */
static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
{
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	WARN_ON_ONCE(n == 0);
	WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
}

/* Account a new group on the port context and, when applicable, on the
 * per-vlan context; rolls back the port count if the vlan limit is hit.
 */
static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
					 const struct br_ip *group,
					 struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast_port *pmctx;
	int err;

	lockdep_assert_held_once(&port->br->multicast_lock);

	/* Always count on the port context. */
	err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
						"Port");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		return err;
	}

	/* Only count on the VLAN context if VID is given, and if snooping on
	 * that VLAN is enabled.
	 */
	if (!group->vid)
		return 0;

	pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
	if (!pmctx)
		return 0;

	err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		goto dec_one_out;
	}

	return 0;

dec_one_out:
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
	return err;
}

/* Un-account a group from the per-vlan context (if any) and the port context. */
static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (vid) {
		pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
		if (pmctx)
			br_multicast_port_ngroups_dec_one(pmctx);
	}
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
}

u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_n_entries);
}

void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
{
	WRITE_ONCE(pmctx->mdb_max_entries, max);
}

u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_max_entries);
}

/* GC callback: final destruction of a port group after it was unlinked. */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	timer_shutdown_sync(&pg->rexmit_timer);
	timer_shutdown_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

/* Unlink port group @pg from mdb entry @mp (via its link pointer @pp),
 * tear down its sources and derived S,G state, notify user-space and defer
 * the final free to the GC workqueue.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	/* expire the now-empty mdb entry immediately */
	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

/* Locate @pg on its mdb entry's port list and delete it; WARNs if either
 * the mdb entry or the port group cannot be found.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

/* Port group membership timer: expire timed-out sources, fall back to
 * INCLUDE mode, and delete the port group once no sources remain.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

spin_lock(&br->multicast_lock); 865 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 866 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT) 867 goto out; 868 869 changed = !!(pg->filter_mode == MCAST_EXCLUDE); 870 pg->filter_mode = MCAST_INCLUDE; 871 hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) { 872 if (!timer_pending(&src_ent->timer)) { 873 br_multicast_del_group_src(src_ent, false); 874 changed = true; 875 } 876 } 877 878 if (hlist_empty(&pg->src_list)) { 879 br_multicast_find_del_pg(br, pg); 880 } else if (changed) { 881 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr); 882 883 if (changed && br_multicast_is_star_g(&pg->key.addr)) 884 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE); 885 886 if (WARN_ON(!mp)) 887 goto out; 888 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB); 889 } 890 out: 891 spin_unlock(&br->multicast_lock); 892 } 893 894 static void br_multicast_gc(struct hlist_head *head) 895 { 896 struct net_bridge_mcast_gc *gcent; 897 struct hlist_node *tmp; 898 899 hlist_for_each_entry_safe(gcent, tmp, head, gc_node) { 900 hlist_del_init(&gcent->gc_node); 901 gcent->destroy(gcent); 902 } 903 } 904 905 static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx, 906 struct net_bridge_mcast_port *pmctx, 907 struct sk_buff *skb) 908 { 909 struct net_bridge_vlan *vlan = NULL; 910 911 if (pmctx && br_multicast_port_ctx_is_vlan(pmctx)) 912 vlan = pmctx->vlan; 913 else if (br_multicast_ctx_is_vlan(brmctx)) 914 vlan = brmctx->vlan; 915 916 if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) { 917 u16 vlan_proto; 918 919 if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0) 920 return; 921 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid); 922 } 923 } 924 925 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx, 926 struct net_bridge_mcast_port *pmctx, 927 struct net_bridge_port_group *pg, 928 __be32 ip_dst, __be32 group, 929 bool with_srcs, 
						    bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* count sources to embed so the header size and
			 * nsrcs can be fixed before allocating the skb
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + ip (incl. 4 bytes of Router Alert option) + igmp */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 20-byte header + 4-byte Router Alert option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* ihl * 4 */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* general queries use the query response interval, group
		 * specific queries the last member interval
		 */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* fill the sources counted above; must match nsrcs */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	/* leave skb->data at the network header, as for received packets */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb (MLDv1 or MLDv2 depending on the context's
 * configured version). Mirrors br_ip4_multicast_alloc_query(): for MLDv2
 * group-and-source queries only sources on the @over_llqt side of the
 * "last listener query time" with pending retransmissions are included.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* pre-count sources so nsrcs and the header size
			 * are known before allocation
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* eth + ipv6 + 8 bytes of hop-by-hop options + mld */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* version 6, traffic class 0, flow label 0 */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		/* no usable IPv6 source address - remember that so queries
		 * can be suppressed until one appears
		 */
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	/* general queries use the query response interval, group specific
	 * queries the last member (listener) interval
	 */
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* fill the sources counted above; must match nsrcs */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	/* leave skb->data at the network header, as for received packets */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff
/* Protocol dispatcher for query allocation: picks the IGMP or MLD builder
 * based on @group->proto. A NULL @ip_dst selects the all-hosts (224.0.0.1)
 * or all-nodes (ff02::1) destination for general queries.
 */
*br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
			  struct net_bridge_mcast_port *pmctx,
			  struct net_bridge_port_group *pg,
			  struct br_ip *ip_dst,
			  struct br_ip *group,
			  bool with_srcs, bool over_lmqt,
			  u8 sflag, u8 *igmp_type,
			  bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* ff02::1 - all-nodes link-local address */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Look up or create the mdb entry for @group. Returns the entry, or an
 * ERR_PTR on allocation/insertion failure. When the hash limit is hit,
 * snooping is disabled for the whole bridge and -E2BIG is returned.
 * Called with br->multicast_lock held (uses GFP_ATOMIC).
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		trace_br_mdb_full(br->dev, group);
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Source entry timer expired: in INCLUDE mode the source (and possibly the
 * whole port group) is removed, in EXCLUDE mode the source merely stops
 * being forwarded.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* already unlinked, device down, or timer re-armed meanwhile */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		/* last source gone - remove the port group too */
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

/* Linear search for source address @ip in @pg's source list, keyed by
 * protocol. Returns the entry or NULL.
 */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate and link a new source entry for @src_ip under port group @pg.
 * Rejects invalid sources (zeronet/any or multicast addresses) and caps
 * the number of sources per group at PG_SRC_ENT_LIMIT. Returns NULL on
 * any failure. Called with br->multicast_lock held (uses GFP_ATOMIC).
 */
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

/* Allocate a new port group for @group on @port and link it into the
 * port's group list (and, for (S, G) entries, into the S,G port hash
 * table). @next becomes the new entry's successor in the mdb port list.
 * Returns NULL on failure, with @extack populated where one was given.
 * Called with br->multicast_lock held (uses GFP_ATOMIC).
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *p;
	int err;

	/* accounting first - may fail due to per-port group limits */
	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		goto dec_out;
	}

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
		goto free_out;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		/* no per-host source MAC - match any host */
		eth_broadcast_addr(p->eth_addr);

	return p;

free_out:
	kfree(p);
dec_out:
	br_multicast_port_ngroups_dec(port, group->vid);
	return NULL;
}

/* Undo br_multicast_new_port_group() for an entry that was never made
 * visible to the datapath: unlink, drop the S,G hash entry if any, free
 * immediately (no RCU/GC deferral) and fix up the group accounting.
 */
void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
	struct net_bridge_port *port = p->key.port;
	__u16 vid = p->key.addr.vid;

	hlist_del_init(&p->mglist);
	if (!br_multicast_is_star_g(&p->key.addr))
		rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
				       br_sg_port_rht_params);
	kfree(p);
	br_multicast_port_ngroups_dec(port, vid);
}

/* Mark @mp as joined by the bridge device itself and (re)arm its
 * membership timer; optionally notify userspace. L2 groups have no
 * membership timeout.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}

/* Reverse of br_multicast_host_join(); no-op if the host had not joined */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Core join handling: find/create the mdb entry for @group and either mark
 * it host-joined (no port context) or find/create the port group for
 * @pmctx->port. The port list is kept sorted by port pointer value; for
 * IGMPv2/MLDv1 reports the membership timer is refreshed. Returns the port
 * group, NULL for host joins, or an ERR_PTR. Called with
 * br->multicast_lock held.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		/* join by the bridge device itself */
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		/* list is sorted by descending port pointer - insert here */
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}

/* Locked wrapper around __br_multicast_add_group() that converts the
 * result to an errno (0 on success).
 */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}

/* IPv4 join entry point: ignores link-local (224.0.0.x) groups, builds a
 * br_ip key and delegates to br_multicast_add_group(). IGMPv2 reports
 * imply EXCLUDE filter mode, IGMPv3 INCLUDE.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ?
		      MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 join entry point: ignores the link-local all-nodes group, builds a
 * br_ip key and delegates to br_multicast_add_group(). MLDv1 reports imply
 * EXCLUDE filter mode, MLDv2 INCLUDE.
 */
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif

/* Unlink a router-port list entry; returns true if it was actually linked
 * (i.e. something was removed).
 */
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}

/* Per-port multicast router timer expired: remove the port from the
 * router list unless its router mode pins it there (disabled/permanent)
 * or the timer was re-armed meanwhile.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

/* Propagate the bridge's own multicast-router state to offloading
 * hardware via switchdev (deferred).
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* The bridge device's own router timer expired: if neither address family
 * still considers the bridge a router (and the mode doesn't pin the
 * state), tell switchdev the bridge stopped being a multicast router.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif

/* The "other querier present" timer expired: if snooping is still active,
 * take over as querier again.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct
					 bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

/* Intentionally empty callback: NOTE(review) nothing in this chunk reads
 * the query delay timer directly - presumably only its pending state is
 * checked elsewhere; confirm against the rest of the file.
 */
static void br_multicast_query_delay_expired(struct timer_list *t)
{
}

/* Record our own source address as the elected querier address for the
 * given protocol, taken from the query skb we are about to send.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and emit one query. With a port context the query is transmitted
 * out of that port through the netfilter LOCAL_OUT hook; without one it is
 * looped back into the bridge via netif_rx() (the bridge queries itself).
 * When sflag is set (suppress-router-processing), sources over the LMQT
 * are queried first and then the pass is repeated for the remaining ones.
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag,
				       &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* second pass for the sources still under the LMQT */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Lockless snapshot of the elected querier state using its seqcount */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}

/* Writer side of the querier seqcount: update the elected querier's
 * ifindex and address atomically w.r.t. br_multicast_read_querier().
 */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}

/* Send a general query for @own_query's address family, provided snooping
 * and the querier function are enabled and no other querier is currently
 * active, then re-arm the own-query timer (startup interval while the
 * startup query count has not been exhausted).
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* which family does @own_query belong to? */
	if (pmctx ?
	    (own_query == &pmctx->ip4_own_query) :
	    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* defer to a foreign querier while its presence timer runs */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Per-port own-query timer handler: bump the startup counter and send the
 * next general query for this port context.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif

/* Retransmit timer for group/group-and-source specific queries of a port
 * group. Sends at most one group query (while grp_query_rexmit_cnt lasts)
 * plus one group-and-source query per run, and re-arms itself while any
 * retransmissions remain. Skipped entirely while another querier is
 * active.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		/* group specific query, suppress flag set */
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	/* group-and-source specific query */
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

/* Tell switchdev whether multicast snooping is enabled on @dev. Note the
 * attribute carries the inverted value (mc_disabled).
 */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};
1991 1992 return switchdev_port_attr_set(dev, &attr, extack); 1993 } 1994 1995 void br_multicast_port_ctx_init(struct net_bridge_port *port, 1996 struct net_bridge_vlan *vlan, 1997 struct net_bridge_mcast_port *pmctx) 1998 { 1999 pmctx->port = port; 2000 pmctx->vlan = vlan; 2001 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 2002 timer_setup(&pmctx->ip4_mc_router_timer, 2003 br_ip4_multicast_router_expired, 0); 2004 timer_setup(&pmctx->ip4_own_query.timer, 2005 br_ip4_multicast_port_query_expired, 0); 2006 #if IS_ENABLED(CONFIG_IPV6) 2007 timer_setup(&pmctx->ip6_mc_router_timer, 2008 br_ip6_multicast_router_expired, 0); 2009 timer_setup(&pmctx->ip6_own_query.timer, 2010 br_ip6_multicast_port_query_expired, 0); 2011 #endif 2012 } 2013 2014 void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx) 2015 { 2016 #if IS_ENABLED(CONFIG_IPV6) 2017 del_timer_sync(&pmctx->ip6_mc_router_timer); 2018 #endif 2019 del_timer_sync(&pmctx->ip4_mc_router_timer); 2020 } 2021 2022 int br_multicast_add_port(struct net_bridge_port *port) 2023 { 2024 int err; 2025 2026 port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT; 2027 br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx); 2028 2029 err = br_mc_disabled_update(port->dev, 2030 br_opt_get(port->br, 2031 BROPT_MULTICAST_ENABLED), 2032 NULL); 2033 if (err && err != -EOPNOTSUPP) 2034 return err; 2035 2036 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 2037 if (!port->mcast_stats) 2038 return -ENOMEM; 2039 2040 return 0; 2041 } 2042 2043 void br_multicast_del_port(struct net_bridge_port *port) 2044 { 2045 struct net_bridge *br = port->br; 2046 struct net_bridge_port_group *pg; 2047 struct hlist_node *n; 2048 2049 /* Take care of the remaining groups, only perm ones should be left */ 2050 spin_lock_bh(&br->multicast_lock); 2051 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) 2052 br_multicast_find_del_pg(br, pg); 2053 spin_unlock_bh(&br->multicast_lock); 2054 
	/* wait for deferred group destruction before freeing port state */
	flush_work(&br->mcast_gc_work);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}

/* Restart own-query cycle: reset startup counter and fire the query timer
 * immediately (only if the timer could be (re)claimed safely).
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Enable multicast processing for a port (or port-vlan) context:
 * kick the own queriers, re-add permanent routers and, for vlan contexts,
 * recompute the mdb entry counter.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}

	if (br_multicast_port_ctx_is_vlan(pmctx)) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* The mcast_n_groups counter might be wrong. First,
		 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
		 * are flushed, thus mcast_n_groups after the toggle does not
		 * reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
		 * either. Thus we have to refresh the counter.
		 */

		hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
			if (pg->key.addr.vid == pmctx->vlan->vid)
				n++;
		}
		WRITE_ONCE(pmctx->mdb_n_entries, n);
	}
}

/* Locked wrapper enabling the port's default (non-vlan) multicast context */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}

/* Disable a port (or port-vlan) multicast context: flush its non-permanent
 * groups, drop it from the router lists, stop its timers and notify if it
 * was removed as a router port. Caller holds br->multicast_lock.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}

/* Locked wrapper disabling the port's default (non-vlan) multicast context */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}

/* Delete all source entries marked BR_SGRP_F_DELETE from the port group's
 * source list; returns the number of entries removed.
 */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	/* re-arm the source timer, then refresh its forwarding state */
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

/* For sources marked BR_SGRP_F_SEND: lower their timers to the last member
 * query time (LMQT) and, if we are the active querier, send a
 * group-and-source-specific query and schedule its retransmission.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				/* only count rexmits while we are querier and
				 * no foreign querier is active
				 */
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

/* If we are the active querier, send a group-specific query for @pg and set
 * up its retransmissions; also lower the group timer to LMQT for EXCLUDE
 * mode groups.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br,
			BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state           Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)       (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)       (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)   (A)=GMI
 *
 * Returns true if the set of sources (or their EHT state) changed.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if
	   (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state           Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)   (B-A)=0
 *                                                  Delete (A-B)
 *                                                  Group Timer=GMI
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark what the report lists */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)   (A-X-Y)=GMI
 *                                                  Delete (X-A)
 *                                                  Delete (Y-A)
 *                                                  Group Timer=GMI
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs;
	     src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Handle an IS_EXCLUDE record: dispatch on the current filter mode, then
 * switch the group to EXCLUDE and refresh the group timer (GMI).
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state           Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)       (B)=GMI
 *                                                  Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* mark all for query (A-B); reported sources get unmarked below */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip,
	       0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)   (A)=GMI
 *                                                  Send Q(G,X-A)
 *                                                  Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only sources with a running timer (set X) are query candidates */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if
	   (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a TO_INCLUDE record: dispatch on filter mode; if explicit host
 * tracking says the group is now empty, fast-leave deletes the port group.
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state           Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)   (B-A)=0
 *                                                  Delete (A-B)
 *                                                  Send Q(G,A*B)
 *                                                  Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* in A*B: keep it and queue a source-specific query */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)   (A-X-Y)=Group Timer
 *                                                  Delete (X-A)
 *                                                  Delete (Y-A)
 *                                                  Send Q(G,A-Y)
 *                                                  Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new (A-X-Y) sources inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if
	   (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a TO_EXCLUDE record: dispatch on filter mode, then switch the
 * group to EXCLUDE and refresh the group timer (GMI).
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state           Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)         Send Q(G,A*B)
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if
	   (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state               Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)     (A-X-Y)=Group Timer
 *                                                      Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new (A-X-Y) sources inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* Handle a BLOCK record: dispatch on filter mode; delete the port group if
 * it ends up with no sources in INCLUDE mode or explicit host tracking says
 * it should go (fast leave).
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg,
					       h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* Find the port group entry for port @p (and, via br_port_group_equal,
 * matching source MAC @src) in mdb entry @mp. Caller holds multicast_lock.
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Parse an IGMPv3 membership report and apply each group record to the
 * snooping state. When running in IGMPv2 compatibility mode (or without a
 * port context) only plain join/leave processing is done.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* validate the source list length before touching it */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* empty INCLUDE == leave in v2 compatibility mode */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		if (!pmctx || igmpv2)
			continue;

		/* full IGMPv3 source-list processing below */
		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg,
						    h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 report and apply each group record to the snooping state.
 * Mirrors br_ip4_multicast_igmp3_report() for IPv6; in MLDv1 compatibility
 * mode only plain join/leave processing is done.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		/* read grec_nsrcs safely before pulling the whole record */
		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* empty INCLUDE == leave in MLDv1 compatibility mode */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!pmctx || mldv1)
			continue;

		/* full MLDv2 source-list processing below */
		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif

/* Querier election: adopt @saddr as the selected querier if it has a lower
 * address than the current one, or if neither our own nor another querier's
 * timer is running. Returns true if the querier record was updated.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}

/* Resolve the bridge port the selected querier was seen on, or NULL.
 * Caller must hold rtnl or rcu (see br_port_get_rtnl_rcu()).
 */
static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
				const struct bridge_mcast_querier *querier)
{
	int port_ifidx = READ_ONCE(querier->port_ifidx);
	struct net_bridge_port *p;
	struct net_device *dev;

	if (port_ifidx == 0)
		return NULL;

	dev =
	     dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
	if (!dev)
		return NULL;
	p = br_port_get_rtnl_rcu(dev);
	if (!p || p->br != br)
		return NULL;

	return p;
}

/* Upper bound of the netlink space needed by br_multicast_dump_querier_state() */
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +		/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +    /* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +		 /* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) +	 /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}

/* protected by rtnl or rcu */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
rcu_read_unlock(); 3156 goto out_err; 3157 } 3158 3159 out_v6: 3160 #if IS_ENABLED(CONFIG_IPV6) 3161 if (!brmctx->multicast_querier && 3162 !timer_pending(&brmctx->ip6_other_query.timer)) 3163 goto out; 3164 3165 br_multicast_read_querier(&brmctx->ip6_querier, &querier); 3166 if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS, 3167 &querier.addr.src.ip6)) { 3168 rcu_read_unlock(); 3169 goto out_err; 3170 } 3171 3172 p = __br_multicast_get_querier_port(brmctx->br, &querier); 3173 if (timer_pending(&brmctx->ip6_other_query.timer) && 3174 (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER, 3175 br_timer_value(&brmctx->ip6_other_query.timer), 3176 BRIDGE_QUERIER_PAD) || 3177 (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT, 3178 p->dev->ifindex)))) { 3179 rcu_read_unlock(); 3180 goto out_err; 3181 } 3182 out: 3183 #endif 3184 rcu_read_unlock(); 3185 nla_nest_end(skb, nest); 3186 if (!nla_len(nest)) 3187 nla_nest_cancel(skb, nest); 3188 3189 return 0; 3190 3191 out_err: 3192 nla_nest_cancel(skb, nest); 3193 return -EMSGSIZE; 3194 } 3195 3196 static void 3197 br_multicast_update_query_timer(struct net_bridge_mcast *brmctx, 3198 struct bridge_mcast_other_query *query, 3199 unsigned long max_delay) 3200 { 3201 if (!timer_pending(&query->timer)) 3202 mod_timer(&query->delay_timer, jiffies + max_delay); 3203 3204 mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval); 3205 } 3206 3207 static void br_port_mc_router_state_change(struct net_bridge_port *p, 3208 bool is_mc_router) 3209 { 3210 struct switchdev_attr attr = { 3211 .orig_dev = p->dev, 3212 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER, 3213 .flags = SWITCHDEV_F_DEFER, 3214 .u.mrouter = is_mc_router, 3215 }; 3216 3217 switchdev_port_attr_set(p->dev, &attr, NULL); 3218 } 3219 3220 static struct net_bridge_port * 3221 br_multicast_rport_from_node(struct net_bridge_mcast *brmctx, 3222 struct hlist_head *mc_router_list, 3223 struct hlist_node *rlist) 3224 { 3225 struct net_bridge_mcast_port *pmctx; 3226 
#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

/* Find the node to insert @port behind so that @mc_router_list stays
 * sorted by descending port pointer value; NULL means insert at the head.
 */
static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)

{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

/* True when the port is not linked on the router list of the *other*
 * protocol family than the one @rnode belongs to.  Used below to notify
 * only on the "no router at all" -> "router" transition.
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	/* already on the list */
	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

/* Record that a multicast router was seen behind @pmctx (or on the
 * bridge itself when @pmctx is NULL) and (re)arm the matching router
 * timer.  Ports whose router type is administratively fixed
 * (disabled/permanent) are left untouched.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

/* IPv4 wrapper: select the per-port or per-bridge timer/list for
 * br_multicast_mark_router().
 */
static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

/* IPv6 counterpart of br_ip4_multicast_mark_router(). */
static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

/* A general query from another IPv4 querier was received: run the
 * election and, if the sender wins, restart the other-querier timer and
 * mark the sending port as a multicast router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_query_received(). */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

/* Process a received IGMP query: general queries feed querier election,
 * group-specific queries lower the timers of the matching mdb entry and
 * its ports so that non-responding members expire quickly.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len =
ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2-sized query */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 (code 0): use 10s and force the
			 * general-query path below
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* ignore group-and-source-specific queries, and (when we
		 * run IGMPv3) group queries with the suppress flag set
		 */
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: querier election and timer work only */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	/* lower the host-joined timer, unless it already expires sooner */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD counterpart of br_ip4_multicast_query(); additionally makes sure
 * the MLD/MLDv2 header is linear (pskb_may_pull) before touching it.
 * Returns 0 or -EINVAL on a truncated header.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1-sized query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* ignore suppressed group queries when we run MLDv2 */
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

/* Common IGMP/MLD leave handling: either fast-leave the matching port
 * group right away, or (when we are querier and no other querier is
 * active) send a last-member query and shorten the relevant group/port
 * timers so the entry expires unless it is re-reported.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* fast leave: delete the port group immediately, except
		 * for permanent entries
		 */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is active; let it drive the leave process */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		/* send a query for the group being left */
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		/* lower the timer of the matching port group, unless it
		 * already expires sooner
		 */
		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host leave: lower only the bridge-local membership timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IGMP leave: build the br_ip key and defer to the common leave path.
 * Link-local groups (ipv4_is_local_multicast()) are never expired.
 */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD done message: same as the IGMP leave path; the link-local
 * all-nodes group is ignored.
 */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

/* Account an IGMP/MLD parse error in the per-CPU multicast stats of the
 * ingress port (or of the bridge itself when @p is NULL).
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIM HELLO received on a port means a multicast router is attached
 * there; mark it as such.
 */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IPv4 Multicast Router Discovery advertisement: mark the sending port
 * as a router.  Returns -ENOMSG for anything that is not an MRD advert.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

/* Snoop a received IPv4 multicast packet.  Packets that are not IGMP may
 * still carry PIM or MRD router discovery on their link-local groups;
 * routable non-IGMP multicast is marked mrouters_only.  Returns 0 or a
 * negative parse error (which is also counted in the stats).
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* valid multicast, but not an IGMP packet */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 Multicast Router Discovery advertisement handling. */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

/* IPv6/MLD counterpart of br_multicast_ipv4_rcv(). */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		/* not MLD (-ENOMSG) or other ICMPv6 (-ENODATA) */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Entry point from the bridge input path.  May retarget *brmctx/*pmctx
 * to the per-vlan multicast contexts when per-vlan snooping is enabled.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer expiry: send the next query (counting it against the
 * startup query budget).
 * NOTE(review): the @querier argument is unused in this body.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if \
IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

/* Deferred destruction of entries queued on br->mcast_gc_list; the list
 * is detached under the multicast lock, freeing happens outside of it.
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

/* Initialize a bridge/vlan multicast context with the default protocol
 * parameters (125s query interval, 255s querier interval, 260s
 * membership interval, IGMPv2/MLDv1) and set up all of its timers.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

/* One-time per-bridge multicast initialization: default context, lock,
 * mdb/gc lists and the gc work item.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

/* Join the IPv4 all-snoopers group on the bridge device. */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

/* Leave the IPv4 all-snoopers group joined above. */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a). */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

/* Restart the startup query sequence for @query, provided multicast
 * snooping is enabled on the bridge.
 */
static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

/* Start the own-query machinery of the relevant context(s) - the
 * per-vlan contexts when vlan snooping is enabled, the global context
 * otherwise.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

static
/* Synchronously stop all timers of a multicast context (both families). */
void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_other_query.delay_timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_other_query.delay_timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

/* Enable/disable multicast processing for a single vlan context (either
 * the master vlan on the bridge device or a per-port vlan).
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

/* Toggle a master vlan together with all of its per-port instances. */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan,
				     bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}

/* Switch the bridge between global and per-vlan multicast snooping.
 * Enabling requires vlan filtering to be on; the non-vlan contexts are
 * stopped (or restarted) accordingly.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

return true; 4331 } 4332 4333 void br_multicast_stop(struct net_bridge *br) 4334 { 4335 ASSERT_RTNL(); 4336 4337 if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) { 4338 struct net_bridge_vlan_group *vg; 4339 struct net_bridge_vlan *vlan; 4340 4341 vg = br_vlan_group(br); 4342 if (vg) { 4343 list_for_each_entry(vlan, &vg->vlan_list, vlist) { 4344 struct net_bridge_mcast *brmctx; 4345 4346 brmctx = &vlan->br_mcast_ctx; 4347 if (br_vlan_is_brentry(vlan) && 4348 !br_multicast_ctx_vlan_disabled(brmctx)) 4349 __br_multicast_stop(&vlan->br_mcast_ctx); 4350 } 4351 } 4352 } else { 4353 __br_multicast_stop(&br->multicast_ctx); 4354 } 4355 } 4356 4357 void br_multicast_dev_del(struct net_bridge *br) 4358 { 4359 struct net_bridge_mdb_entry *mp; 4360 HLIST_HEAD(deleted_head); 4361 struct hlist_node *tmp; 4362 4363 spin_lock_bh(&br->multicast_lock); 4364 hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) 4365 br_multicast_del_mdb_entry(mp); 4366 hlist_move_list(&br->mcast_gc_list, &deleted_head); 4367 spin_unlock_bh(&br->multicast_lock); 4368 4369 br_multicast_ctx_deinit(&br->multicast_ctx); 4370 br_multicast_gc(&deleted_head); 4371 cancel_work_sync(&br->mcast_gc_work); 4372 4373 rcu_barrier(); 4374 } 4375 4376 int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val) 4377 { 4378 int err = -EINVAL; 4379 4380 spin_lock_bh(&brmctx->br->multicast_lock); 4381 4382 switch (val) { 4383 case MDB_RTR_TYPE_DISABLED: 4384 case MDB_RTR_TYPE_PERM: 4385 br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM); 4386 del_timer(&brmctx->ip4_mc_router_timer); 4387 #if IS_ENABLED(CONFIG_IPV6) 4388 del_timer(&brmctx->ip6_mc_router_timer); 4389 #endif 4390 brmctx->multicast_router = val; 4391 err = 0; 4392 break; 4393 case MDB_RTR_TYPE_TEMP_QUERY: 4394 if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY) 4395 br_mc_router_state_change(brmctx->br, false); 4396 brmctx->multicast_router = val; 4397 err = 0; 4398 break; 4399 } 4400 4401 
spin_unlock_bh(&brmctx->br->multicast_lock); 4402 4403 return err; 4404 } 4405 4406 static void 4407 br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted) 4408 { 4409 if (!deleted) 4410 return; 4411 4412 /* For backwards compatibility for now, only notify if there is 4413 * no multicast router anymore for both IPv4 and IPv6. 4414 */ 4415 if (!hlist_unhashed(&pmctx->ip4_rlist)) 4416 return; 4417 #if IS_ENABLED(CONFIG_IPV6) 4418 if (!hlist_unhashed(&pmctx->ip6_rlist)) 4419 return; 4420 #endif 4421 4422 br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB); 4423 br_port_mc_router_state_change(pmctx->port, false); 4424 4425 /* don't allow timer refresh */ 4426 if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) 4427 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 4428 } 4429 4430 int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx, 4431 unsigned long val) 4432 { 4433 struct net_bridge_mcast *brmctx; 4434 unsigned long now = jiffies; 4435 int err = -EINVAL; 4436 bool del = false; 4437 4438 brmctx = br_multicast_port_ctx_get_global(pmctx); 4439 spin_lock_bh(&brmctx->br->multicast_lock); 4440 if (pmctx->multicast_router == val) { 4441 /* Refresh the temp router port timer */ 4442 if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) { 4443 mod_timer(&pmctx->ip4_mc_router_timer, 4444 now + brmctx->multicast_querier_interval); 4445 #if IS_ENABLED(CONFIG_IPV6) 4446 mod_timer(&pmctx->ip6_mc_router_timer, 4447 now + brmctx->multicast_querier_interval); 4448 #endif 4449 } 4450 err = 0; 4451 goto unlock; 4452 } 4453 switch (val) { 4454 case MDB_RTR_TYPE_DISABLED: 4455 pmctx->multicast_router = MDB_RTR_TYPE_DISABLED; 4456 del |= br_ip4_multicast_rport_del(pmctx); 4457 del_timer(&pmctx->ip4_mc_router_timer); 4458 del |= br_ip6_multicast_rport_del(pmctx); 4459 #if IS_ENABLED(CONFIG_IPV6) 4460 del_timer(&pmctx->ip6_mc_router_timer); 4461 #endif 4462 br_multicast_rport_del_notify(pmctx, del); 4463 break; 4464 case MDB_RTR_TYPE_TEMP_QUERY: 
4465 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 4466 del |= br_ip4_multicast_rport_del(pmctx); 4467 del |= br_ip6_multicast_rport_del(pmctx); 4468 br_multicast_rport_del_notify(pmctx, del); 4469 break; 4470 case MDB_RTR_TYPE_PERM: 4471 pmctx->multicast_router = MDB_RTR_TYPE_PERM; 4472 del_timer(&pmctx->ip4_mc_router_timer); 4473 br_ip4_multicast_add_router(brmctx, pmctx); 4474 #if IS_ENABLED(CONFIG_IPV6) 4475 del_timer(&pmctx->ip6_mc_router_timer); 4476 #endif 4477 br_ip6_multicast_add_router(brmctx, pmctx); 4478 break; 4479 case MDB_RTR_TYPE_TEMP: 4480 pmctx->multicast_router = MDB_RTR_TYPE_TEMP; 4481 br_ip4_multicast_mark_router(brmctx, pmctx); 4482 br_ip6_multicast_mark_router(brmctx, pmctx); 4483 break; 4484 default: 4485 goto unlock; 4486 } 4487 err = 0; 4488 unlock: 4489 spin_unlock_bh(&brmctx->br->multicast_lock); 4490 4491 return err; 4492 } 4493 4494 int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router) 4495 { 4496 int err; 4497 4498 if (br_vlan_is_master(v)) 4499 err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router); 4500 else 4501 err = br_multicast_set_port_router(&v->port_mcast_ctx, 4502 mcast_router); 4503 4504 return err; 4505 } 4506 4507 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx, 4508 struct bridge_mcast_own_query *query) 4509 { 4510 struct net_bridge_port *port; 4511 4512 if (!br_multicast_ctx_matches_vlan_snooping(brmctx)) 4513 return; 4514 4515 __br_multicast_open_query(brmctx->br, query); 4516 4517 rcu_read_lock(); 4518 list_for_each_entry_rcu(port, &brmctx->br->port_list, list) { 4519 struct bridge_mcast_own_query *ip4_own_query; 4520 #if IS_ENABLED(CONFIG_IPV6) 4521 struct bridge_mcast_own_query *ip6_own_query; 4522 #endif 4523 4524 if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx)) 4525 continue; 4526 4527 if (br_multicast_ctx_is_vlan(brmctx)) { 4528 struct net_bridge_vlan *vlan; 4529 4530 vlan = br_vlan_find(nbp_vlan_group_rcu(port), 4531 brmctx->vlan->vid); 4532 
if (!vlan || 4533 br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx)) 4534 continue; 4535 4536 ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query; 4537 #if IS_ENABLED(CONFIG_IPV6) 4538 ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query; 4539 #endif 4540 } else { 4541 ip4_own_query = &port->multicast_ctx.ip4_own_query; 4542 #if IS_ENABLED(CONFIG_IPV6) 4543 ip6_own_query = &port->multicast_ctx.ip6_own_query; 4544 #endif 4545 } 4546 4547 if (query == &brmctx->ip4_own_query) 4548 br_multicast_enable(ip4_own_query); 4549 #if IS_ENABLED(CONFIG_IPV6) 4550 else 4551 br_multicast_enable(ip6_own_query); 4552 #endif 4553 } 4554 rcu_read_unlock(); 4555 } 4556 4557 int br_multicast_toggle(struct net_bridge *br, unsigned long val, 4558 struct netlink_ext_ack *extack) 4559 { 4560 struct net_bridge_port *port; 4561 bool change_snoopers = false; 4562 int err = 0; 4563 4564 spin_lock_bh(&br->multicast_lock); 4565 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) 4566 goto unlock; 4567 4568 err = br_mc_disabled_update(br->dev, val, extack); 4569 if (err == -EOPNOTSUPP) 4570 err = 0; 4571 if (err) 4572 goto unlock; 4573 4574 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); 4575 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { 4576 change_snoopers = true; 4577 goto unlock; 4578 } 4579 4580 if (!netif_running(br->dev)) 4581 goto unlock; 4582 4583 br_multicast_open(br); 4584 list_for_each_entry(port, &br->port_list, list) 4585 __br_multicast_enable_port_ctx(&port->multicast_ctx); 4586 4587 change_snoopers = true; 4588 4589 unlock: 4590 spin_unlock_bh(&br->multicast_lock); 4591 4592 /* br_multicast_join_snoopers has the potential to cause 4593 * an MLD Report/Leave to be delivered to br_multicast_rcv, 4594 * which would in turn call br_multicast_add_group, which would 4595 * attempt to acquire multicast_lock. This function should be 4596 * called after the lock has been released to avoid deadlocks on 4597 * multicast_lock. 
4598 * 4599 * br_multicast_leave_snoopers does not have the problem since 4600 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and 4601 * returns without calling br_multicast_ipv4/6_rcv if it's not 4602 * enabled. Moved both functions out just for symmetry. 4603 */ 4604 if (change_snoopers) { 4605 if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 4606 br_multicast_join_snoopers(br); 4607 else 4608 br_multicast_leave_snoopers(br); 4609 } 4610 4611 return err; 4612 } 4613 4614 bool br_multicast_enabled(const struct net_device *dev) 4615 { 4616 struct net_bridge *br = netdev_priv(dev); 4617 4618 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED); 4619 } 4620 EXPORT_SYMBOL_GPL(br_multicast_enabled); 4621 4622 bool br_multicast_router(const struct net_device *dev) 4623 { 4624 struct net_bridge *br = netdev_priv(dev); 4625 bool is_router; 4626 4627 spin_lock_bh(&br->multicast_lock); 4628 is_router = br_multicast_is_router(&br->multicast_ctx, NULL); 4629 spin_unlock_bh(&br->multicast_lock); 4630 return is_router; 4631 } 4632 EXPORT_SYMBOL_GPL(br_multicast_router); 4633 4634 int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val) 4635 { 4636 unsigned long max_delay; 4637 4638 val = !!val; 4639 4640 spin_lock_bh(&brmctx->br->multicast_lock); 4641 if (brmctx->multicast_querier == val) 4642 goto unlock; 4643 4644 WRITE_ONCE(brmctx->multicast_querier, val); 4645 if (!val) 4646 goto unlock; 4647 4648 max_delay = brmctx->multicast_query_response_interval; 4649 4650 if (!timer_pending(&brmctx->ip4_other_query.timer)) 4651 mod_timer(&brmctx->ip4_other_query.delay_timer, 4652 jiffies + max_delay); 4653 4654 br_multicast_start_querier(brmctx, &brmctx->ip4_own_query); 4655 4656 #if IS_ENABLED(CONFIG_IPV6) 4657 if (!timer_pending(&brmctx->ip6_other_query.timer)) 4658 mod_timer(&brmctx->ip6_other_query.delay_timer, 4659 jiffies + max_delay); 4660 4661 br_multicast_start_querier(brmctx, &brmctx->ip6_own_query); 4662 #endif 4663 4664 unlock: 4665 
spin_unlock_bh(&brmctx->br->multicast_lock); 4666 4667 return 0; 4668 } 4669 4670 int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx, 4671 unsigned long val) 4672 { 4673 /* Currently we support only version 2 and 3 */ 4674 switch (val) { 4675 case 2: 4676 case 3: 4677 break; 4678 default: 4679 return -EINVAL; 4680 } 4681 4682 spin_lock_bh(&brmctx->br->multicast_lock); 4683 brmctx->multicast_igmp_version = val; 4684 spin_unlock_bh(&brmctx->br->multicast_lock); 4685 4686 return 0; 4687 } 4688 4689 #if IS_ENABLED(CONFIG_IPV6) 4690 int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx, 4691 unsigned long val) 4692 { 4693 /* Currently we support version 1 and 2 */ 4694 switch (val) { 4695 case 1: 4696 case 2: 4697 break; 4698 default: 4699 return -EINVAL; 4700 } 4701 4702 spin_lock_bh(&brmctx->br->multicast_lock); 4703 brmctx->multicast_mld_version = val; 4704 spin_unlock_bh(&brmctx->br->multicast_lock); 4705 4706 return 0; 4707 } 4708 #endif 4709 4710 void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, 4711 unsigned long val) 4712 { 4713 unsigned long intvl_jiffies = clock_t_to_jiffies(val); 4714 4715 if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) { 4716 br_info(brmctx->br, 4717 "trying to set multicast query interval below minimum, setting to %lu (%ums)\n", 4718 jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN), 4719 jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN)); 4720 intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN; 4721 } 4722 4723 brmctx->multicast_query_interval = intvl_jiffies; 4724 } 4725 4726 void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, 4727 unsigned long val) 4728 { 4729 unsigned long intvl_jiffies = clock_t_to_jiffies(val); 4730 4731 if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) { 4732 br_info(brmctx->br, 4733 "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n", 4734 jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN), 4735 
jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN)); 4736 intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN; 4737 } 4738 4739 brmctx->multicast_startup_query_interval = intvl_jiffies; 4740 } 4741 4742 /** 4743 * br_multicast_list_adjacent - Returns snooped multicast addresses 4744 * @dev: The bridge port adjacent to which to retrieve addresses 4745 * @br_ip_list: The list to store found, snooped multicast IP addresses in 4746 * 4747 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast 4748 * snooping feature on all bridge ports of dev's bridge device, excluding 4749 * the addresses from dev itself. 4750 * 4751 * Returns the number of items added to br_ip_list. 4752 * 4753 * Notes: 4754 * - br_ip_list needs to be initialized by caller 4755 * - br_ip_list might contain duplicates in the end 4756 * (needs to be taken care of by caller) 4757 * - br_ip_list needs to be freed by caller 4758 */ 4759 int br_multicast_list_adjacent(struct net_device *dev, 4760 struct list_head *br_ip_list) 4761 { 4762 struct net_bridge *br; 4763 struct net_bridge_port *port; 4764 struct net_bridge_port_group *group; 4765 struct br_ip_list *entry; 4766 int count = 0; 4767 4768 rcu_read_lock(); 4769 if (!br_ip_list || !netif_is_bridge_port(dev)) 4770 goto unlock; 4771 4772 port = br_port_get_rcu(dev); 4773 if (!port || !port->br) 4774 goto unlock; 4775 4776 br = port->br; 4777 4778 list_for_each_entry_rcu(port, &br->port_list, list) { 4779 if (!port->dev || port->dev == dev) 4780 continue; 4781 4782 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 4783 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 4784 if (!entry) 4785 goto unlock; 4786 4787 entry->addr = group->key.addr; 4788 list_add(&entry->list, br_ip_list); 4789 count++; 4790 } 4791 } 4792 4793 unlock: 4794 rcu_read_unlock(); 4795 return count; 4796 } 4797 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); 4798 4799 /** 4800 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge 4801 * 
@dev: The bridge port providing the bridge on which to check for a querier 4802 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 4803 * 4804 * Checks whether the given interface has a bridge on top and if so returns 4805 * true if a valid querier exists anywhere on the bridged link layer. 4806 * Otherwise returns false. 4807 */ 4808 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 4809 { 4810 struct net_bridge *br; 4811 struct net_bridge_port *port; 4812 struct ethhdr eth; 4813 bool ret = false; 4814 4815 rcu_read_lock(); 4816 if (!netif_is_bridge_port(dev)) 4817 goto unlock; 4818 4819 port = br_port_get_rcu(dev); 4820 if (!port || !port->br) 4821 goto unlock; 4822 4823 br = port->br; 4824 4825 memset(ð, 0, sizeof(eth)); 4826 eth.h_proto = htons(proto); 4827 4828 ret = br_multicast_querier_exists(&br->multicast_ctx, ð, NULL); 4829 4830 unlock: 4831 rcu_read_unlock(); 4832 return ret; 4833 } 4834 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 4835 4836 /** 4837 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 4838 * @dev: The bridge port adjacent to which to check for a querier 4839 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 4840 * 4841 * Checks whether the given interface has a bridge on top and if so returns 4842 * true if a selected querier is behind one of the other ports of this 4843 * bridge. Otherwise returns false. 
4844 */ 4845 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) 4846 { 4847 struct net_bridge_mcast *brmctx; 4848 struct net_bridge *br; 4849 struct net_bridge_port *port; 4850 bool ret = false; 4851 int port_ifidx; 4852 4853 rcu_read_lock(); 4854 if (!netif_is_bridge_port(dev)) 4855 goto unlock; 4856 4857 port = br_port_get_rcu(dev); 4858 if (!port || !port->br) 4859 goto unlock; 4860 4861 br = port->br; 4862 brmctx = &br->multicast_ctx; 4863 4864 switch (proto) { 4865 case ETH_P_IP: 4866 port_ifidx = brmctx->ip4_querier.port_ifidx; 4867 if (!timer_pending(&brmctx->ip4_other_query.timer) || 4868 port_ifidx == port->dev->ifindex) 4869 goto unlock; 4870 break; 4871 #if IS_ENABLED(CONFIG_IPV6) 4872 case ETH_P_IPV6: 4873 port_ifidx = brmctx->ip6_querier.port_ifidx; 4874 if (!timer_pending(&brmctx->ip6_other_query.timer) || 4875 port_ifidx == port->dev->ifindex) 4876 goto unlock; 4877 break; 4878 #endif 4879 default: 4880 goto unlock; 4881 } 4882 4883 ret = true; 4884 unlock: 4885 rcu_read_unlock(); 4886 return ret; 4887 } 4888 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 4889 4890 /** 4891 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port 4892 * @dev: The bridge port adjacent to which to check for a multicast router 4893 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 4894 * 4895 * Checks whether the given interface has a bridge on top and if so returns 4896 * true if a multicast router is behind one of the other ports of this 4897 * bridge. Otherwise returns false. 
4898 */ 4899 bool br_multicast_has_router_adjacent(struct net_device *dev, int proto) 4900 { 4901 struct net_bridge_mcast_port *pmctx; 4902 struct net_bridge_mcast *brmctx; 4903 struct net_bridge_port *port; 4904 bool ret = false; 4905 4906 rcu_read_lock(); 4907 port = br_port_get_check_rcu(dev); 4908 if (!port) 4909 goto unlock; 4910 4911 brmctx = &port->br->multicast_ctx; 4912 switch (proto) { 4913 case ETH_P_IP: 4914 hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list, 4915 ip4_rlist) { 4916 if (pmctx->port == port) 4917 continue; 4918 4919 ret = true; 4920 goto unlock; 4921 } 4922 break; 4923 #if IS_ENABLED(CONFIG_IPV6) 4924 case ETH_P_IPV6: 4925 hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list, 4926 ip6_rlist) { 4927 if (pmctx->port == port) 4928 continue; 4929 4930 ret = true; 4931 goto unlock; 4932 } 4933 break; 4934 #endif 4935 default: 4936 /* when compiled without IPv6 support, be conservative and 4937 * always assume presence of an IPv6 multicast router 4938 */ 4939 ret = true; 4940 } 4941 4942 unlock: 4943 rcu_read_unlock(); 4944 return ret; 4945 } 4946 EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent); 4947 4948 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats, 4949 const struct sk_buff *skb, u8 type, u8 dir) 4950 { 4951 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats); 4952 __be16 proto = skb->protocol; 4953 unsigned int t_len; 4954 4955 u64_stats_update_begin(&pstats->syncp); 4956 switch (proto) { 4957 case htons(ETH_P_IP): 4958 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 4959 switch (type) { 4960 case IGMP_HOST_MEMBERSHIP_REPORT: 4961 pstats->mstats.igmp_v1reports[dir]++; 4962 break; 4963 case IGMPV2_HOST_MEMBERSHIP_REPORT: 4964 pstats->mstats.igmp_v2reports[dir]++; 4965 break; 4966 case IGMPV3_HOST_MEMBERSHIP_REPORT: 4967 pstats->mstats.igmp_v3reports[dir]++; 4968 break; 4969 case IGMP_HOST_MEMBERSHIP_QUERY: 4970 if (t_len != sizeof(struct igmphdr)) { 4971 
pstats->mstats.igmp_v3queries[dir]++; 4972 } else { 4973 unsigned int offset = skb_transport_offset(skb); 4974 struct igmphdr *ih, _ihdr; 4975 4976 ih = skb_header_pointer(skb, offset, 4977 sizeof(_ihdr), &_ihdr); 4978 if (!ih) 4979 break; 4980 if (!ih->code) 4981 pstats->mstats.igmp_v1queries[dir]++; 4982 else 4983 pstats->mstats.igmp_v2queries[dir]++; 4984 } 4985 break; 4986 case IGMP_HOST_LEAVE_MESSAGE: 4987 pstats->mstats.igmp_leaves[dir]++; 4988 break; 4989 } 4990 break; 4991 #if IS_ENABLED(CONFIG_IPV6) 4992 case htons(ETH_P_IPV6): 4993 t_len = ntohs(ipv6_hdr(skb)->payload_len) + 4994 sizeof(struct ipv6hdr); 4995 t_len -= skb_network_header_len(skb); 4996 switch (type) { 4997 case ICMPV6_MGM_REPORT: 4998 pstats->mstats.mld_v1reports[dir]++; 4999 break; 5000 case ICMPV6_MLD2_REPORT: 5001 pstats->mstats.mld_v2reports[dir]++; 5002 break; 5003 case ICMPV6_MGM_QUERY: 5004 if (t_len != sizeof(struct mld_msg)) 5005 pstats->mstats.mld_v2queries[dir]++; 5006 else 5007 pstats->mstats.mld_v1queries[dir]++; 5008 break; 5009 case ICMPV6_MGM_REDUCTION: 5010 pstats->mstats.mld_leaves[dir]++; 5011 break; 5012 } 5013 break; 5014 #endif /* CONFIG_IPV6 */ 5015 } 5016 u64_stats_update_end(&pstats->syncp); 5017 } 5018 5019 void br_multicast_count(struct net_bridge *br, 5020 const struct net_bridge_port *p, 5021 const struct sk_buff *skb, u8 type, u8 dir) 5022 { 5023 struct bridge_mcast_stats __percpu *stats; 5024 5025 /* if multicast_disabled is true then igmp type can't be set */ 5026 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) 5027 return; 5028 5029 if (p) 5030 stats = p->mcast_stats; 5031 else 5032 stats = br->mcast_stats; 5033 if (WARN_ON(!stats)) 5034 return; 5035 5036 br_mcast_stats_add(stats, skb, type, dir); 5037 } 5038 5039 int br_multicast_init_stats(struct net_bridge *br) 5040 { 5041 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 5042 if (!br->mcast_stats) 5043 return -ENOMEM; 5044 5045 return 0; 5046 } 5047 5048 void 
br_multicast_uninit_stats(struct net_bridge *br) 5049 { 5050 free_percpu(br->mcast_stats); 5051 } 5052 5053 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 5054 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 5055 { 5056 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 5057 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX]; 5058 } 5059 5060 void br_multicast_get_stats(const struct net_bridge *br, 5061 const struct net_bridge_port *p, 5062 struct br_mcast_stats *dest) 5063 { 5064 struct bridge_mcast_stats __percpu *stats; 5065 struct br_mcast_stats tdst; 5066 int i; 5067 5068 memset(dest, 0, sizeof(*dest)); 5069 if (p) 5070 stats = p->mcast_stats; 5071 else 5072 stats = br->mcast_stats; 5073 if (WARN_ON(!stats)) 5074 return; 5075 5076 memset(&tdst, 0, sizeof(tdst)); 5077 for_each_possible_cpu(i) { 5078 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i); 5079 struct br_mcast_stats temp; 5080 unsigned int start; 5081 5082 do { 5083 start = u64_stats_fetch_begin(&cpu_stats->syncp); 5084 memcpy(&temp, &cpu_stats->mstats, sizeof(temp)); 5085 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); 5086 5087 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries); 5088 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries); 5089 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries); 5090 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves); 5091 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports); 5092 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports); 5093 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports); 5094 tdst.igmp_parse_errors += temp.igmp_parse_errors; 5095 5096 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries); 5097 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries); 5098 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves); 5099 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports); 5100 mcast_stats_add_dir(tdst.mld_v2reports, 
temp.mld_v2reports); 5101 tdst.mld_parse_errors += temp.mld_parse_errors; 5102 } 5103 memcpy(dest, &tdst, sizeof(*dest)); 5104 } 5105 5106 int br_mdb_hash_init(struct net_bridge *br) 5107 { 5108 int err; 5109 5110 err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params); 5111 if (err) 5112 return err; 5113 5114 err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params); 5115 if (err) { 5116 rhashtable_destroy(&br->sg_port_tbl); 5117 return err; 5118 } 5119 5120 return 0; 5121 } 5122 5123 void br_mdb_hash_fini(struct net_bridge *br) 5124 { 5125 rhashtable_destroy(&br->sg_port_tbl); 5126 rhashtable_destroy(&br->mdb_hash_tbl); 5127 } 5128