// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"

/* Bridge-wide MDB (multicast group database) hash table, keyed by the full
 * struct br_ip (protocol + group/source address + vid).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

/* Per-port S,G entry hash table, keyed by (port, address) pairs. */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);

/* Look up the S,G port group entry for (port, addr).
 * Caller must hold br->multicast_lock.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

/* MDB lookup for callers already inside an RCU read-side section. */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

/* MDB lookup for callers holding br->multicast_lock; opens a short RCU
 * read-side section just around the hash lookup itself.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

/* Convenience wrapper: look up an IPv4 *,G entry by group address and vid. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: look up an IPv6 *,G entry by group address and vid. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif

/* Map an ingress skb to its MDB entry (fast path, RCU context).
 * When the respective IGMPv3/MLDv2 version is active an S,G lookup is tried
 * first, falling back to *,G; unrecognized protocols are matched as L2
 * entries by destination MAC.  Returns NULL when snooping is disabled or the
 * skb was already classified as an IGMP/MLD control packet.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (br->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* try the S,G entry first, then fall back to *,G */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (br->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* try the S,G entry first, then fall back to *,G */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* Test whether port group @p matches (@port, @src).  The source MAC only
 * participates in the match when the port does multicast-to-unicast
 * replication (BR_MULTICAST_TO_UNICAST).
 */
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

/* Auto-install a kernel-managed S,G entry mirroring a *,G EXCLUDE group on
 * the same port, unless an entry for (port, S,G) already exists.  Only
 * RTPROT_KERNEL entries get the STAR_EXCL flag; user-added ones are left as
 * they are.
 */
static void __fwd_add_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(br, pg->key.port, sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

/* Remove an S,G entry previously auto-installed by __fwd_add_star_excl().
 * Entries without STAR_EXCL or not owned by the kernel are left alone.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	/* walk all other ports of this *,G and sync their installed sources
	 * into (filter_mode == EXCLUDE) or out of (INCLUDE) pg's port
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default
 * propagates the *,G entry's host_joined state to the new S,G entry
 */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	/* for each installed source of each port group, copy host_joined
	 * from the *,G entry into the matching S,G MDB entry
	 */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

/* Delete the auto-installed (STAR_EXCL) ports of an S,G entry once no other
 * kind of port remains on it.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* drop everything except permanent (user-managed) entries */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

/* Mirror a newly created S,G entry onto every EXCLUDE-mode port of its *,G
 * entry (skipping ports that already have an S,G entry).
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_port_group *pg;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		src_pg = __br_multicast_add_group(br, pg->key.port,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}

/* Install the S,G forwarding entry for a group source, once, and hand its
 * lifetime over to the kernel (unless user-space owns it as permanent).
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_port_group *sg;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;
	/* start blocked when the source timer isn't running */
	sg = __br_multicast_add_group(src->br, src->pg->key.port, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

/* Remove the S,G forwarding entry for a group source, skipping entries owned
 * by user-space (permanent, non-kernel rt_protocol).
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* a running source timer means the source is active -> unblock */
	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

/* GC callback: final destruction of an MDB entry after RCU grace period
 * machinery has been queued (see br_multicast_del_mdb_entry()).
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

/* Unlink an MDB entry from the hash table and lists and queue it for
 * deferred destruction via the bridge's GC work.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;
	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* Timer callback: membership timeout of an MDB entry.  Drops the host join
 * and deletes the entry once no ports remain.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	/* a pending timer means it was re-armed after this expiry fired */
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

/* GC callback: final destruction of a group source entry. */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}

/* Remove a source from its port group: tear down its S,G forwarding entry,
 * unlink it and queue it for deferred destruction.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src, fastleave);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

/* GC callback: final destruction of a port group. */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

/* Unlink port group @pg from MDB entry @mp (@pp points to the link being
 * removed), delete its sources, notify user-space, keep the *,G/S,G
 * cross-references consistent and queue @pg for deferred destruction.
 * Also arms @mp's timer for immediate expiry when it became empty.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

/* Find @pg's link in its MDB entry's port list and delete it via
 * br_multicast_del_pg(); warns if the entry or link cannot be found.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

/* Timer callback: port group membership timeout.  Expires inactive sources,
 * switches the group to INCLUDE mode and deletes it when no sources remain.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent, false);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* Run all queued GC destroy callbacks (called from the bridge's GC work). */
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}

/* Build an IGMP query skb (v2 or v3 depending on the bridge setting).
 * With @with_srcs set, a v3 group-and-source-specific query is built from
 * @pg's sources whose timers are on the @over_lmqt side of the last member
 * query time and which still have retransmissions left; *@need_rexmit is set
 * when any of them still needs further retransmissions afterwards.
 * Returns NULL when nothing needs to be sent or allocation fails.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* first pass: count matching sources to size the skb */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 for the IP Router Alert option */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24-byte header: base + 4-byte RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in sources, same predicate as above */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff
*br_ip6_multicast_alloc_query(struct net_bridge *br,
			      struct net_bridge_port_group *pg,
			      const struct in6_addr *ip6_dst,
			      const struct in6_addr *group,
			      bool with_srcs, bool over_llqt,
			      u8 sflag, u8 *igmp_type,
			      bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			/* first pass: count matching sources to size the skb */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* + 8 for the hop-by-hop options header (Router Alert) */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	/* no usable link-local source address -> remember and give up */
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in sources, same predicate as above */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

/* Dispatch query allocation by address family; the destination defaults to
 * all-hosts (224.0.0.1 / ff02::1) when @ip_dst is NULL.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ?
				      ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);

		return br_ip4_multicast_alloc_query(br, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* no explicit destination: use the link-local
			 * all-nodes address ff02::1
			 */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(br, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}

/* Look up the mdb entry for @group, creating and inserting a new one if
 * it doesn't exist yet. Returns an ERR_PTR on allocation/insert failure,
 * or -E2BIG when the configured hash maximum is reached (in which case
 * multicast snooping is also turned off via BROPT_MULTICAST_ENABLED).
 * Callers hold the multicast lock (br_mdb_ip_get asserts it).
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	/* GFP_ATOMIC: we are under the multicast spinlock */
	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		/* publish on the RCU list only after a successful insert */
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

/* Per-source entry timer. In INCLUDE mode an expired source is deleted,
 * and the whole port group is deleted once its source list becomes
 * empty; otherwise the entry is handed to br_multicast_fwd_src_handle().
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* bail out if the entry is already unlinked, the bridge is down
	 * or the timer was re-armed after it fired
	 */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

/* Linear search of @pg's source list for an entry whose protocol-specific
 * source address matches @ip. Returns NULL when no entry matches.
 */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

/* Allocate and link a new source entry for @pg. Enforces the per-group
 * source limit (PG_SRC_ENT_LIMIT) and rejects invalid source addresses
 * (zeronet/any and multicast sources). Returns NULL when the limit is
 * hit, the address is invalid or the allocation fails.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

/* Allocate a new port group for (@port, @group). S,G entries are also
 * inserted into the bridge's sg_port rhashtable; on insert failure the
 * entry is freed and NULL is returned. @src, when non-NULL, is copied
 * into eth_addr, otherwise the broadcast address is used.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}

/* Mark @mp as joined by the bridge (host) itself and (re)arm its
 * membership timer; L2 groups are not subject to the timer.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}

/* Inverse of br_multicast_host_join(); no-op if the host had not
 * joined @mp.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

/* Core add-group path; uses mlock_dereference so the multicast lock
 * must be held by the caller. A NULL @port means the bridge (host)
 * itself joins the group. Returns the new or existing port group, NULL
 * for host joins/early exits, or an ERR_PTR on failure.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!port) {
		br_multicast_host_join(mp, true);
		goto out;
	}

	/* walk the port group list: either the same port/src entry
	 * already exists, or we stop at the insertion point (the list
	 * is ordered by descending port pointer value)
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

found:
	/* IGMPv2/MLDv1 joins carry no source lists, refresh the
	 * whole-group membership timer instead
	 */
	if (igmpv2_mldv1)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

out:
	return p;
}

/* Locked wrapper around __br_multicast_add_group() that reduces the
 * result to an error code.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&br->multicast_lock);
	pg = __br_multicast_add_group(br, port, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&br->multicast_lock);

	return err;
}

/* IGMP join handler. Groups in the IPv4 link-local scope
 * (ipv4_is_local_multicast) are never snooped. An IGMPv2 report
 * implies EXCLUDE (join all sources) filter mode.
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD join handler, mirrors br_ip4_multicast_add_group(); the
 * link-local all-nodes group is ignored.
 */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      mldv1);
}
#endif

/* Timer: a port's multicast router presence expired. Permanent or
 * disabled router configuration is left untouched.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
		from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}

/* Propagate the bridge's own mrouter state to switchdev drivers. */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/* Same as br_multicast_router_expired() but for the bridge device
 * itself acting as a multicast router.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}

/* The other (foreign) querier timed out: start sending our own queries
 * again, provided the bridge is up and snooping is enabled.
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

/* Record the source address of a query we sent ourselves as this
 * bridge's querier address for the respective address family.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

/* Build and emit one query. With a @port the query is transmitted out
 * of that port through the bridge netfilter LOCAL_OUT hook; otherwise
 * it is accounted as RX and looped back via netif_rx(). When a
 * source-list query was built with @sflag set (over LMQT), a second
 * pass builds and sends the under-LMQT variant as well.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(br, group, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

/* Send a general query for the address family @own_query belongs to,
 * unless another querier is currently active, then re-arm the
 * own-query timer (using the startup interval until enough startup
 * queries have been sent).
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* other_query can remain NULL when IPv6 is compiled out */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
				  NULL);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

/* Own-query timer handler for one port/address-family pair. */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

/* Retransmit group and group-and-source queries for @pg while the
 * retransmit budget lasts and no other querier is active.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     br->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

/* Tell switchdev drivers whether multicast snooping is disabled
 * (@value is the "enabled" state, hence the negation).
 */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}

/* Per-port multicast initialization, called when a port is added to a
 * bridge. Returns 0 or a negative errno.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	/* switchdev offload is optional, only hard failures matter */
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

/* Per-port multicast teardown, inverse of br_multicast_add_port().
 * Deleted entries are collected under the lock and garbage collected
 * outside of it.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

/* Restart startup querying for one own-query context by firing its
 * timer immediately.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

/* Kick off per-port querying (and router registration for permanent
 * mrouter ports) once snooping is enabled and the bridge is up.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

/* Locked wrapper around __br_multicast_enable_port(). */
void
br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}

/* Flush all non-permanent groups and stop every per-port timer when
 * the port goes down.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_find_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

/* Delete every source entry marked BR_SGRP_F_DELETE; returns how many
 * entries were removed.
 */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

/* Re-arm a source timer and update the entry's forwarding state. */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

/* Lower the timers of all sources marked BR_SGRP_F_SEND to LMQT and,
 * when we are the active querier, emit a group-and-source query and
 * arm the group's retransmit timer.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	u32 lmqc = br->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(br);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + br->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

/* Send a group-specific query for @pg if we are the active querier,
 * and in EXCLUDE mode lower the group timer to LMQT.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	unsigned long now = jiffies, lmi;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + br->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
		__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(br));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark what the report lists */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(br));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

/* Handle an IS_EX report for @pg, dispatching on the current filter
 * mode; the group always ends up in EXCLUDE mode with its timer set
 * to GMI.
 */
static bool br_multicast_isexc(struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(pg, h_addr, srcs, nsrcs, addr_size,
					       grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* mark all existing sources for querying, then unmark those
	 * the report lists (A-B remains marked)
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
				void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only sources with a running timer (set X) are queried */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	__grp_send_query_and_rexmit(pg);

	return changed;
}

/* Handle a TO_IN report, dispatching on the current filter mode. May
 * delete @pg entirely when EHT tracking says so.
 */
static bool br_multicast_toin(struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(pg, h_addr, srcs, nsrcs, addr_size,
					      grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(pg, h_addr, srcs, nsrcs, addr_size,
					      grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* in A*B: keep it, but query it */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y) inherits the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}

/* Handle a TO_EX report; the group always ends up in EXCLUDE mode with
 * its timer set to GMI.
 */
static bool br_multicast_toex(struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	struct net_bridge *br = pg->key.port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(pg, h_addr, srcs, nsrcs, addr_size,
					      grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
static bool __grp_src_block_incl(struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &=
~BR_SGRP_F_SEND; 2208 2209 memset(&src_ip, 0, sizeof(src_ip)); 2210 src_ip.proto = pg->key.addr.proto; 2211 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 2212 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size); 2213 ent = br_multicast_find_group_src(pg, &src_ip); 2214 if (ent) { 2215 ent->flags |= BR_SGRP_F_SEND; 2216 to_send++; 2217 } 2218 } 2219 2220 if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type)) 2221 changed = true; 2222 2223 if (to_send) 2224 __grp_src_query_marked_and_rexmit(pg); 2225 2226 return changed; 2227 } 2228 2229 /* State Msg type New state Actions 2230 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer 2231 * Send Q(G,A-Y) 2232 */ 2233 static bool __grp_src_block_excl(struct net_bridge_port_group *pg, void *h_addr, 2234 void *srcs, u32 nsrcs, size_t addr_size, int grec_type) 2235 { 2236 struct net_bridge_group_src *ent; 2237 u32 src_idx, to_send = 0; 2238 bool changed = false; 2239 struct br_ip src_ip; 2240 2241 hlist_for_each_entry(ent, &pg->src_list, node) 2242 ent->flags &= ~BR_SGRP_F_SEND; 2243 2244 memset(&src_ip, 0, sizeof(src_ip)); 2245 src_ip.proto = pg->key.addr.proto; 2246 for (src_idx = 0; src_idx < nsrcs; src_idx++) { 2247 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size); 2248 ent = br_multicast_find_group_src(pg, &src_ip); 2249 if (!ent) { 2250 ent = br_multicast_new_group_src(pg, &src_ip); 2251 if (ent) { 2252 __grp_src_mod_timer(ent, pg->timer.expires); 2253 changed = true; 2254 } 2255 } 2256 if (ent && timer_pending(&ent->timer)) { 2257 ent->flags |= BR_SGRP_F_SEND; 2258 to_send++; 2259 } 2260 } 2261 2262 if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type)) 2263 changed = true; 2264 2265 if (to_send) 2266 __grp_src_query_marked_and_rexmit(pg); 2267 2268 return changed; 2269 } 2270 2271 static bool br_multicast_block(struct net_bridge_port_group *pg, void *h_addr, 2272 void *srcs, u32 nsrcs, size_t addr_size, int grec_type) 2273 { 2274 bool 
changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(pg, h_addr, srcs, nsrcs, addr_size,
					       grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(pg, h_addr, srcs, nsrcs, addr_size,
					       grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* Walk mp's port group list and return the entry matching port p and host
 * source MAC src (br_port_group_equal), or NULL.  Caller must hold
 * multicast_lock (mlock_dereference).
 */
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}

/* Parse an IGMPv3 membership report and apply each group record.
 * In IGMPv2 compatibility mode (or for reports from the bridge itself,
 * port == NULL) records degrade to plain join/leave; otherwise the
 * per-record source lists are processed under multicast_lock.
 * Returns 0 or a negative error (-EINVAL on truncated packets).
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = br->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* validate the fixed part of the record before reading it */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* each IPv4 source is 4 bytes */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record type: skip it */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* IS_IN()/TO_IN() with no sources == leave */
			if (!port || igmpv2) {
				br_ip4_multicast_leave_group(br, port, group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src, igmpv2);
			if (err)
				break;
		}

		if (!port || igmpv2)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip4_get(br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, h_addr, grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, h_addr, grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, h_addr, grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, h_addr, grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed
= br_multicast_toex(pg, h_addr, grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, h_addr, grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 multicast listener report and apply each group record.
 * Mirrors br_ip4_multicast_igmp3_report(): in MLDv1 compatibility mode
 * (or port == NULL) records degrade to plain join/leave; otherwise the
 * per-record source lists are processed under multicast_lock.
 * Returns 0 or a negative error (-EINVAL on truncated packets).
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = br->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	/* number of group records lives in the ICMPv6 "unused" field */
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		/* read grec_nsrcs first so the variable-length record
		 * can be bounds-checked before it is touched
		 */
		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record type: skip it */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* IS_IN()/TO_IN() with no sources == done/leave */
			if (!port || mldv1) {
				br_ip6_multicast_leave_group(br, port,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!port || mldv1)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}
#endif

/* IGMP querier election: a lower source IP wins.  Returns true if the
 * sender (saddr) becomes/remains the selected querier and records it.
 * Caller holds multicast_lock (see comment at the rcu_assign_pointer).
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	/* no querier known yet (neither own nor other query timer armed) */
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.src.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.src.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.src.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election: lower source address wins, as above. */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.src.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.src.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

/* Dispatch querier election by address family. */
static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
#endif
	}

	return false;
}

/* Re-arm the "other querier present" timer; delay_time is only refreshed
 * when the timer was not already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/* Propagate the port's mrouter state to switchdev-offloaded hardware. */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* already on the router list */
	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}

/* Note a multicast router was seen behind port (or locally if port is
 * NULL) and refresh the corresponding router timer, unless the port's
 * router mode is DISABLED or PERM (statically configured).
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

/* A query from another querier was seen: if it wins the election, refresh
 * the other-querier timer and mark a router behind that port.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

/* Process a received IGMP query: run querier election for general queries,
 * and for group-specific queries lower the group/port-group timers so
 * membership expires within max_delay * last_member_count unless
 * re-reported.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2 query (8 bytes) */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1 (code == 0): default delay, always general */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		/* IGMPv3 query */
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (br->multicast_igmp_version == 3 && group && ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: only querier election / timers */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): by C precedence the version/filter-mode check
		 * binds only to the try_to_del_timer_sync() arm of the
		 * ternary - confirm against upstream before "fixing"
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query; IPv6 counterpart of
 * br_ip4_multicast_query().  Returns 0 or -EINVAL on malformed input.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (br->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

/* Handle a leave/done message for group on port (port == NULL means the
 * bridge itself): with fast-leave the port group is deleted immediately;
 * otherwise, if we are the selected querier, send a group-specific query
 * and shorten the relevant timers to last_member_count *
 * last_member_interval so membership expires unless re-reported.
 * Skipped entirely while another querier is active (other_query pending).
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* fast leave: drop the matching non-permanent entry now */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is handling this group */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				/* only shorten the timer, never extend it */
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* host-joined (bridge itself): shorten the group timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->key.port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

/* IGMP leave for an IPv4 group; local-scope (224.0.0.x) groups are never
 * expired by snooping.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ?
		    &port->ip4_own_query : &br->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
/* MLD done for an IPv6 group; link-local all-nodes is never expired. */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif

/* Bump the per-cpu IGMP/MLD parse-error counter for the port (or the
 * bridge when p == NULL), if multicast stats are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

/* A PIMv2 Hello means a multicast router lives behind this port. */
static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

/* IGMP Multicast Router Discovery advertisement: mark a router behind the
 * port.  Returns 0 on match, -ENOMSG otherwise.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

/* Validate and dispatch an IPv4 multicast control frame (IGMP, PIM Hello,
 * MRD).  Sets BR_INPUT_SKB_CB flags consumed by the forwarding path and
 * updates RX stats.  Returns 0 or a negative error from the handlers.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* valid multicast, but not an IGMP packet */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
						 true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
/* ICMPv6 Multicast Router Discovery advertisement. */
static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
				     struct net_bridge_port *port,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	br_multicast_mark_router(br, port);
}

/* Validate and dispatch an IPv6 multicast control frame (MLD, MRD);
 * IPv6 counterpart of br_multicast_ipv4_rcv().
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		/* valid multicast, but not an MLD packet */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(br, port, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

/* Entry point from the bridge input path: dispatch a received multicast
 * control frame by ethertype.  No-op when snooping is disabled.
 */
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

/* Own-query timer callback: drop our querier-port record and send the
 * next (startup) query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

/* Deferred destruction of mdb/port-group entries: splice the gc list out
 * under the lock, then free outside it.
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

/* Initialize all multicast snooping state for a new bridge: defaults,
 * protocol-timer intervals, per-family querier/query timers, and the
 * mdb/gc bookkeeping.  Called once at bridge creation.
 */
void
br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* protocol default intervals, in jiffies */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device. */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

/* Leave the IPv4 all-snoopers group joined above. */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group joined above. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

/* Restart the startup-query sequence for one own-query context. */
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

/* Bridge brought up: kick off the IGMP (and MLD) startup queries. */
void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

/* Bridge brought down: stop all multicast timers. */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer_sync(&br->ip6_other_query.timer); 3424 del_timer_sync(&br->ip6_own_query.timer); 3425 #endif 3426 } 3427 3428 void br_multicast_dev_del(struct net_bridge *br) 3429 { 3430 struct net_bridge_mdb_entry *mp; 3431 HLIST_HEAD(deleted_head); 3432 struct hlist_node *tmp; 3433 3434 spin_lock_bh(&br->multicast_lock); 3435 hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) 3436 br_multicast_del_mdb_entry(mp); 3437 hlist_move_list(&br->mcast_gc_list, &deleted_head); 3438 spin_unlock_bh(&br->multicast_lock); 3439 3440 br_multicast_gc(&deleted_head); 3441 cancel_work_sync(&br->mcast_gc_work); 3442 3443 rcu_barrier(); 3444 } 3445 3446 int br_multicast_set_router(struct net_bridge *br, unsigned long val) 3447 { 3448 int err = -EINVAL; 3449 3450 spin_lock_bh(&br->multicast_lock); 3451 3452 switch (val) { 3453 case MDB_RTR_TYPE_DISABLED: 3454 case MDB_RTR_TYPE_PERM: 3455 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM); 3456 del_timer(&br->multicast_router_timer); 3457 br->multicast_router = val; 3458 err = 0; 3459 break; 3460 case MDB_RTR_TYPE_TEMP_QUERY: 3461 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY) 3462 br_mc_router_state_change(br, false); 3463 br->multicast_router = val; 3464 err = 0; 3465 break; 3466 } 3467 3468 spin_unlock_bh(&br->multicast_lock); 3469 3470 return err; 3471 } 3472 3473 static void __del_port_router(struct net_bridge_port *p) 3474 { 3475 if (hlist_unhashed(&p->rlist)) 3476 return; 3477 hlist_del_init_rcu(&p->rlist); 3478 br_rtr_notify(p->br->dev, p, RTM_DELMDB); 3479 br_port_mc_router_state_change(p, false); 3480 3481 /* don't allow timer refresh */ 3482 if (p->multicast_router == MDB_RTR_TYPE_TEMP) 3483 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 3484 } 3485 3486 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) 3487 { 3488 struct net_bridge *br = p->br; 3489 unsigned long now = jiffies; 3490 int err = -EINVAL; 3491 3492 spin_lock(&br->multicast_lock); 3493 if (p->multicast_router == val) { 
3494 /* Refresh the temp router port timer */ 3495 if (p->multicast_router == MDB_RTR_TYPE_TEMP) 3496 mod_timer(&p->multicast_router_timer, 3497 now + br->multicast_querier_interval); 3498 err = 0; 3499 goto unlock; 3500 } 3501 switch (val) { 3502 case MDB_RTR_TYPE_DISABLED: 3503 p->multicast_router = MDB_RTR_TYPE_DISABLED; 3504 __del_port_router(p); 3505 del_timer(&p->multicast_router_timer); 3506 break; 3507 case MDB_RTR_TYPE_TEMP_QUERY: 3508 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 3509 __del_port_router(p); 3510 break; 3511 case MDB_RTR_TYPE_PERM: 3512 p->multicast_router = MDB_RTR_TYPE_PERM; 3513 del_timer(&p->multicast_router_timer); 3514 br_multicast_add_router(br, p); 3515 break; 3516 case MDB_RTR_TYPE_TEMP: 3517 p->multicast_router = MDB_RTR_TYPE_TEMP; 3518 br_multicast_mark_router(br, p); 3519 break; 3520 default: 3521 goto unlock; 3522 } 3523 err = 0; 3524 unlock: 3525 spin_unlock(&br->multicast_lock); 3526 3527 return err; 3528 } 3529 3530 static void br_multicast_start_querier(struct net_bridge *br, 3531 struct bridge_mcast_own_query *query) 3532 { 3533 struct net_bridge_port *port; 3534 3535 __br_multicast_open(br, query); 3536 3537 rcu_read_lock(); 3538 list_for_each_entry_rcu(port, &br->port_list, list) { 3539 if (port->state == BR_STATE_DISABLED || 3540 port->state == BR_STATE_BLOCKING) 3541 continue; 3542 3543 if (query == &br->ip4_own_query) 3544 br_multicast_enable(&port->ip4_own_query); 3545 #if IS_ENABLED(CONFIG_IPV6) 3546 else 3547 br_multicast_enable(&port->ip6_own_query); 3548 #endif 3549 } 3550 rcu_read_unlock(); 3551 } 3552 3553 int br_multicast_toggle(struct net_bridge *br, unsigned long val, 3554 struct netlink_ext_ack *extack) 3555 { 3556 struct net_bridge_port *port; 3557 bool change_snoopers = false; 3558 int err = 0; 3559 3560 spin_lock_bh(&br->multicast_lock); 3561 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) 3562 goto unlock; 3563 3564 err = br_mc_disabled_update(br->dev, val, extack); 3565 if (err == -EOPNOTSUPP) 
3566 err = 0; 3567 if (err) 3568 goto unlock; 3569 3570 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); 3571 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { 3572 change_snoopers = true; 3573 goto unlock; 3574 } 3575 3576 if (!netif_running(br->dev)) 3577 goto unlock; 3578 3579 br_multicast_open(br); 3580 list_for_each_entry(port, &br->port_list, list) 3581 __br_multicast_enable_port(port); 3582 3583 change_snoopers = true; 3584 3585 unlock: 3586 spin_unlock_bh(&br->multicast_lock); 3587 3588 /* br_multicast_join_snoopers has the potential to cause 3589 * an MLD Report/Leave to be delivered to br_multicast_rcv, 3590 * which would in turn call br_multicast_add_group, which would 3591 * attempt to acquire multicast_lock. This function should be 3592 * called after the lock has been released to avoid deadlocks on 3593 * multicast_lock. 3594 * 3595 * br_multicast_leave_snoopers does not have the problem since 3596 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and 3597 * returns without calling br_multicast_ipv4/6_rcv if it's not 3598 * enabled. Moved both functions out just for symmetry. 
3599 */ 3600 if (change_snoopers) { 3601 if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) 3602 br_multicast_join_snoopers(br); 3603 else 3604 br_multicast_leave_snoopers(br); 3605 } 3606 3607 return err; 3608 } 3609 3610 bool br_multicast_enabled(const struct net_device *dev) 3611 { 3612 struct net_bridge *br = netdev_priv(dev); 3613 3614 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED); 3615 } 3616 EXPORT_SYMBOL_GPL(br_multicast_enabled); 3617 3618 bool br_multicast_router(const struct net_device *dev) 3619 { 3620 struct net_bridge *br = netdev_priv(dev); 3621 bool is_router; 3622 3623 spin_lock_bh(&br->multicast_lock); 3624 is_router = br_multicast_is_router(br); 3625 spin_unlock_bh(&br->multicast_lock); 3626 return is_router; 3627 } 3628 EXPORT_SYMBOL_GPL(br_multicast_router); 3629 3630 int br_multicast_set_querier(struct net_bridge *br, unsigned long val) 3631 { 3632 unsigned long max_delay; 3633 3634 val = !!val; 3635 3636 spin_lock_bh(&br->multicast_lock); 3637 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val) 3638 goto unlock; 3639 3640 br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val); 3641 if (!val) 3642 goto unlock; 3643 3644 max_delay = br->multicast_query_response_interval; 3645 3646 if (!timer_pending(&br->ip4_other_query.timer)) 3647 br->ip4_other_query.delay_time = jiffies + max_delay; 3648 3649 br_multicast_start_querier(br, &br->ip4_own_query); 3650 3651 #if IS_ENABLED(CONFIG_IPV6) 3652 if (!timer_pending(&br->ip6_other_query.timer)) 3653 br->ip6_other_query.delay_time = jiffies + max_delay; 3654 3655 br_multicast_start_querier(br, &br->ip6_own_query); 3656 #endif 3657 3658 unlock: 3659 spin_unlock_bh(&br->multicast_lock); 3660 3661 return 0; 3662 } 3663 3664 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val) 3665 { 3666 /* Currently we support only version 2 and 3 */ 3667 switch (val) { 3668 case 2: 3669 case 3: 3670 break; 3671 default: 3672 return -EINVAL; 3673 } 3674 3675 spin_lock_bh(&br->multicast_lock); 3676 
br->multicast_igmp_version = val; 3677 spin_unlock_bh(&br->multicast_lock); 3678 3679 return 0; 3680 } 3681 3682 #if IS_ENABLED(CONFIG_IPV6) 3683 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val) 3684 { 3685 /* Currently we support version 1 and 2 */ 3686 switch (val) { 3687 case 1: 3688 case 2: 3689 break; 3690 default: 3691 return -EINVAL; 3692 } 3693 3694 spin_lock_bh(&br->multicast_lock); 3695 br->multicast_mld_version = val; 3696 spin_unlock_bh(&br->multicast_lock); 3697 3698 return 0; 3699 } 3700 #endif 3701 3702 /** 3703 * br_multicast_list_adjacent - Returns snooped multicast addresses 3704 * @dev: The bridge port adjacent to which to retrieve addresses 3705 * @br_ip_list: The list to store found, snooped multicast IP addresses in 3706 * 3707 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast 3708 * snooping feature on all bridge ports of dev's bridge device, excluding 3709 * the addresses from dev itself. 3710 * 3711 * Returns the number of items added to br_ip_list. 
3712 * 3713 * Notes: 3714 * - br_ip_list needs to be initialized by caller 3715 * - br_ip_list might contain duplicates in the end 3716 * (needs to be taken care of by caller) 3717 * - br_ip_list needs to be freed by caller 3718 */ 3719 int br_multicast_list_adjacent(struct net_device *dev, 3720 struct list_head *br_ip_list) 3721 { 3722 struct net_bridge *br; 3723 struct net_bridge_port *port; 3724 struct net_bridge_port_group *group; 3725 struct br_ip_list *entry; 3726 int count = 0; 3727 3728 rcu_read_lock(); 3729 if (!br_ip_list || !netif_is_bridge_port(dev)) 3730 goto unlock; 3731 3732 port = br_port_get_rcu(dev); 3733 if (!port || !port->br) 3734 goto unlock; 3735 3736 br = port->br; 3737 3738 list_for_each_entry_rcu(port, &br->port_list, list) { 3739 if (!port->dev || port->dev == dev) 3740 continue; 3741 3742 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 3743 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 3744 if (!entry) 3745 goto unlock; 3746 3747 entry->addr = group->key.addr; 3748 list_add(&entry->list, br_ip_list); 3749 count++; 3750 } 3751 } 3752 3753 unlock: 3754 rcu_read_unlock(); 3755 return count; 3756 } 3757 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); 3758 3759 /** 3760 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge 3761 * @dev: The bridge port providing the bridge on which to check for a querier 3762 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 3763 * 3764 * Checks whether the given interface has a bridge on top and if so returns 3765 * true if a valid querier exists anywhere on the bridged link layer. 3766 * Otherwise returns false. 
3767 */ 3768 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 3769 { 3770 struct net_bridge *br; 3771 struct net_bridge_port *port; 3772 struct ethhdr eth; 3773 bool ret = false; 3774 3775 rcu_read_lock(); 3776 if (!netif_is_bridge_port(dev)) 3777 goto unlock; 3778 3779 port = br_port_get_rcu(dev); 3780 if (!port || !port->br) 3781 goto unlock; 3782 3783 br = port->br; 3784 3785 memset(ð, 0, sizeof(eth)); 3786 eth.h_proto = htons(proto); 3787 3788 ret = br_multicast_querier_exists(br, ð, NULL); 3789 3790 unlock: 3791 rcu_read_unlock(); 3792 return ret; 3793 } 3794 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 3795 3796 /** 3797 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 3798 * @dev: The bridge port adjacent to which to check for a querier 3799 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 3800 * 3801 * Checks whether the given interface has a bridge on top and if so returns 3802 * true if a selected querier is behind one of the other ports of this 3803 * bridge. Otherwise returns false. 
3804 */ 3805 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) 3806 { 3807 struct net_bridge *br; 3808 struct net_bridge_port *port; 3809 bool ret = false; 3810 3811 rcu_read_lock(); 3812 if (!netif_is_bridge_port(dev)) 3813 goto unlock; 3814 3815 port = br_port_get_rcu(dev); 3816 if (!port || !port->br) 3817 goto unlock; 3818 3819 br = port->br; 3820 3821 switch (proto) { 3822 case ETH_P_IP: 3823 if (!timer_pending(&br->ip4_other_query.timer) || 3824 rcu_dereference(br->ip4_querier.port) == port) 3825 goto unlock; 3826 break; 3827 #if IS_ENABLED(CONFIG_IPV6) 3828 case ETH_P_IPV6: 3829 if (!timer_pending(&br->ip6_other_query.timer) || 3830 rcu_dereference(br->ip6_querier.port) == port) 3831 goto unlock; 3832 break; 3833 #endif 3834 default: 3835 goto unlock; 3836 } 3837 3838 ret = true; 3839 unlock: 3840 rcu_read_unlock(); 3841 return ret; 3842 } 3843 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 3844 3845 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats, 3846 const struct sk_buff *skb, u8 type, u8 dir) 3847 { 3848 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats); 3849 __be16 proto = skb->protocol; 3850 unsigned int t_len; 3851 3852 u64_stats_update_begin(&pstats->syncp); 3853 switch (proto) { 3854 case htons(ETH_P_IP): 3855 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 3856 switch (type) { 3857 case IGMP_HOST_MEMBERSHIP_REPORT: 3858 pstats->mstats.igmp_v1reports[dir]++; 3859 break; 3860 case IGMPV2_HOST_MEMBERSHIP_REPORT: 3861 pstats->mstats.igmp_v2reports[dir]++; 3862 break; 3863 case IGMPV3_HOST_MEMBERSHIP_REPORT: 3864 pstats->mstats.igmp_v3reports[dir]++; 3865 break; 3866 case IGMP_HOST_MEMBERSHIP_QUERY: 3867 if (t_len != sizeof(struct igmphdr)) { 3868 pstats->mstats.igmp_v3queries[dir]++; 3869 } else { 3870 unsigned int offset = skb_transport_offset(skb); 3871 struct igmphdr *ih, _ihdr; 3872 3873 ih = skb_header_pointer(skb, offset, 3874 sizeof(_ihdr), &_ihdr); 3875 if (!ih) 3876 break; 
3877 if (!ih->code) 3878 pstats->mstats.igmp_v1queries[dir]++; 3879 else 3880 pstats->mstats.igmp_v2queries[dir]++; 3881 } 3882 break; 3883 case IGMP_HOST_LEAVE_MESSAGE: 3884 pstats->mstats.igmp_leaves[dir]++; 3885 break; 3886 } 3887 break; 3888 #if IS_ENABLED(CONFIG_IPV6) 3889 case htons(ETH_P_IPV6): 3890 t_len = ntohs(ipv6_hdr(skb)->payload_len) + 3891 sizeof(struct ipv6hdr); 3892 t_len -= skb_network_header_len(skb); 3893 switch (type) { 3894 case ICMPV6_MGM_REPORT: 3895 pstats->mstats.mld_v1reports[dir]++; 3896 break; 3897 case ICMPV6_MLD2_REPORT: 3898 pstats->mstats.mld_v2reports[dir]++; 3899 break; 3900 case ICMPV6_MGM_QUERY: 3901 if (t_len != sizeof(struct mld_msg)) 3902 pstats->mstats.mld_v2queries[dir]++; 3903 else 3904 pstats->mstats.mld_v1queries[dir]++; 3905 break; 3906 case ICMPV6_MGM_REDUCTION: 3907 pstats->mstats.mld_leaves[dir]++; 3908 break; 3909 } 3910 break; 3911 #endif /* CONFIG_IPV6 */ 3912 } 3913 u64_stats_update_end(&pstats->syncp); 3914 } 3915 3916 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, 3917 const struct sk_buff *skb, u8 type, u8 dir) 3918 { 3919 struct bridge_mcast_stats __percpu *stats; 3920 3921 /* if multicast_disabled is true then igmp type can't be set */ 3922 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) 3923 return; 3924 3925 if (p) 3926 stats = p->mcast_stats; 3927 else 3928 stats = br->mcast_stats; 3929 if (WARN_ON(!stats)) 3930 return; 3931 3932 br_mcast_stats_add(stats, skb, type, dir); 3933 } 3934 3935 int br_multicast_init_stats(struct net_bridge *br) 3936 { 3937 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats); 3938 if (!br->mcast_stats) 3939 return -ENOMEM; 3940 3941 return 0; 3942 } 3943 3944 void br_multicast_uninit_stats(struct net_bridge *br) 3945 { 3946 free_percpu(br->mcast_stats); 3947 } 3948 3949 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 3950 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 3951 { 
3952 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 3953 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX]; 3954 } 3955 3956 void br_multicast_get_stats(const struct net_bridge *br, 3957 const struct net_bridge_port *p, 3958 struct br_mcast_stats *dest) 3959 { 3960 struct bridge_mcast_stats __percpu *stats; 3961 struct br_mcast_stats tdst; 3962 int i; 3963 3964 memset(dest, 0, sizeof(*dest)); 3965 if (p) 3966 stats = p->mcast_stats; 3967 else 3968 stats = br->mcast_stats; 3969 if (WARN_ON(!stats)) 3970 return; 3971 3972 memset(&tdst, 0, sizeof(tdst)); 3973 for_each_possible_cpu(i) { 3974 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i); 3975 struct br_mcast_stats temp; 3976 unsigned int start; 3977 3978 do { 3979 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 3980 memcpy(&temp, &cpu_stats->mstats, sizeof(temp)); 3981 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 3982 3983 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries); 3984 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries); 3985 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries); 3986 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves); 3987 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports); 3988 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports); 3989 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports); 3990 tdst.igmp_parse_errors += temp.igmp_parse_errors; 3991 3992 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries); 3993 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries); 3994 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves); 3995 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports); 3996 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports); 3997 tdst.mld_parse_errors += temp.mld_parse_errors; 3998 } 3999 memcpy(dest, &tdst, sizeof(*dest)); 4000 } 4001 4002 int br_mdb_hash_init(struct net_bridge *br) 4003 { 4004 int err; 4005 4006 err = rhashtable_init(&br->sg_port_tbl, 
&br_sg_port_rht_params); 4007 if (err) 4008 return err; 4009 4010 err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params); 4011 if (err) { 4012 rhashtable_destroy(&br->sg_port_tbl); 4013 return err; 4014 } 4015 4016 return 0; 4017 } 4018 4019 void br_mdb_hash_fini(struct net_bridge *br) 4020 { 4021 rhashtable_destroy(&br->sg_port_tbl); 4022 rhashtable_destroy(&br->mdb_hash_tbl); 4023 } 4024