1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux NET3: Internet Group Management Protocol [IGMP] 4 * 5 * This code implements the IGMP protocol as defined in RFC1112. There has 6 * been a further revision of this protocol since which is now supported. 7 * 8 * If you have trouble with this module be careful what gcc you have used, 9 * the older version didn't come out right using gcc 2.5.8, the newer one 10 * seems to fall out with gcc 2.6.2. 11 * 12 * Authors: 13 * Alan Cox <alan@lxorguk.ukuu.org.uk> 14 * 15 * Fixes: 16 * 17 * Alan Cox : Added lots of __inline__ to optimise 18 * the memory usage of all the tiny little 19 * functions. 20 * Alan Cox : Dumped the header building experiment. 21 * Alan Cox : Minor tweaks ready for multicast routing 22 * and extended IGMP protocol. 23 * Alan Cox : Removed a load of inline directives. Gcc 2.5.8 24 * writes utterly bogus code otherwise (sigh) 25 * fixed IGMP loopback to behave in the manner 26 * desired by mrouted, fixed the fact it has been 27 * broken since 1.3.6 and cleaned up a few minor 28 * points. 29 * 30 * Chih-Jen Chang : Tried to revise IGMP to Version 2 31 * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu 32 * The enhancements are mainly based on Steve Deering's 33 * ipmulti-3.5 source code. 34 * Chih-Jen Chang : Added the igmp_get_mrouter_info and 35 * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of 36 * the mrouted version on that device. 37 * Chih-Jen Chang : Added the max_resp_time parameter to 38 * Tsu-Sheng Tsao igmp_heard_query(). Using this parameter 39 * to identify the multicast router version 40 * and do what the IGMP version 2 specified. 41 * Chih-Jen Chang : Added a timer to revert to IGMP V2 router 42 * Tsu-Sheng Tsao if the specified time expired. 43 * Alan Cox : Stop IGMP from 0.0.0.0 being accepted. 44 * Alan Cox : Use GFP_ATOMIC in the right places. 
45 * Christian Daudt : igmp timer wasn't set for local group 46 * memberships but was being deleted, 47 * which caused a "del_timer() called 48 * from %p with timer not initialized\n" 49 * message (960131). 50 * Christian Daudt : removed del_timer from 51 * igmp_timer_expire function (960205). 52 * Christian Daudt : igmp_heard_report now only calls 53 * igmp_timer_expire if tm->running is 54 * true (960216). 55 * Malcolm Beattie : ttl comparison wrong in igmp_rcv made 56 * igmp_heard_query never trigger. Expiry 57 * miscalculation fixed in igmp_heard_query 58 * and random() made to return unsigned to 59 * prevent negative expiry times. 60 * Alexey Kuznetsov: Wrong group leaving behaviour, backport 61 * fix from pending 2.1.x patches. 62 * Alan Cox: Forget to enable FDDI support earlier. 63 * Alexey Kuznetsov: Fixed leaving groups on device down. 64 * Alexey Kuznetsov: Accordance to igmp-v2-06 draft. 65 * David L Stevens: IGMPv3 support, with help from 66 * Vinay Kulkarni 67 */ 68 69 #include <linux/module.h> 70 #include <linux/slab.h> 71 #include <linux/uaccess.h> 72 #include <linux/types.h> 73 #include <linux/kernel.h> 74 #include <linux/jiffies.h> 75 #include <linux/string.h> 76 #include <linux/socket.h> 77 #include <linux/sockios.h> 78 #include <linux/in.h> 79 #include <linux/inet.h> 80 #include <linux/netdevice.h> 81 #include <linux/skbuff.h> 82 #include <linux/inetdevice.h> 83 #include <linux/igmp.h> 84 #include <linux/if_arp.h> 85 #include <linux/rtnetlink.h> 86 #include <linux/times.h> 87 #include <linux/pkt_sched.h> 88 #include <linux/byteorder/generic.h> 89 90 #include <net/net_namespace.h> 91 #include <net/arp.h> 92 #include <net/ip.h> 93 #include <net/protocol.h> 94 #include <net/route.h> 95 #include <net/sock.h> 96 #include <net/checksum.h> 97 #include <net/inet_common.h> 98 #include <linux/netfilter_ipv4.h> 99 #ifdef CONFIG_IP_MROUTE 100 #include <linux/mroute.h> 101 #endif 102 #ifdef CONFIG_PROC_FS 103 #include <linux/proc_fs.h> 104 #include 
<linux/seq_file.h> 105 #endif 106 107 #ifdef CONFIG_IP_MULTICAST 108 /* Parameter names and values are taken from igmp-v2-06 draft */ 109 110 #define IGMP_QUERY_INTERVAL (125*HZ) 111 #define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ) 112 113 #define IGMP_INITIAL_REPORT_DELAY (1) 114 115 /* IGMP_INITIAL_REPORT_DELAY is not from IGMP specs! 116 * IGMP specs require to report membership immediately after 117 * joining a group, but we delay the first report by a 118 * small interval. It seems more natural and still does not 119 * contradict to specs provided this delay is small enough. 120 */ 121 122 #define IGMP_V1_SEEN(in_dev) \ 123 (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \ 124 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \ 125 ((in_dev)->mr_v1_seen && \ 126 time_before(jiffies, (in_dev)->mr_v1_seen))) 127 #define IGMP_V2_SEEN(in_dev) \ 128 (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \ 129 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \ 130 ((in_dev)->mr_v2_seen && \ 131 time_before(jiffies, (in_dev)->mr_v2_seen))) 132 133 static int unsolicited_report_interval(struct in_device *in_dev) 134 { 135 int interval_ms, interval_jiffies; 136 137 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) 138 interval_ms = IN_DEV_CONF_GET( 139 in_dev, 140 IGMPV2_UNSOLICITED_REPORT_INTERVAL); 141 else /* v3 */ 142 interval_ms = IN_DEV_CONF_GET( 143 in_dev, 144 IGMPV3_UNSOLICITED_REPORT_INTERVAL); 145 146 interval_jiffies = msecs_to_jiffies(interval_ms); 147 148 /* _timer functions can't handle a delay of 0 jiffies so ensure 149 * we always return a positive value. 
150 */ 151 if (interval_jiffies <= 0) 152 interval_jiffies = 1; 153 return interval_jiffies; 154 } 155 156 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im, 157 gfp_t gfp); 158 static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im); 159 static void igmpv3_clear_delrec(struct in_device *in_dev); 160 static int sf_setstate(struct ip_mc_list *pmc); 161 static void sf_markstate(struct ip_mc_list *pmc); 162 #endif 163 static void ip_mc_clear_src(struct ip_mc_list *pmc); 164 static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, 165 int sfcount, __be32 *psfsrc, int delta); 166 167 static void ip_ma_put(struct ip_mc_list *im) 168 { 169 if (refcount_dec_and_test(&im->refcnt)) { 170 in_dev_put(im->interface); 171 kfree_rcu(im, rcu); 172 } 173 } 174 175 #define for_each_pmc_rcu(in_dev, pmc) \ 176 for (pmc = rcu_dereference(in_dev->mc_list); \ 177 pmc != NULL; \ 178 pmc = rcu_dereference(pmc->next_rcu)) 179 180 #define for_each_pmc_rtnl(in_dev, pmc) \ 181 for (pmc = rtnl_dereference(in_dev->mc_list); \ 182 pmc != NULL; \ 183 pmc = rtnl_dereference(pmc->next_rcu)) 184 185 static void ip_sf_list_clear_all(struct ip_sf_list *psf) 186 { 187 struct ip_sf_list *next; 188 189 while (psf) { 190 next = psf->sf_next; 191 kfree(psf); 192 psf = next; 193 } 194 } 195 196 #ifdef CONFIG_IP_MULTICAST 197 198 /* 199 * Timer management 200 */ 201 202 static void igmp_stop_timer(struct ip_mc_list *im) 203 { 204 spin_lock_bh(&im->lock); 205 if (del_timer(&im->timer)) 206 refcount_dec(&im->refcnt); 207 im->tm_running = 0; 208 im->reporter = 0; 209 im->unsolicit_count = 0; 210 spin_unlock_bh(&im->lock); 211 } 212 213 /* It must be called with locked im->lock */ 214 static void igmp_start_timer(struct ip_mc_list *im, int max_delay) 215 { 216 int tv = get_random_u32_below(max_delay); 217 218 im->tm_running = 1; 219 if (!mod_timer(&im->timer, jiffies+tv+2)) 220 refcount_inc(&im->refcnt); 221 } 222 223 static void 
igmp_gq_start_timer(struct in_device *in_dev) 224 { 225 int tv = get_random_u32_below(in_dev->mr_maxdelay); 226 unsigned long exp = jiffies + tv + 2; 227 228 if (in_dev->mr_gq_running && 229 time_after_eq(exp, (in_dev->mr_gq_timer).expires)) 230 return; 231 232 in_dev->mr_gq_running = 1; 233 if (!mod_timer(&in_dev->mr_gq_timer, exp)) 234 in_dev_hold(in_dev); 235 } 236 237 static void igmp_ifc_start_timer(struct in_device *in_dev, int delay) 238 { 239 int tv = get_random_u32_below(delay); 240 241 if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2)) 242 in_dev_hold(in_dev); 243 } 244 245 static void igmp_mod_timer(struct ip_mc_list *im, int max_delay) 246 { 247 spin_lock_bh(&im->lock); 248 im->unsolicit_count = 0; 249 if (del_timer(&im->timer)) { 250 if ((long)(im->timer.expires-jiffies) < max_delay) { 251 add_timer(&im->timer); 252 im->tm_running = 1; 253 spin_unlock_bh(&im->lock); 254 return; 255 } 256 refcount_dec(&im->refcnt); 257 } 258 igmp_start_timer(im, max_delay); 259 spin_unlock_bh(&im->lock); 260 } 261 262 263 /* 264 * Send an IGMP report. 
265 */ 266 267 #define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4) 268 269 270 static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type, 271 int gdeleted, int sdeleted) 272 { 273 switch (type) { 274 case IGMPV3_MODE_IS_INCLUDE: 275 case IGMPV3_MODE_IS_EXCLUDE: 276 if (gdeleted || sdeleted) 277 return 0; 278 if (!(pmc->gsquery && !psf->sf_gsresp)) { 279 if (pmc->sfmode == MCAST_INCLUDE) 280 return 1; 281 /* don't include if this source is excluded 282 * in all filters 283 */ 284 if (psf->sf_count[MCAST_INCLUDE]) 285 return type == IGMPV3_MODE_IS_INCLUDE; 286 return pmc->sfcount[MCAST_EXCLUDE] == 287 psf->sf_count[MCAST_EXCLUDE]; 288 } 289 return 0; 290 case IGMPV3_CHANGE_TO_INCLUDE: 291 if (gdeleted || sdeleted) 292 return 0; 293 return psf->sf_count[MCAST_INCLUDE] != 0; 294 case IGMPV3_CHANGE_TO_EXCLUDE: 295 if (gdeleted || sdeleted) 296 return 0; 297 if (pmc->sfcount[MCAST_EXCLUDE] == 0 || 298 psf->sf_count[MCAST_INCLUDE]) 299 return 0; 300 return pmc->sfcount[MCAST_EXCLUDE] == 301 psf->sf_count[MCAST_EXCLUDE]; 302 case IGMPV3_ALLOW_NEW_SOURCES: 303 if (gdeleted || !psf->sf_crcount) 304 return 0; 305 return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted; 306 case IGMPV3_BLOCK_OLD_SOURCES: 307 if (pmc->sfmode == MCAST_INCLUDE) 308 return gdeleted || (psf->sf_crcount && sdeleted); 309 return psf->sf_crcount && !gdeleted && !sdeleted; 310 } 311 return 0; 312 } 313 314 static int 315 igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) 316 { 317 struct ip_sf_list *psf; 318 int scount = 0; 319 320 for (psf = pmc->sources; psf; psf = psf->sf_next) { 321 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) 322 continue; 323 scount++; 324 } 325 return scount; 326 } 327 328 /* source address selection per RFC 3376 section 4.2.13 */ 329 static __be32 igmpv3_get_srcaddr(struct net_device *dev, 330 const struct flowi4 *fl4) 331 { 332 struct in_device *in_dev = __in_dev_get_rcu(dev); 333 const struct in_ifaddr *ifa; 334 335 if 
(!in_dev) 336 return htonl(INADDR_ANY); 337 338 in_dev_for_each_ifa_rcu(ifa, in_dev) { 339 if (fl4->saddr == ifa->ifa_local) 340 return fl4->saddr; 341 } 342 343 return htonl(INADDR_ANY); 344 } 345 346 static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) 347 { 348 struct sk_buff *skb; 349 struct rtable *rt; 350 struct iphdr *pip; 351 struct igmpv3_report *pig; 352 struct net *net = dev_net(dev); 353 struct flowi4 fl4; 354 int hlen = LL_RESERVED_SPACE(dev); 355 int tlen = dev->needed_tailroom; 356 unsigned int size = mtu; 357 358 while (1) { 359 skb = alloc_skb(size + hlen + tlen, 360 GFP_ATOMIC | __GFP_NOWARN); 361 if (skb) 362 break; 363 size >>= 1; 364 if (size < 256) 365 return NULL; 366 } 367 skb->priority = TC_PRIO_CONTROL; 368 369 rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, 370 0, 0, 371 IPPROTO_IGMP, 0, dev->ifindex); 372 if (IS_ERR(rt)) { 373 kfree_skb(skb); 374 return NULL; 375 } 376 377 skb_dst_set(skb, &rt->dst); 378 skb->dev = dev; 379 380 skb_reserve(skb, hlen); 381 skb_tailroom_reserve(skb, mtu, tlen); 382 383 skb_reset_network_header(skb); 384 pip = ip_hdr(skb); 385 skb_put(skb, sizeof(struct iphdr) + 4); 386 387 pip->version = 4; 388 pip->ihl = (sizeof(struct iphdr)+4)>>2; 389 pip->tos = 0xc0; 390 pip->frag_off = htons(IP_DF); 391 pip->ttl = 1; 392 pip->daddr = fl4.daddr; 393 394 rcu_read_lock(); 395 pip->saddr = igmpv3_get_srcaddr(dev, &fl4); 396 rcu_read_unlock(); 397 398 pip->protocol = IPPROTO_IGMP; 399 pip->tot_len = 0; /* filled in later */ 400 ip_select_ident(net, skb, NULL); 401 ((u8 *)&pip[1])[0] = IPOPT_RA; 402 ((u8 *)&pip[1])[1] = 4; 403 ((u8 *)&pip[1])[2] = 0; 404 ((u8 *)&pip[1])[3] = 0; 405 406 skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4; 407 skb_put(skb, sizeof(*pig)); 408 pig = igmpv3_report_hdr(skb); 409 pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT; 410 pig->resv1 = 0; 411 pig->csum = 0; 412 pig->resv2 = 0; 413 pig->ngrec = 0; 414 return skb; 415 } 416 417 static 
int igmpv3_sendpack(struct sk_buff *skb) 418 { 419 struct igmphdr *pig = igmp_hdr(skb); 420 const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb); 421 422 pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen); 423 424 return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); 425 } 426 427 static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) 428 { 429 return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel); 430 } 431 432 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, 433 int type, struct igmpv3_grec **ppgr, unsigned int mtu) 434 { 435 struct net_device *dev = pmc->interface->dev; 436 struct igmpv3_report *pih; 437 struct igmpv3_grec *pgr; 438 439 if (!skb) { 440 skb = igmpv3_newpack(dev, mtu); 441 if (!skb) 442 return NULL; 443 } 444 pgr = skb_put(skb, sizeof(struct igmpv3_grec)); 445 pgr->grec_type = type; 446 pgr->grec_auxwords = 0; 447 pgr->grec_nsrcs = 0; 448 pgr->grec_mca = pmc->multiaddr; 449 pih = igmpv3_report_hdr(skb); 450 pih->ngrec = htons(ntohs(pih->ngrec)+1); 451 *ppgr = pgr; 452 return skb; 453 } 454 455 #define AVAILABLE(skb) ((skb) ? 
skb_availroom(skb) : 0) 456 457 static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, 458 int type, int gdeleted, int sdeleted) 459 { 460 struct net_device *dev = pmc->interface->dev; 461 struct net *net = dev_net(dev); 462 struct igmpv3_report *pih; 463 struct igmpv3_grec *pgr = NULL; 464 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; 465 int scount, stotal, first, isquery, truncate; 466 unsigned int mtu; 467 468 if (pmc->multiaddr == IGMP_ALL_HOSTS) 469 return skb; 470 if (ipv4_is_local_multicast(pmc->multiaddr) && 471 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) 472 return skb; 473 474 mtu = READ_ONCE(dev->mtu); 475 if (mtu < IPV4_MIN_MTU) 476 return skb; 477 478 isquery = type == IGMPV3_MODE_IS_INCLUDE || 479 type == IGMPV3_MODE_IS_EXCLUDE; 480 truncate = type == IGMPV3_MODE_IS_EXCLUDE || 481 type == IGMPV3_CHANGE_TO_EXCLUDE; 482 483 stotal = scount = 0; 484 485 psf_list = sdeleted ? &pmc->tomb : &pmc->sources; 486 487 if (!*psf_list) 488 goto empty_source; 489 490 pih = skb ? igmpv3_report_hdr(skb) : NULL; 491 492 /* EX and TO_EX get a fresh packet, if needed */ 493 if (truncate) { 494 if (pih && pih->ngrec && 495 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { 496 if (skb) 497 igmpv3_sendpack(skb); 498 skb = igmpv3_newpack(dev, mtu); 499 } 500 } 501 first = 1; 502 psf_prev = NULL; 503 for (psf = *psf_list; psf; psf = psf_next) { 504 __be32 *psrc; 505 506 psf_next = psf->sf_next; 507 508 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) { 509 psf_prev = psf; 510 continue; 511 } 512 513 /* Based on RFC3376 5.1. Should not send source-list change 514 * records when there is a filter mode change. 
515 */ 516 if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) || 517 (!gdeleted && pmc->crcount)) && 518 (type == IGMPV3_ALLOW_NEW_SOURCES || 519 type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) 520 goto decrease_sf_crcount; 521 522 /* clear marks on query responses */ 523 if (isquery) 524 psf->sf_gsresp = 0; 525 526 if (AVAILABLE(skb) < sizeof(__be32) + 527 first*sizeof(struct igmpv3_grec)) { 528 if (truncate && !first) 529 break; /* truncate these */ 530 if (pgr) 531 pgr->grec_nsrcs = htons(scount); 532 if (skb) 533 igmpv3_sendpack(skb); 534 skb = igmpv3_newpack(dev, mtu); 535 first = 1; 536 scount = 0; 537 } 538 if (first) { 539 skb = add_grhead(skb, pmc, type, &pgr, mtu); 540 first = 0; 541 } 542 if (!skb) 543 return NULL; 544 psrc = skb_put(skb, sizeof(__be32)); 545 *psrc = psf->sf_inaddr; 546 scount++; stotal++; 547 if ((type == IGMPV3_ALLOW_NEW_SOURCES || 548 type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) { 549 decrease_sf_crcount: 550 psf->sf_crcount--; 551 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) { 552 if (psf_prev) 553 psf_prev->sf_next = psf->sf_next; 554 else 555 *psf_list = psf->sf_next; 556 kfree(psf); 557 continue; 558 } 559 } 560 psf_prev = psf; 561 } 562 563 empty_source: 564 if (!stotal) { 565 if (type == IGMPV3_ALLOW_NEW_SOURCES || 566 type == IGMPV3_BLOCK_OLD_SOURCES) 567 return skb; 568 if (pmc->crcount || isquery) { 569 /* make sure we have room for group header */ 570 if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) { 571 igmpv3_sendpack(skb); 572 skb = NULL; /* add_grhead will get a new one */ 573 } 574 skb = add_grhead(skb, pmc, type, &pgr, mtu); 575 } 576 } 577 if (pgr) 578 pgr->grec_nsrcs = htons(scount); 579 580 if (isquery) 581 pmc->gsquery = 0; /* clear query state on report */ 582 return skb; 583 } 584 585 static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc) 586 { 587 struct sk_buff *skb = NULL; 588 struct net *net = dev_net(in_dev->dev); 589 int type; 590 591 if (!pmc) { 592 
rcu_read_lock(); 593 for_each_pmc_rcu(in_dev, pmc) { 594 if (pmc->multiaddr == IGMP_ALL_HOSTS) 595 continue; 596 if (ipv4_is_local_multicast(pmc->multiaddr) && 597 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) 598 continue; 599 spin_lock_bh(&pmc->lock); 600 if (pmc->sfcount[MCAST_EXCLUDE]) 601 type = IGMPV3_MODE_IS_EXCLUDE; 602 else 603 type = IGMPV3_MODE_IS_INCLUDE; 604 skb = add_grec(skb, pmc, type, 0, 0); 605 spin_unlock_bh(&pmc->lock); 606 } 607 rcu_read_unlock(); 608 } else { 609 spin_lock_bh(&pmc->lock); 610 if (pmc->sfcount[MCAST_EXCLUDE]) 611 type = IGMPV3_MODE_IS_EXCLUDE; 612 else 613 type = IGMPV3_MODE_IS_INCLUDE; 614 skb = add_grec(skb, pmc, type, 0, 0); 615 spin_unlock_bh(&pmc->lock); 616 } 617 if (!skb) 618 return 0; 619 return igmpv3_sendpack(skb); 620 } 621 622 /* 623 * remove zero-count source records from a source filter list 624 */ 625 static void igmpv3_clear_zeros(struct ip_sf_list **ppsf) 626 { 627 struct ip_sf_list *psf_prev, *psf_next, *psf; 628 629 psf_prev = NULL; 630 for (psf = *ppsf; psf; psf = psf_next) { 631 psf_next = psf->sf_next; 632 if (psf->sf_crcount == 0) { 633 if (psf_prev) 634 psf_prev->sf_next = psf->sf_next; 635 else 636 *ppsf = psf->sf_next; 637 kfree(psf); 638 } else 639 psf_prev = psf; 640 } 641 } 642 643 static void kfree_pmc(struct ip_mc_list *pmc) 644 { 645 ip_sf_list_clear_all(pmc->sources); 646 ip_sf_list_clear_all(pmc->tomb); 647 kfree(pmc); 648 } 649 650 static void igmpv3_send_cr(struct in_device *in_dev) 651 { 652 struct ip_mc_list *pmc, *pmc_prev, *pmc_next; 653 struct sk_buff *skb = NULL; 654 int type, dtype; 655 656 rcu_read_lock(); 657 spin_lock_bh(&in_dev->mc_tomb_lock); 658 659 /* deleted MCA's */ 660 pmc_prev = NULL; 661 for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) { 662 pmc_next = pmc->next; 663 if (pmc->sfmode == MCAST_INCLUDE) { 664 type = IGMPV3_BLOCK_OLD_SOURCES; 665 dtype = IGMPV3_BLOCK_OLD_SOURCES; 666 skb = add_grec(skb, pmc, type, 1, 0); 667 skb = add_grec(skb, pmc, dtype, 1, 1); 668 } 669 if 
(pmc->crcount) { 670 if (pmc->sfmode == MCAST_EXCLUDE) { 671 type = IGMPV3_CHANGE_TO_INCLUDE; 672 skb = add_grec(skb, pmc, type, 1, 0); 673 } 674 pmc->crcount--; 675 if (pmc->crcount == 0) { 676 igmpv3_clear_zeros(&pmc->tomb); 677 igmpv3_clear_zeros(&pmc->sources); 678 } 679 } 680 if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) { 681 if (pmc_prev) 682 pmc_prev->next = pmc_next; 683 else 684 in_dev->mc_tomb = pmc_next; 685 in_dev_put(pmc->interface); 686 kfree_pmc(pmc); 687 } else 688 pmc_prev = pmc; 689 } 690 spin_unlock_bh(&in_dev->mc_tomb_lock); 691 692 /* change recs */ 693 for_each_pmc_rcu(in_dev, pmc) { 694 spin_lock_bh(&pmc->lock); 695 if (pmc->sfcount[MCAST_EXCLUDE]) { 696 type = IGMPV3_BLOCK_OLD_SOURCES; 697 dtype = IGMPV3_ALLOW_NEW_SOURCES; 698 } else { 699 type = IGMPV3_ALLOW_NEW_SOURCES; 700 dtype = IGMPV3_BLOCK_OLD_SOURCES; 701 } 702 skb = add_grec(skb, pmc, type, 0, 0); 703 skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */ 704 705 /* filter mode changes */ 706 if (pmc->crcount) { 707 if (pmc->sfmode == MCAST_EXCLUDE) 708 type = IGMPV3_CHANGE_TO_EXCLUDE; 709 else 710 type = IGMPV3_CHANGE_TO_INCLUDE; 711 skb = add_grec(skb, pmc, type, 0, 0); 712 pmc->crcount--; 713 } 714 spin_unlock_bh(&pmc->lock); 715 } 716 rcu_read_unlock(); 717 718 if (!skb) 719 return; 720 (void) igmpv3_sendpack(skb); 721 } 722 723 static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, 724 int type) 725 { 726 struct sk_buff *skb; 727 struct iphdr *iph; 728 struct igmphdr *ih; 729 struct rtable *rt; 730 struct net_device *dev = in_dev->dev; 731 struct net *net = dev_net(dev); 732 __be32 group = pmc ? 
pmc->multiaddr : 0; 733 struct flowi4 fl4; 734 __be32 dst; 735 int hlen, tlen; 736 737 if (type == IGMPV3_HOST_MEMBERSHIP_REPORT) 738 return igmpv3_send_report(in_dev, pmc); 739 740 if (ipv4_is_local_multicast(group) && 741 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) 742 return 0; 743 744 if (type == IGMP_HOST_LEAVE_MESSAGE) 745 dst = IGMP_ALL_ROUTER; 746 else 747 dst = group; 748 749 rt = ip_route_output_ports(net, &fl4, NULL, dst, 0, 750 0, 0, 751 IPPROTO_IGMP, 0, dev->ifindex); 752 if (IS_ERR(rt)) 753 return -1; 754 755 hlen = LL_RESERVED_SPACE(dev); 756 tlen = dev->needed_tailroom; 757 skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC); 758 if (!skb) { 759 ip_rt_put(rt); 760 return -1; 761 } 762 skb->priority = TC_PRIO_CONTROL; 763 764 skb_dst_set(skb, &rt->dst); 765 766 skb_reserve(skb, hlen); 767 768 skb_reset_network_header(skb); 769 iph = ip_hdr(skb); 770 skb_put(skb, sizeof(struct iphdr) + 4); 771 772 iph->version = 4; 773 iph->ihl = (sizeof(struct iphdr)+4)>>2; 774 iph->tos = 0xc0; 775 iph->frag_off = htons(IP_DF); 776 iph->ttl = 1; 777 iph->daddr = dst; 778 iph->saddr = fl4.saddr; 779 iph->protocol = IPPROTO_IGMP; 780 ip_select_ident(net, skb, NULL); 781 ((u8 *)&iph[1])[0] = IPOPT_RA; 782 ((u8 *)&iph[1])[1] = 4; 783 ((u8 *)&iph[1])[2] = 0; 784 ((u8 *)&iph[1])[3] = 0; 785 786 ih = skb_put(skb, sizeof(struct igmphdr)); 787 ih->type = type; 788 ih->code = 0; 789 ih->csum = 0; 790 ih->group = group; 791 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); 792 793 return ip_local_out(net, skb->sk, skb); 794 } 795 796 static void igmp_gq_timer_expire(struct timer_list *t) 797 { 798 struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer); 799 800 in_dev->mr_gq_running = 0; 801 igmpv3_send_report(in_dev, NULL); 802 in_dev_put(in_dev); 803 } 804 805 static void igmp_ifc_timer_expire(struct timer_list *t) 806 { 807 struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer); 808 u32 mr_ifc_count; 809 810 igmpv3_send_cr(in_dev); 811 
restart: 812 mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count); 813 814 if (mr_ifc_count) { 815 if (cmpxchg(&in_dev->mr_ifc_count, 816 mr_ifc_count, 817 mr_ifc_count - 1) != mr_ifc_count) 818 goto restart; 819 igmp_ifc_start_timer(in_dev, 820 unsolicited_report_interval(in_dev)); 821 } 822 in_dev_put(in_dev); 823 } 824 825 static void igmp_ifc_event(struct in_device *in_dev) 826 { 827 struct net *net = dev_net(in_dev->dev); 828 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) 829 return; 830 WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv)); 831 igmp_ifc_start_timer(in_dev, 1); 832 } 833 834 835 static void igmp_timer_expire(struct timer_list *t) 836 { 837 struct ip_mc_list *im = from_timer(im, t, timer); 838 struct in_device *in_dev = im->interface; 839 840 spin_lock(&im->lock); 841 im->tm_running = 0; 842 843 if (im->unsolicit_count && --im->unsolicit_count) 844 igmp_start_timer(im, unsolicited_report_interval(in_dev)); 845 846 im->reporter = 1; 847 spin_unlock(&im->lock); 848 849 if (IGMP_V1_SEEN(in_dev)) 850 igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT); 851 else if (IGMP_V2_SEEN(in_dev)) 852 igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT); 853 else 854 igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT); 855 856 ip_ma_put(im); 857 } 858 859 /* mark EXCLUDE-mode sources */ 860 static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs) 861 { 862 struct ip_sf_list *psf; 863 int i, scount; 864 865 scount = 0; 866 for (psf = pmc->sources; psf; psf = psf->sf_next) { 867 if (scount == nsrcs) 868 break; 869 for (i = 0; i < nsrcs; i++) { 870 /* skip inactive filters */ 871 if (psf->sf_count[MCAST_INCLUDE] || 872 pmc->sfcount[MCAST_EXCLUDE] != 873 psf->sf_count[MCAST_EXCLUDE]) 874 break; 875 if (srcs[i] == psf->sf_inaddr) { 876 scount++; 877 break; 878 } 879 } 880 } 881 pmc->gsquery = 0; 882 if (scount == nsrcs) /* all sources excluded */ 883 return 0; 884 return 1; 885 } 886 887 
static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs) 888 { 889 struct ip_sf_list *psf; 890 int i, scount; 891 892 if (pmc->sfmode == MCAST_EXCLUDE) 893 return igmp_xmarksources(pmc, nsrcs, srcs); 894 895 /* mark INCLUDE-mode sources */ 896 scount = 0; 897 for (psf = pmc->sources; psf; psf = psf->sf_next) { 898 if (scount == nsrcs) 899 break; 900 for (i = 0; i < nsrcs; i++) 901 if (srcs[i] == psf->sf_inaddr) { 902 psf->sf_gsresp = 1; 903 scount++; 904 break; 905 } 906 } 907 if (!scount) { 908 pmc->gsquery = 0; 909 return 0; 910 } 911 pmc->gsquery = 1; 912 return 1; 913 } 914 915 /* return true if packet was dropped */ 916 static bool igmp_heard_report(struct in_device *in_dev, __be32 group) 917 { 918 struct ip_mc_list *im; 919 struct net *net = dev_net(in_dev->dev); 920 921 /* Timers are only set for non-local groups */ 922 923 if (group == IGMP_ALL_HOSTS) 924 return false; 925 if (ipv4_is_local_multicast(group) && 926 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) 927 return false; 928 929 rcu_read_lock(); 930 for_each_pmc_rcu(in_dev, im) { 931 if (im->multiaddr == group) { 932 igmp_stop_timer(im); 933 break; 934 } 935 } 936 rcu_read_unlock(); 937 return false; 938 } 939 940 /* return true if packet was dropped */ 941 static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, 942 int len) 943 { 944 struct igmphdr *ih = igmp_hdr(skb); 945 struct igmpv3_query *ih3 = igmpv3_query_hdr(skb); 946 struct ip_mc_list *im; 947 __be32 group = ih->group; 948 int max_delay; 949 int mark = 0; 950 struct net *net = dev_net(in_dev->dev); 951 952 953 if (len == 8) { 954 if (ih->code == 0) { 955 /* Alas, old v1 router presents here. 
*/ 956 957 max_delay = IGMP_QUERY_RESPONSE_INTERVAL; 958 in_dev->mr_v1_seen = jiffies + 959 (in_dev->mr_qrv * in_dev->mr_qi) + 960 in_dev->mr_qri; 961 group = 0; 962 } else { 963 /* v2 router present */ 964 max_delay = ih->code*(HZ/IGMP_TIMER_SCALE); 965 in_dev->mr_v2_seen = jiffies + 966 (in_dev->mr_qrv * in_dev->mr_qi) + 967 in_dev->mr_qri; 968 } 969 /* cancel the interface change timer */ 970 WRITE_ONCE(in_dev->mr_ifc_count, 0); 971 if (del_timer(&in_dev->mr_ifc_timer)) 972 __in_dev_put(in_dev); 973 /* clear deleted report items */ 974 igmpv3_clear_delrec(in_dev); 975 } else if (len < 12) { 976 return true; /* ignore bogus packet; freed by caller */ 977 } else if (IGMP_V1_SEEN(in_dev)) { 978 /* This is a v3 query with v1 queriers present */ 979 max_delay = IGMP_QUERY_RESPONSE_INTERVAL; 980 group = 0; 981 } else if (IGMP_V2_SEEN(in_dev)) { 982 /* this is a v3 query with v2 queriers present; 983 * Interpretation of the max_delay code is problematic here. 984 * A real v2 host would use ih_code directly, while v3 has a 985 * different encoding. We use the v3 encoding as more likely 986 * to be intended in a v3 query. 987 */ 988 max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE); 989 if (!max_delay) 990 max_delay = 1; /* can't mod w/ 0 */ 991 } else { /* v3 */ 992 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) 993 return true; 994 995 ih3 = igmpv3_query_hdr(skb); 996 if (ih3->nsrcs) { 997 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query) 998 + ntohs(ih3->nsrcs)*sizeof(__be32))) 999 return true; 1000 ih3 = igmpv3_query_hdr(skb); 1001 } 1002 1003 max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE); 1004 if (!max_delay) 1005 max_delay = 1; /* can't mod w/ 0 */ 1006 in_dev->mr_maxdelay = max_delay; 1007 1008 /* RFC3376, 4.1.6. QRV and 4.1.7. QQIC, when the most recently 1009 * received value was zero, use the default or statically 1010 * configured value. 
1011 */ 1012 in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); 1013 in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL; 1014 1015 /* RFC3376, 8.3. Query Response Interval: 1016 * The number of seconds represented by the [Query Response 1017 * Interval] must be less than the [Query Interval]. 1018 */ 1019 if (in_dev->mr_qri >= in_dev->mr_qi) 1020 in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ; 1021 1022 if (!group) { /* general query */ 1023 if (ih3->nsrcs) 1024 return true; /* no sources allowed */ 1025 igmp_gq_start_timer(in_dev); 1026 return false; 1027 } 1028 /* mark sources to include, if group & source-specific */ 1029 mark = ih3->nsrcs != 0; 1030 } 1031 1032 /* 1033 * - Start the timers in all of our membership records 1034 * that the query applies to for the interface on 1035 * which the query arrived excl. those that belong 1036 * to a "local" group (224.0.0.X) 1037 * - For timers already running check if they need to 1038 * be reset. 1039 * - Use the igmp->igmp_code field as the maximum 1040 * delay possible 1041 */ 1042 rcu_read_lock(); 1043 for_each_pmc_rcu(in_dev, im) { 1044 int changed; 1045 1046 if (group && group != im->multiaddr) 1047 continue; 1048 if (im->multiaddr == IGMP_ALL_HOSTS) 1049 continue; 1050 if (ipv4_is_local_multicast(im->multiaddr) && 1051 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) 1052 continue; 1053 spin_lock_bh(&im->lock); 1054 if (im->tm_running) 1055 im->gsquery = im->gsquery && mark; 1056 else 1057 im->gsquery = mark; 1058 changed = !im->gsquery || 1059 igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs); 1060 spin_unlock_bh(&im->lock); 1061 if (changed) 1062 igmp_mod_timer(im, max_delay); 1063 } 1064 rcu_read_unlock(); 1065 return false; 1066 } 1067 1068 /* called in rcu_read_lock() section */ 1069 int igmp_rcv(struct sk_buff *skb) 1070 { 1071 /* This basically follows the spec line by line -- see RFC1112 */ 1072 struct igmphdr *ih; 1073 struct net_device *dev = skb->dev; 1074 struct in_device 
*in_dev; 1075 int len = skb->len; 1076 bool dropped = true; 1077 1078 if (netif_is_l3_master(dev)) { 1079 dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif); 1080 if (!dev) 1081 goto drop; 1082 } 1083 1084 in_dev = __in_dev_get_rcu(dev); 1085 if (!in_dev) 1086 goto drop; 1087 1088 if (!pskb_may_pull(skb, sizeof(struct igmphdr))) 1089 goto drop; 1090 1091 if (skb_checksum_simple_validate(skb)) 1092 goto drop; 1093 1094 ih = igmp_hdr(skb); 1095 switch (ih->type) { 1096 case IGMP_HOST_MEMBERSHIP_QUERY: 1097 dropped = igmp_heard_query(in_dev, skb, len); 1098 break; 1099 case IGMP_HOST_MEMBERSHIP_REPORT: 1100 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1101 /* Is it our report looped back? */ 1102 if (rt_is_output_route(skb_rtable(skb))) 1103 break; 1104 /* don't rely on MC router hearing unicast reports */ 1105 if (skb->pkt_type == PACKET_MULTICAST || 1106 skb->pkt_type == PACKET_BROADCAST) 1107 dropped = igmp_heard_report(in_dev, ih->group); 1108 break; 1109 case IGMP_PIM: 1110 #ifdef CONFIG_IP_PIMSM_V1 1111 return pim_rcv_v1(skb); 1112 #endif 1113 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1114 case IGMP_DVMRP: 1115 case IGMP_TRACE: 1116 case IGMP_HOST_LEAVE_MESSAGE: 1117 case IGMP_MTRACE: 1118 case IGMP_MTRACE_RESP: 1119 break; 1120 default: 1121 break; 1122 } 1123 1124 drop: 1125 if (dropped) 1126 kfree_skb(skb); 1127 else 1128 consume_skb(skb); 1129 return 0; 1130 } 1131 1132 #endif 1133 1134 1135 /* 1136 * Add a filter to a device 1137 */ 1138 1139 static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr) 1140 { 1141 char buf[MAX_ADDR_LEN]; 1142 struct net_device *dev = in_dev->dev; 1143 1144 /* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG. 1145 We will get multicast token leakage, when IFF_MULTICAST 1146 is changed. This check should be done in ndo_set_rx_mode 1147 routine. 
	   Something sort of:
	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	   --ANK
	   */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}

/*
 * Remove a filter from a device
 */

/* Counterpart of ip_mc_filter_add(): remove the link-layer mapping of
 * @addr from the device's RX filter.
 */
static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_del(dev, buf);
}

#ifdef CONFIG_IP_MULTICAST
/*
 * deleted ip_mc_list manipulation
 */
/* Record a deleted group on the interface's "tomb" list so IGMPv3 change
 * reports can still be sent for it.  @gfp controls the record allocation.
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
			      gfp_t gfp)
{
	struct ip_mc_list *pmc;
	struct net *net = dev_net(in_dev->dev);

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), gfp);
	if (!pmc)
		return;
	spin_lock_init(&pmc->lock);
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	in_dev_hold(in_dev);	/* tomb entry keeps a ref on the in_device */
	pmc->multiaddr = im->multiaddr;
	/* retransmission count: robustness value learned from queries,
	 * falling back to the sysctl default
	 */
	pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		/* steal the source lists from the live record */
		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}

/*
 * restore ip_mc_list deleted records
 */
/* If a tomb record exists for im->multiaddr, unlink it and move its
 * state (source lists or crcount) back into the live record @im.
 */
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf;
	struct net *net = dev_net(in_dev->dev);
	__be32 multiaddr = im->multiaddr;

	/* unlink a matching tomb entry under mc_tomb_lock */
	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	spin_lock_bh(&im->lock);
	if (pmc) {
		im->interface = pmc->interface;
		if (im->sfmode == MCAST_INCLUDE) {
			swap(im->tomb, pmc->tomb);
			swap(im->sources, pmc->sources);
			for (psf = im->sources; psf; psf = psf->sf_next)
				psf->sf_crcount = in_dev->mr_qrv ?:
					READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		} else {
			im->crcount = in_dev->mr_qrv ?:
				READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		}
		in_dev_put(pmc->interface);	/* drop ref taken in add_delrec */
		kfree_pmc(pmc);
	}
	spin_unlock_bh(&im->lock);
}

/*
 * flush ip_mc_list deleted records
 */
/* Drop every tomb record on the interface and also free any per-group
 * tombstoned sources still hanging off live records.
 */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	/* detach the whole tomb list first, then free it without the lock */
	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}
	/* clear dead sources, too */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		struct ip_sf_list *psf;

		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		ip_sf_list_clear_all(psf);
	}
	rcu_read_unlock();
}
#endif

/* A group lost its last local user: remove the hardware filter, and,
 * depending on the IGMP version seen on the link, send a leave message
 * (v2) or queue an IGMPv3 change report via the tomb list (v3).
 */
static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	/* 224.0.0.1 and (by default) 224.0.0.x are never reported */
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return;

	/* sample reporter before the timer is stopped */
	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			return;		/* IGMPv1 has no leave message */
		if (IGMP_V2_SEEN(in_dev)) {
			/* only the last reporter sends the leave */
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			return;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im, gfp);

		igmp_ifc_event(in_dev);
	}
#endif
}

static void igmp_group_dropped(struct ip_mc_list *im)
{
	__igmp_group_dropped(im, GFP_KERNEL);
}

/* A group gained its first local user (or the device came back up):
 * install the hardware filter and schedule the appropriate reports.
 */
static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
#endif

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	/* all-hosts and (by default) link-local groups are not reported */
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return;

	if (in_dev->dead)
		return;

	im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		/* v1/v2: send an unsolicited report after a short delay */
		spin_lock_bh(&im->lock);
		igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
		spin_unlock_bh(&im->lock);
		return;
	}
	/* else, v3 */

	/* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should
	 * not send filter-mode change record as the mode should be from
	 * IN() to IN(A).
	 */
	if (im->sfmode == MCAST_EXCLUDE)
		im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);

	igmp_ifc_event(in_dev);
#endif
}


/*
 * Multicast list managers
 */

/* Hash a group address into the MC_HASH_SZ_LOG-bit bucket index. */
static u32 ip_mc_hash(const struct ip_mc_list *im)
{
	return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
}

/* Insert @im into the interface's group hash table, creating the table
 * lazily once the linear list grows past a handful of entries.
 * Runs under RTNL (rtnl_dereference of mc_hash).
 */
static void ip_mc_hash_add(struct in_device *in_dev,
			   struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash;
	u32 hash;

	mc_hash = rtnl_dereference(in_dev->mc_hash);
	if (mc_hash) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		/* publish only after next_hash is set */
		rcu_assign_pointer(mc_hash[hash], im);
		return;
	}

	/* do not use a hash table for small number of items */
	if (in_dev->mc_count < 4)
		return;

	mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
			  GFP_KERNEL);
	if (!mc_hash)
		return;		/* best effort: keep using the linear list */

	/* populate the new table; not visible to readers yet, so
	 * RCU_INIT_POINTER (no barrier) is sufficient here
	 */
	for_each_pmc_rtnl(in_dev, im) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		RCU_INIT_POINTER(mc_hash[hash],
				 im);
	}

	/* publish the fully built table */
	rcu_assign_pointer(in_dev->mc_hash, mc_hash);
}

/* Unlink @im from its hash bucket (no-op if no table was built yet).
 * Runs under RTNL.
 */
static void ip_mc_hash_remove(struct in_device *in_dev,
			      struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
	struct ip_mc_list *aux;

	if (!mc_hash)
		return;
	mc_hash += ip_mc_hash(im);
	/* walk the bucket chain until *mc_hash is the entry to remove */
	while ((aux = rtnl_dereference(*mc_hash)) != im)
		mc_hash = &aux->next_hash;
	*mc_hash = im->next_hash;
}


/*
 * A socket has joined a multicast group on device dev.
 */
/* Core join: bump the user count of an existing group record, or
 * allocate and publish a new one with the given initial filter @mode.
 * Must run under RTNL (ASSERT_RTNL below).
 */
static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
				unsigned int mode, gfp_t gfp)
{
	struct ip_mc_list *im;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == addr) {
			im->users++;
			ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
			goto out;
		}
	}

	im = kzalloc(sizeof(*im), gfp);
	if (!im)
		goto out;	/* allocation failure: join is silently lost */

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);
	im->multiaddr = addr;
	/* initial mode is (EX, empty) */
	im->sfmode = mode;
	im->sfcount[mode] = 1;
	refcount_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	timer_setup(&im->timer, igmp_timer_expire, 0);
#endif

	/* initialise fully before publishing to RCU readers */
	im->next_rcu = in_dev->mc_list;
	in_dev->mc_count++;
	rcu_assign_pointer(in_dev->mc_list, im);

	ip_mc_hash_add(in_dev, im);

#ifdef CONFIG_IP_MULTICAST
	igmpv3_del_delrec(in_dev, im);	/* revive any pending tomb state */
#endif
	igmp_group_added(im);
	if (!in_dev->dead)
		ip_rt_multicast_event(in_dev);
out:
	return;
}

void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	____ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE, gfp);
}
EXPORT_SYMBOL(__ip_mc_inc_group);

void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
	__ip_mc_inc_group(in_dev,
addr, GFP_KERNEL); 1491 } 1492 EXPORT_SYMBOL(ip_mc_inc_group); 1493 1494 static int ip_mc_check_iphdr(struct sk_buff *skb) 1495 { 1496 const struct iphdr *iph; 1497 unsigned int len; 1498 unsigned int offset = skb_network_offset(skb) + sizeof(*iph); 1499 1500 if (!pskb_may_pull(skb, offset)) 1501 return -EINVAL; 1502 1503 iph = ip_hdr(skb); 1504 1505 if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph)) 1506 return -EINVAL; 1507 1508 offset += ip_hdrlen(skb) - sizeof(*iph); 1509 1510 if (!pskb_may_pull(skb, offset)) 1511 return -EINVAL; 1512 1513 iph = ip_hdr(skb); 1514 1515 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1516 return -EINVAL; 1517 1518 len = skb_network_offset(skb) + ntohs(iph->tot_len); 1519 if (skb->len < len || len < offset) 1520 return -EINVAL; 1521 1522 skb_set_transport_header(skb, offset); 1523 1524 return 0; 1525 } 1526 1527 static int ip_mc_check_igmp_reportv3(struct sk_buff *skb) 1528 { 1529 unsigned int len = skb_transport_offset(skb); 1530 1531 len += sizeof(struct igmpv3_report); 1532 1533 return ip_mc_may_pull(skb, len) ? 0 : -EINVAL; 1534 } 1535 1536 static int ip_mc_check_igmp_query(struct sk_buff *skb) 1537 { 1538 unsigned int transport_len = ip_transport_len(skb); 1539 unsigned int len; 1540 1541 /* IGMPv{1,2}? */ 1542 if (transport_len != sizeof(struct igmphdr)) { 1543 /* or IGMPv3? 
*/ 1544 if (transport_len < sizeof(struct igmpv3_query)) 1545 return -EINVAL; 1546 1547 len = skb_transport_offset(skb) + sizeof(struct igmpv3_query); 1548 if (!ip_mc_may_pull(skb, len)) 1549 return -EINVAL; 1550 } 1551 1552 /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer 1553 * all-systems destination addresses (224.0.0.1) for general queries 1554 */ 1555 if (!igmp_hdr(skb)->group && 1556 ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP)) 1557 return -EINVAL; 1558 1559 return 0; 1560 } 1561 1562 static int ip_mc_check_igmp_msg(struct sk_buff *skb) 1563 { 1564 switch (igmp_hdr(skb)->type) { 1565 case IGMP_HOST_LEAVE_MESSAGE: 1566 case IGMP_HOST_MEMBERSHIP_REPORT: 1567 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1568 return 0; 1569 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1570 return ip_mc_check_igmp_reportv3(skb); 1571 case IGMP_HOST_MEMBERSHIP_QUERY: 1572 return ip_mc_check_igmp_query(skb); 1573 default: 1574 return -ENOMSG; 1575 } 1576 } 1577 1578 static __sum16 ip_mc_validate_checksum(struct sk_buff *skb) 1579 { 1580 return skb_checksum_simple_validate(skb); 1581 } 1582 1583 static int ip_mc_check_igmp_csum(struct sk_buff *skb) 1584 { 1585 unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr); 1586 unsigned int transport_len = ip_transport_len(skb); 1587 struct sk_buff *skb_chk; 1588 1589 if (!ip_mc_may_pull(skb, len)) 1590 return -EINVAL; 1591 1592 skb_chk = skb_checksum_trimmed(skb, transport_len, 1593 ip_mc_validate_checksum); 1594 if (!skb_chk) 1595 return -EINVAL; 1596 1597 if (skb_chk != skb) 1598 kfree_skb(skb_chk); 1599 1600 return 0; 1601 } 1602 1603 /** 1604 * ip_mc_check_igmp - checks whether this is a sane IGMP packet 1605 * @skb: the skb to validate 1606 * 1607 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets 1608 * skb transport header accordingly and returns zero. 1609 * 1610 * -EINVAL: A broken packet was detected, i.e. 
 * it violates some internet
 * standard
 * -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
 * -ENOMEM: A memory allocation failure happened.
 *
 * Caller needs to set the skb network header and free any returned skb if it
 * differs from the provided skb.
 */
int ip_mc_check_igmp(struct sk_buff *skb)
{
	int ret = ip_mc_check_iphdr(skb);

	if (ret < 0)
		return ret;

	if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
		return -ENOMSG;

	ret = ip_mc_check_igmp_csum(skb);
	if (ret < 0)
		return ret;

	return ip_mc_check_igmp_msg(skb);
}
EXPORT_SYMBOL(ip_mc_check_igmp);

/*
 * Resend IGMP JOIN report; used by netdev notifier.
 */
/* Walk all groups on the interface and immediately re-send a membership
 * report for each, using the report type matching the IGMP version seen
 * on the link.  Runs under RTNL.
 */
static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
	struct ip_mc_list *im;
	int type;
	struct net *net = dev_net(in_dev->dev);

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
			continue;

		/* a failover is happening and switches
		 * must be notified immediately
		 */
		if (IGMP_V1_SEEN(in_dev))
			type = IGMP_HOST_MEMBERSHIP_REPORT;
		else if (IGMP_V2_SEEN(in_dev))
			type = IGMPV2_HOST_MEMBERSHIP_REPORT;
		else
			type = IGMPV3_HOST_MEMBERSHIP_REPORT;
		igmp_send_report(in_dev, im, type);
	}
#endif
}

/*
 * A socket has left a multicast group on device dev
 */

/* Core leave: drop one user of the group; when the last user goes,
 * unlink the record, send the leave/change report and release it.
 * Must run under RTNL.
 */
void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	struct ip_mc_list *i;
	struct ip_mc_list __rcu **ip;

	ASSERT_RTNL();

	for (ip = &in_dev->mc_list;
	     (i = rtnl_dereference(*ip)) != NULL;
	     ip = &i->next_rcu) {
		if (i->multiaddr == addr) {
			if (--i->users == 0) {
				ip_mc_hash_remove(in_dev, i);
				*ip = i->next_rcu;
				in_dev->mc_count--;
				__igmp_group_dropped(i, gfp);
				ip_mc_clear_src(i);

				if (!in_dev->dead)
					ip_rt_multicast_event(in_dev);

				ip_ma_put(i);	/* drop the list's reference */
				return;
			}
			break;
		}
	}
}
EXPORT_SYMBOL(__ip_mc_dec_group);

/* Device changing type */

/* Drop all groups (hardware filters, reports) without forgetting the
 * membership records; ip_mc_remap() re-adds them afterwards.
 */
void ip_mc_unmap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);
}

void ip_mc_remap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}

/* Device going down */

/* Stop all IGMP activity on the interface: drop every group, cancel the
 * interface-change and general-query timers, and leave 224.0.0.1.
 */
void ip_mc_down(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);

#ifdef CONFIG_IP_MULTICAST
	WRITE_ONCE(in_dev->mr_ifc_count, 0);
	/* each pending timer holds an in_device reference; release it
	 * only if we actually cancelled the timer
	 */
	if (del_timer(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
	in_dev->mr_gq_running = 0;
	if (del_timer(&in_dev->mr_gq_timer))
		__in_dev_put(in_dev);
#endif

	ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}

#ifdef CONFIG_IP_MULTICAST
/* Reset the per-interface query parameters to their defaults. */
static void ip_mc_reset(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);

	in_dev->mr_qi = IGMP_QUERY_INTERVAL;
	in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
	in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
}
#else
static void ip_mc_reset(struct in_device *in_dev)
{
}
#endif

void ip_mc_init_dev(struct in_device *in_dev)
{
	ASSERT_RTNL();

#ifdef CONFIG_IP_MULTICAST
	timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
	timer_setup(&in_dev->mr_ifc_timer,
		    igmp_ifc_timer_expire, 0);
#endif
	ip_mc_reset(in_dev);

	spin_lock_init(&in_dev->mc_tomb_lock);
}

/* Device going up */

/* Re-arm IGMP on the interface: reset query parameters, rejoin
 * 224.0.0.1 and re-add every remembered group.
 */
void ip_mc_up(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	ip_mc_reset(in_dev);
	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}

/*
 * Device is about to be destroyed: clean up.
 */

void ip_mc_destroy_dev(struct in_device *in_dev)
{
	struct ip_mc_list *i;

	ASSERT_RTNL();

	/* Deactivate timers */
	ip_mc_down(in_dev);
#ifdef CONFIG_IP_MULTICAST
	igmpv3_clear_delrec(in_dev);
#endif

	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
		in_dev->mc_list = i->next_rcu;
		in_dev->mc_count--;
		ip_mc_clear_src(i);
		ip_ma_put(i);
	}
}

/* RTNL is locked */
/* Resolve the in_device for a join/leave request: by explicit ifindex,
 * by local interface address, or by routing the multicast destination.
 * As a side effect, fills in imr->imr_ifindex when a device is found.
 */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
	struct net_device *dev = NULL;
	struct in_device *idev = NULL;

	if (imr->imr_ifindex) {
		idev = inetdev_by_index(net, imr->imr_ifindex);
		return idev;
	}
	if (imr->imr_address.s_addr) {
		dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
		if (!dev)
			return NULL;
	}

	if (!dev) {
		/* last resort: route the group address */
		struct rtable *rt = ip_route_output(net,
						    imr->imr_multiaddr.s_addr,
						    0, 0, 0);
		if (!IS_ERR(rt)) {
			dev = rt->dst.dev;
			ip_rt_put(rt);
		}
	}
	if (dev) {
		imr->imr_ifindex = dev->ifindex;
		idev = __in_dev_get_rtnl(dev);
	}
	return idev;
}

/*
 * Join a socket to a group
 */

/* Remove one reference to source *psfsrc in filter mode @sfmode.
 * Returns 1 when the source moved to the tomb list (a change record is
 * needed), 0 on plain removal, -ESRCH if the source was not found.
 */
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;
	int rv = 0;
	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong =>  bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (psf->sf_count[sfmode] == 0) {
		ip_rt_multicast_event(pmc->interface);
	}
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct in_device *in_dev = pmc->interface;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
		/* on IGMPv3 links, a previously-active source moves to the
		 * tomb list so retransmitted change reports can mention it
		 */
		if (psf->sf_oldin &&
		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
			psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
			psf->sf_next = pmc->tomb;
			pmc->tomb = psf;
			rv = 1;
		} else
#endif
			kfree(psf);
	}
	return rv;
}

#ifndef CONFIG_IP_MULTICAST
#define igmp_ifc_event(x)	do { } while (0)
#endif

/* Remove @sfcount sources (@psfsrc) in mode @sfmode from the interface
 * record for group *pmca; @delta distinguishes incremental updates from
 * full-filter changes.  Returns 0 or the first error encountered.
 */
static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int	changerec = 0;
	int	i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found??
		   bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	/* take pmc->lock before leaving the RCU section so the record
	 * cannot be freed under us
	 */
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* filter mode change */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
/* Add one reference to source *psfsrc in mode @sfmode, allocating the
 * ip_sf_list entry if the source is new.  Returns 0 or -ENOBUFS.
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf) {
		/* GFP_ATOMIC: called with pmc->lock held, BHs disabled */
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;
		psf->sf_inaddr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->sources = psf;
	}
	psf->sf_count[sfmode]++;
	if (psf->sf_count[sfmode] == 1) {
		ip_rt_multicast_event(pmc->interface);
	}
	return 0;
}

#ifdef CONFIG_IP_MULTICAST
/* Snapshot, per source, whether it was active ("in") under the current
 * filter mode, so sf_setstate() can detect transitions afterwards.
 */
static void sf_markstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];

	for (psf = pmc->sources; psf; psf = psf->sf_next)
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			/* EXCLUDE mode: a source is "in" when every
			 * excluding socket excludes it and nobody includes it
			 */
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}

/* Compare each source's current "in" state with the sf_markstate()
 * snapshot, and maintain the tomb list of sources that need delete
 * records in upcoming IGMPv3 change reports.  Returns the number of
 * sources whose state changed.  pmc->lock held by callers.
 */
static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				/* source became active: drop any pending
				 * delete record for it
				 */
				struct ip_sf_list *prev = NULL;

				for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {

			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;	/* best effort */
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
#endif

/*
 * Add multicast source filter list to the
 * interface list
 */
/* Add @sfcount sources (@psfsrc) in mode @sfmode to the interface
 * record for group *pmca.  On partial failure, already-added sources
 * are rolled back.  Triggers an IGMPv3 interface-change event when the
 * effective filter mode flips.
 */
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int	isexclude;
	int	i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	/* pin the record with its lock before leaving the RCU section */
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();

#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	isexclude = pmc->sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		/* roll back the sources added before the failure */
		if (!delta)
			pmc->sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(pmc->interface->dev);
		in_dev = pmc->interface;
#endif

		/* filter mode change */
		if (pmc->sfcount[MCAST_EXCLUDE])
			pmc->sfmode = MCAST_EXCLUDE;
		else if (pmc->sfcount[MCAST_INCLUDE])
			pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* else no filters; keep old mode for reports */

		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(in_dev);
	} else if (sf_setstate(pmc)) {
		igmp_ifc_event(in_dev);
#endif
	}
	spin_unlock_bh(&pmc->lock);
	return err;
}

/* Reset the group record to its initial (EXCLUDE, empty) state and free
 * both source lists outside the lock.
 */
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
	struct ip_sf_list *tomb, *sources;

	spin_lock_bh(&pmc->lock);
	tomb = pmc->tomb;
	pmc->tomb = NULL;
	sources = pmc->sources;
	pmc->sources = NULL;
	pmc->sfmode = MCAST_EXCLUDE;
	pmc->sfcount[MCAST_INCLUDE] = 0;
	pmc->sfcount[MCAST_EXCLUDE] = 1;
	spin_unlock_bh(&pmc->lock);

	/* free the detached lists without holding pmc->lock */
	ip_sf_list_clear_all(tomb);
	ip_sf_list_clear_all(sources);
}

/* Join a multicast group
 */
/* Socket-level join: record the membership on the socket (subject to the
 * igmp_max_memberships limit) and join the group on the interface with
 * initial filter @mode.  Runs under RTNL.
 */
static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
			      unsigned int mode)
{
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;
	int err;

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* duplicate join of the same (group, ifindex) is -EADDRINUSE */
	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	for_each_pmc_rtnl(inet, i) {
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships))
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (!iml)
		goto done;

	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next_rcu = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = mode;
	rcu_assign_pointer(inet->mc_list, iml);
	____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
	err = 0;
done:
	return err;
}

/* Join ASM (Any-Source Multicast) group
 */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ip_mc_join_group);

/* Join SSM (Source-Specific Multicast) group
 */
int ip_mc_join_group_ssm(struct sock *sk, struct
			 ip_mreqn *imr,
			 unsigned int mode)
{
	return __ip_mc_join_group(sk, imr, mode);
}

/* Tear down the per-socket source filter for membership @iml and remove
 * its sources from the interface record.  Runs under RTNL.
 */
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
			   struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

	if (!psf) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
				     iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			    iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	RCU_INIT_POINTER(iml->sflist, NULL);
	/* decrease mem now to avoid the memleak warning */
	atomic_sub(struct_size(psf, sl_addr, psf->sl_max), &sk->sk_omem_alloc);
	kfree_rcu(psf, rcu);	/* readers may still be traversing it */
	return err;
}

/* Socket-level leave: find the matching membership on the socket,
 * release its source filter, unlink it and drop the interface group.
 * Runs under RTNL.
 */
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct ip_mc_socklist __rcu **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	ASSERT_RTNL();

	in_dev = ip_mc_find_dev(net, imr);
	if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
		ret = -ENODEV;
		goto out;
	}
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list;
	     (iml = rtnl_dereference(*imlp)) != NULL;
	     imlp = &iml->next_rcu) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
			iml->multi.imr_address.s_addr)
			continue;

		(void) ip_mc_leave_src(sk, iml, in_dev);

		*imlp = iml->next_rcu;

		if (in_dev)
			ip_mc_dec_group(in_dev, group);

		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml),
			   &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
		return 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);

/* Add (@add != 0) or delete one source address to/from the per-socket
 * source filter for the given group, keeping the interface filter in
 * sync.  Implements IP_{ADD,DROP}_SOURCE_MEMBERSHIP and
 * MCAST_{,UN}BLOCK_SOURCE semantics.  Runs under RTNL.
 */
int ip_mc_source(int add, int omode, struct sock *sk, struct
	ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			NULL, 0);
		pmc->sfmode = omode;
	}

	psl = rtnl_dereference(pmc->sflist);
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			&mreqs->imr_sourceaddr, 1);

		/* close the gap left by the removed entry */
		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		/* grow the filter list by IP_SFBLOCK entries, copying the
		 * old contents; old list is freed after an RCU grace period
		 */
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* decrease mem now to avoid the memleak warning */
			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
				   &sk->sk_omem_alloc);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		if (psl)
			kfree_rcu(psl, rcu);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	/* insert in sorted position i, shifting the tail up by one */
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		&mreqs->imr_sourceaddr, 1);
2436 done: 2437 if (leavegroup) 2438 err = ip_mc_leave_group(sk, &imr); 2439 return err; 2440 } 2441 2442 int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) 2443 { 2444 int err = 0; 2445 struct ip_mreqn imr; 2446 __be32 addr = msf->imsf_multiaddr; 2447 struct ip_mc_socklist *pmc; 2448 struct in_device *in_dev; 2449 struct inet_sock *inet = inet_sk(sk); 2450 struct ip_sf_socklist *newpsl, *psl; 2451 struct net *net = sock_net(sk); 2452 int leavegroup = 0; 2453 2454 if (!ipv4_is_multicast(addr)) 2455 return -EINVAL; 2456 if (msf->imsf_fmode != MCAST_INCLUDE && 2457 msf->imsf_fmode != MCAST_EXCLUDE) 2458 return -EINVAL; 2459 2460 ASSERT_RTNL(); 2461 2462 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 2463 imr.imr_address.s_addr = msf->imsf_interface; 2464 imr.imr_ifindex = ifindex; 2465 in_dev = ip_mc_find_dev(net, &imr); 2466 2467 if (!in_dev) { 2468 err = -ENODEV; 2469 goto done; 2470 } 2471 2472 /* special case - (INCLUDE, empty) == LEAVE_GROUP */ 2473 if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) { 2474 leavegroup = 1; 2475 goto done; 2476 } 2477 2478 for_each_pmc_rtnl(inet, pmc) { 2479 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && 2480 pmc->multi.imr_ifindex == imr.imr_ifindex) 2481 break; 2482 } 2483 if (!pmc) { /* must have a prior join */ 2484 err = -EINVAL; 2485 goto done; 2486 } 2487 if (msf->imsf_numsrc) { 2488 newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, 2489 msf->imsf_numsrc), 2490 GFP_KERNEL); 2491 if (!newpsl) { 2492 err = -ENOBUFS; 2493 goto done; 2494 } 2495 newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc; 2496 memcpy(newpsl->sl_addr, msf->imsf_slist_flex, 2497 flex_array_size(msf, imsf_slist_flex, msf->imsf_numsrc)); 2498 err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr, 2499 msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0); 2500 if (err) { 2501 sock_kfree_s(sk, newpsl, 2502 struct_size(newpsl, sl_addr, 2503 newpsl->sl_max)); 2504 goto done; 2505 } 2506 } else { 2507 newpsl 
= NULL; 2508 (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr, 2509 msf->imsf_fmode, 0, NULL, 0); 2510 } 2511 psl = rtnl_dereference(pmc->sflist); 2512 if (psl) { 2513 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2514 psl->sl_count, psl->sl_addr, 0); 2515 /* decrease mem now to avoid the memleak warning */ 2516 atomic_sub(struct_size(psl, sl_addr, psl->sl_max), 2517 &sk->sk_omem_alloc); 2518 } else { 2519 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2520 0, NULL, 0); 2521 } 2522 rcu_assign_pointer(pmc->sflist, newpsl); 2523 if (psl) 2524 kfree_rcu(psl, rcu); 2525 pmc->sfmode = msf->imsf_fmode; 2526 err = 0; 2527 done: 2528 if (leavegroup) 2529 err = ip_mc_leave_group(sk, &imr); 2530 return err; 2531 } 2532 int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, 2533 sockptr_t optval, sockptr_t optlen) 2534 { 2535 int err, len, count, copycount, msf_size; 2536 struct ip_mreqn imr; 2537 __be32 addr = msf->imsf_multiaddr; 2538 struct ip_mc_socklist *pmc; 2539 struct in_device *in_dev; 2540 struct inet_sock *inet = inet_sk(sk); 2541 struct ip_sf_socklist *psl; 2542 struct net *net = sock_net(sk); 2543 2544 ASSERT_RTNL(); 2545 2546 if (!ipv4_is_multicast(addr)) 2547 return -EINVAL; 2548 2549 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 2550 imr.imr_address.s_addr = msf->imsf_interface; 2551 imr.imr_ifindex = 0; 2552 in_dev = ip_mc_find_dev(net, &imr); 2553 2554 if (!in_dev) { 2555 err = -ENODEV; 2556 goto done; 2557 } 2558 err = -EADDRNOTAVAIL; 2559 2560 for_each_pmc_rtnl(inet, pmc) { 2561 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && 2562 pmc->multi.imr_ifindex == imr.imr_ifindex) 2563 break; 2564 } 2565 if (!pmc) /* must have a prior join */ 2566 goto done; 2567 msf->imsf_fmode = pmc->sfmode; 2568 psl = rtnl_dereference(pmc->sflist); 2569 if (!psl) { 2570 count = 0; 2571 } else { 2572 count = psl->sl_count; 2573 } 2574 copycount = count < msf->imsf_numsrc ? 
count : msf->imsf_numsrc; 2575 len = flex_array_size(psl, sl_addr, copycount); 2576 msf->imsf_numsrc = count; 2577 msf_size = IP_MSFILTER_SIZE(copycount); 2578 if (copy_to_sockptr(optlen, &msf_size, sizeof(int)) || 2579 copy_to_sockptr(optval, msf, IP_MSFILTER_SIZE(0))) { 2580 return -EFAULT; 2581 } 2582 if (len && 2583 copy_to_sockptr_offset(optval, 2584 offsetof(struct ip_msfilter, imsf_slist_flex), 2585 psl->sl_addr, len)) 2586 return -EFAULT; 2587 return 0; 2588 done: 2589 return err; 2590 } 2591 2592 int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, 2593 sockptr_t optval, size_t ss_offset) 2594 { 2595 int i, count, copycount; 2596 struct sockaddr_in *psin; 2597 __be32 addr; 2598 struct ip_mc_socklist *pmc; 2599 struct inet_sock *inet = inet_sk(sk); 2600 struct ip_sf_socklist *psl; 2601 2602 ASSERT_RTNL(); 2603 2604 psin = (struct sockaddr_in *)&gsf->gf_group; 2605 if (psin->sin_family != AF_INET) 2606 return -EINVAL; 2607 addr = psin->sin_addr.s_addr; 2608 if (!ipv4_is_multicast(addr)) 2609 return -EINVAL; 2610 2611 for_each_pmc_rtnl(inet, pmc) { 2612 if (pmc->multi.imr_multiaddr.s_addr == addr && 2613 pmc->multi.imr_ifindex == gsf->gf_interface) 2614 break; 2615 } 2616 if (!pmc) /* must have a prior join */ 2617 return -EADDRNOTAVAIL; 2618 gsf->gf_fmode = pmc->sfmode; 2619 psl = rtnl_dereference(pmc->sflist); 2620 count = psl ? psl->sl_count : 0; 2621 copycount = count < gsf->gf_numsrc ? 
count : gsf->gf_numsrc; 2622 gsf->gf_numsrc = count; 2623 for (i = 0; i < copycount; i++) { 2624 struct sockaddr_storage ss; 2625 2626 psin = (struct sockaddr_in *)&ss; 2627 memset(&ss, 0, sizeof(ss)); 2628 psin->sin_family = AF_INET; 2629 psin->sin_addr.s_addr = psl->sl_addr[i]; 2630 if (copy_to_sockptr_offset(optval, ss_offset, 2631 &ss, sizeof(ss))) 2632 return -EFAULT; 2633 ss_offset += sizeof(ss); 2634 } 2635 return 0; 2636 } 2637 2638 /* 2639 * check if a multicast source filter allows delivery for a given <src,dst,intf> 2640 */ 2641 int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, 2642 int dif, int sdif) 2643 { 2644 struct inet_sock *inet = inet_sk(sk); 2645 struct ip_mc_socklist *pmc; 2646 struct ip_sf_socklist *psl; 2647 int i; 2648 int ret; 2649 2650 ret = 1; 2651 if (!ipv4_is_multicast(loc_addr)) 2652 goto out; 2653 2654 rcu_read_lock(); 2655 for_each_pmc_rcu(inet, pmc) { 2656 if (pmc->multi.imr_multiaddr.s_addr == loc_addr && 2657 (pmc->multi.imr_ifindex == dif || 2658 (sdif && pmc->multi.imr_ifindex == sdif))) 2659 break; 2660 } 2661 ret = inet->mc_all; 2662 if (!pmc) 2663 goto unlock; 2664 psl = rcu_dereference(pmc->sflist); 2665 ret = (pmc->sfmode == MCAST_EXCLUDE); 2666 if (!psl) 2667 goto unlock; 2668 2669 for (i = 0; i < psl->sl_count; i++) { 2670 if (psl->sl_addr[i] == rmt_addr) 2671 break; 2672 } 2673 ret = 0; 2674 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) 2675 goto unlock; 2676 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) 2677 goto unlock; 2678 ret = 1; 2679 unlock: 2680 rcu_read_unlock(); 2681 out: 2682 return ret; 2683 } 2684 2685 /* 2686 * A socket is closing. 
2687 */ 2688 2689 void ip_mc_drop_socket(struct sock *sk) 2690 { 2691 struct inet_sock *inet = inet_sk(sk); 2692 struct ip_mc_socklist *iml; 2693 struct net *net = sock_net(sk); 2694 2695 if (!inet->mc_list) 2696 return; 2697 2698 rtnl_lock(); 2699 while ((iml = rtnl_dereference(inet->mc_list)) != NULL) { 2700 struct in_device *in_dev; 2701 2702 inet->mc_list = iml->next_rcu; 2703 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2704 (void) ip_mc_leave_src(sk, iml, in_dev); 2705 if (in_dev) 2706 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2707 /* decrease mem now to avoid the memleak warning */ 2708 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 2709 kfree_rcu(iml, rcu); 2710 } 2711 rtnl_unlock(); 2712 } 2713 2714 /* called with rcu_read_lock() */ 2715 int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u8 proto) 2716 { 2717 struct ip_mc_list *im; 2718 struct ip_mc_list __rcu **mc_hash; 2719 struct ip_sf_list *psf; 2720 int rv = 0; 2721 2722 mc_hash = rcu_dereference(in_dev->mc_hash); 2723 if (mc_hash) { 2724 u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG); 2725 2726 for (im = rcu_dereference(mc_hash[hash]); 2727 im != NULL; 2728 im = rcu_dereference(im->next_hash)) { 2729 if (im->multiaddr == mc_addr) 2730 break; 2731 } 2732 } else { 2733 for_each_pmc_rcu(in_dev, im) { 2734 if (im->multiaddr == mc_addr) 2735 break; 2736 } 2737 } 2738 if (im && proto == IPPROTO_IGMP) { 2739 rv = 1; 2740 } else if (im) { 2741 if (src_addr) { 2742 spin_lock_bh(&im->lock); 2743 for (psf = im->sources; psf; psf = psf->sf_next) { 2744 if (psf->sf_inaddr == src_addr) 2745 break; 2746 } 2747 if (psf) 2748 rv = psf->sf_count[MCAST_INCLUDE] || 2749 psf->sf_count[MCAST_EXCLUDE] != 2750 im->sfcount[MCAST_EXCLUDE]; 2751 else 2752 rv = im->sfcount[MCAST_EXCLUDE] != 0; 2753 spin_unlock_bh(&im->lock); 2754 } else 2755 rv = 1; /* unspecified source; tentatively allow */ 2756 } 2757 return rv; 2758 } 2759 2760 #if defined(CONFIG_PROC_FS) 2761 
/* Iterator state for /proc/net/igmp: current device and its in_device. */
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

#define igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)

/* Find the first multicast group on the first device that has one.
 * Caller holds rcu_read_lock() (taken in igmp_mc_seq_start()).
 */
static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_mc_list *im = NULL;
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(state->dev);
		if (!in_dev)
			continue;
		im = rcu_dereference(in_dev->mc_list);
		if (im) {
			state->in_dev = in_dev;
			break;
		}
	}
	return im;
}

/* Advance to the next group, crossing to the next device's list when the
 * current device's list is exhausted.  Returns NULL at the end.
 */
static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	im = rcu_dereference(im->next_rcu);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		im = rcu_dereference(state->in_dev->mc_list);
	}
	return im;
}

/* Seek to the pos'th group (0-based) for seq_file random access. */
static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_mc_list *im = igmp_mc_get_first(seq);
	if (im)
		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	/* RCU read lock is held until igmp_mc_seq_stop(). */
	rcu_read_lock();
	return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_mc_list *im;
	if (v == SEQ_START_TOKEN)
		im = igmp_mc_get_first(seq);
	else
		im = igmp_mc_get_next(seq, v);
	++*pos;
	return im;
}

static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

/* Emit one /proc/net/igmp line: a per-device header when im is the first
 * group of its device, then the group's address/users/timer state.
 */
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char *querier;
		long delta;

#ifdef CONFIG_IP_MULTICAST
		/* Report the lowest IGMP version recently seen from a querier. */
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		if (rcu_access_pointer(state->in_dev->mc_list) == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
		}

		delta = im->timer.expires - jiffies;
		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running,
			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
			   im->reporter);
	}
	return 0;
}

static const struct seq_operations igmp_mc_seq_ops = {
	.start	= igmp_mc_seq_start,
	.next	= igmp_mc_seq_next,
	.stop	= igmp_mc_seq_stop,
	.show	= igmp_mc_seq_show,
};

/* Iterator state for /proc/net/mcfilter: device, in_device and the group
 * whose source list is being walked (its im->lock is held in between
 * seq_file callbacks and dropped in igmp_mcf_seq_stop()).
 */
struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)

/* Find the first source-filter entry on any group of any device.  On
 * success the owning group's spinlock is LEFT HELD (BH-disabled); it is
 * released by igmp_mcf_get_next() or igmp_mcf_seq_stop().
 */
static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;
		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(!idev))
			continue;
		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
	}
	return psf;
}

/* Advance to the next source entry, moving across groups and devices as
 * needed; unlocks the finished group and locks the next one before
 * touching its source list.
 */
static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
}

/* Seek to the pos'th source entry (0-based) for seq_file random access. */
static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_sf_list *psf = igmp_mcf_get_first(seq);
	if (psf)
		while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp_mcf_get_first(seq);
	else
		psf = igmp_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
	/* Drop the group lock left held by get_first()/get_next(). */
	if (likely(state->im)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}
	state->idev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

/* Emit one /proc/net/mcfilter line: device, group, source, and the
 * INCLUDE/EXCLUDE reference counts for that source.
 */
static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip_sf_list *psf = v;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device MCA SRC INC EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s 0x%08x "
			   "0x%08x %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   ntohl(state->im->multiaddr),
			   ntohl(psf->sf_inaddr),
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp_mcf_seq_ops = {
	.start	= igmp_mcf_seq_start,
	.next	= igmp_mcf_seq_next,
	.stop	= igmp_mcf_seq_stop,
	.show	= igmp_mcf_seq_show,
};

/* Per-namespace setup: create the two proc entries and the autojoin
 * control socket, unwinding in reverse order on failure.
 * NOTE(review): all failure paths return -ENOMEM, discarding the actual
 * err from inet_ctl_sock_create() — long-standing behavior, kept as-is.
 */
static int __net_init igmp_net_init(struct net *net)
{
	struct proc_dir_entry *pde;
	int err;

	pde = proc_create_net("igmp", 0444, net->proc_net, &igmp_mc_seq_ops,
			sizeof(struct igmp_mc_iter_state));
	if (!pde)
		goto out_igmp;
	pde = proc_create_net("mcfilter", 0444, net->proc_net,
			&igmp_mcf_seq_ops, sizeof(struct igmp_mcf_iter_state));
	if (!pde)
		goto out_mcfilter;
	err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET,
				   SOCK_DGRAM, 0, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n",
		       err);
		goto out_sock;
	}

	return 0;

out_sock:
	remove_proc_entry("mcfilter", net->proc_net);
out_mcfilter:
	remove_proc_entry("igmp", net->proc_net);
out_igmp:
	return -ENOMEM;
}

/* Per-namespace teardown, mirroring igmp_net_init(). */
static void __net_exit igmp_net_exit(struct net *net)
{
	remove_proc_entry("mcfilter", net->proc_net);
	remove_proc_entry("igmp", net->proc_net);
	inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk);
}

static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};
#endif

/* Netdevice notifier: rejoin all groups when a device asks for IGMP
 * reports to be resent (NETDEV_RESEND_IGMP).
 */
static int igmp_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev;

	switch (event) {
	case NETDEV_RESEND_IGMP:
		in_dev = __in_dev_get_rtnl(dev);
		if (in_dev)
			ip_mc_rejoin_groups(in_dev);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block igmp_notifier = {
	.notifier_call = igmp_netdev_event,
};

/* Module init: register the pernet subsystem (proc files + autojoin
 * socket, when CONFIG_PROC_FS) and the netdevice notifier.
 */
int __init igmp_mc_init(void)
{
#if defined(CONFIG_PROC_FS)
	int err;

	err = register_pernet_subsys(&igmp_net_ops);
	if (err)
		return err;
	err = register_netdevice_notifier(&igmp_notifier);
	if (err)
		goto reg_notif_fail;
	return 0;

reg_notif_fail:
	unregister_pernet_subsys(&igmp_net_ops);
	return err;
#else
	return register_netdevice_notifier(&igmp_notifier);
#endif
}