/*
 *	Multicast support for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *
 *	yoshfuji	: fix format of router-alert option
 *	YOSHIFUJI Hideaki @USAGI:
 *		Fixed source address for MLD message based on
 *		<draft-ietf-magma-mld-source-05.txt>.
 *	YOSHIFUJI Hideaki @USAGI:
 *		- Ignore Queries for invalid addresses.
 *		- MLD for link-local addresses.
 *	David L Stevens <dlstevens@us.ibm.com>:
 *		- MLDv2 support
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/times.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/route.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#include <net/mld.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>

#include <net/ip6_checksum.h>
/* Ensure that we have struct in6_addr aligned on 32bit word. The unused
 * array below fails the build (via BUILD_BUG_ON_ZERO) if any of these
 * offsets loses its 4-byte alignment.
 */
static int __mld2_query_bugs[] __attribute__((__unused__)) = {
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
};

static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;

static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void igmp6_timer_handler(struct timer_list *t);

static void mld_gq_timer_expire(struct timer_list *t);
static void mld_ifc_timer_expire(struct timer_list *t);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev);
static int __ipv6_dev_mc_inc(struct net_device *dev,
			     const struct in6_addr *addr, unsigned int mode);

#define MLD_QRV_DEFAULT		2
/* RFC3810, 9.2. Query Interval */
#define MLD_QI_DEFAULT		(125 * HZ)
/* RFC3810, 9.3. Query Response Interval */
#define MLD_QRI_DEFAULT		(10 * HZ)

/* RFC3810, 8.1 Query Version Distinctions */
#define MLD_V1_QUERY_LEN	24
#define MLD_V2_QUERY_LEN_MIN	28

#define IPV6_MLD_MAX_MSF	64

int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;

/*
 *	socket join on multicast group
 */

#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next))

static int unsolicited_report_interval(struct inet6_dev *idev)
{
	int iv;

	if (mld_in_v1_mode(idev))
		iv = idev->cnf.mldv1_unsolicited_report_interval;
	else
		iv = idev->cnf.mldv2_unsolicited_report_interval;

	/* clamp to at least one jiffy: callers use the value as a
	 * modulus for the random report delay
	 */
	return iv > 0 ? iv : 1;
}
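
/* Writers of the per-socket membership list np->ipv6_mc_list run under
 * RTNL (hence the ASSERT_RTNL() calls below); readers only need
 * rcu_read_lock(), which is what for_each_pmc_rcu() relies on.
 */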
static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
			       const struct in6_addr *addr, unsigned int mode)
{
	struct net_device *dev = NULL;
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int err;

	ASSERT_RTNL();

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc_lst) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			rcu_read_unlock();
			return -EADDRINUSE;
		}
	}
	rcu_read_unlock();

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);

	if (!mc_lst)
		return -ENOMEM;

	mc_lst->next = NULL;
	mc_lst->addr = *addr;

	if (ifindex == 0) {
		struct rt6_info *rt;
		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = __dev_get_by_index(net, ifindex);

	if (!dev) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return -ENODEV;
	}

	mc_lst->ifindex = dev->ifindex;
	mc_lst->sfmode = mode;
	rwlock_init(&mc_lst->sflock);
	mc_lst->sflist = NULL;

	/*
	 *	now add/increase the group membership on the device
	 */

	err = __ipv6_dev_mc_inc(dev, addr, mode);

	if (err) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return err;
	}

	mc_lst->next = np->ipv6_mc_list;
	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);

	return 0;
}

int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_sock_mc_join);

int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
			  const struct in6_addr *addr, unsigned int mode)
{
	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
}

/*
 *	socket leave on multicast group
 */
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_mc_socklist __rcu **lnk;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = rtnl_dereference(*lnk)) != NULL;
	     lnk = &mc_lst->next) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			struct net_device *dev;

			*lnk = mc_lst->next;

			dev = __dev_get_by_index(net, mc_lst->ifindex);
			if (dev) {
				struct inet6_dev *idev = __in6_dev_get(dev);

				(void) ip6_mc_leave_src(sk, mc_lst, idev);
				if (idev)
					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
			} else
				(void) ip6_mc_leave_src(sk, mc_lst, NULL);

			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
			kfree_rcu(mc_lst, rcu);
			return 0;
		}
	}

	return -EADDRNOTAVAIL;
}
EXPORT_SYMBOL(ipv6_sock_mc_drop);
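
/* Note on ip6_mc_find_dev_rcu() below: on success it returns with
 * idev->lock read-held (_bh); the caller is expected to drop it with
 * read_unlock_bh() once it is done with the group/source state.
 */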
/* called with rcu_read_lock() */
static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
					     const struct in6_addr *group,
					     int ifindex)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;

	if (ifindex == 0) {
		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);

		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = dev_get_by_index_rcu(net, ifindex);

	if (!dev)
		return NULL;
	idev = __in6_dev_get(dev);
	if (!idev)
		return NULL;
	read_lock_bh(&idev->lock);
	if (idev->dead) {
		read_unlock_bh(&idev->lock);
		return NULL;
	}
	return idev;
}

void __ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
		struct net_device *dev;

		np->ipv6_mc_list = mc_lst->next;

		dev = __dev_get_by_index(net, mc_lst->ifindex);
		if (dev) {
			struct inet6_dev *idev = __in6_dev_get(dev);

			(void) ip6_mc_leave_src(sk, mc_lst, idev);
			if (idev)
				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
		} else
			(void) ip6_mc_leave_src(sk, mc_lst, NULL);

		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
		kfree_rcu(mc_lst, rcu);
	}
}

void ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (!rcu_access_pointer(np->ipv6_mc_list))
		return;
	rtnl_lock();
	__ipv6_sock_mc_close(sk);
	rtnl_unlock();
}
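
/* Per-source filtering on one source.  As a rough map (an assumption
 * based on the setsockopt() callers, which live outside this file):
 *	MCAST_JOIN_SOURCE_GROUP		add=1, omode=MCAST_INCLUDE
 *	MCAST_LEAVE_SOURCE_GROUP	add=0, omode=MCAST_INCLUDE
 *	MCAST_BLOCK_SOURCE		add=1, omode=MCAST_EXCLUDE
 *	MCAST_UNBLOCK_SOURCE		add=0, omode=MCAST_EXCLUDE
 */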
int ip6_mc_source(int add, int omode, struct sock *sk,
	struct group_source_req *pgsr)
{
	struct in6_addr *source, *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int i, j, rv;
	int leavegroup = 0;
	int pmclocked = 0;
	int err;

	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;

	for_each_pmc_rcu(inet6, pmc) {
		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
		pmc->sfmode = omode;
	}

	write_lock(&pmc->sflock);
	pmclocked = 1;

	psl = pmc->sflist;
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip6_mc_del_src(idev, group, omode, 1, source, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip6_sf_socklist *newpsl;
		int count = IP6_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP6_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
		}
		pmc->sflist = psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
		if (rv == 0) /* the source is already in the filter */
			goto done;
	}
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = *source;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
	if (pmclocked)
		write_unlock(&pmc->sflock);
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
	return err;
}

int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
{
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, err;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;
	if (gsf->gf_fmode != MCAST_INCLUDE &&
	    gsf->gf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = 0;

	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (gsf->gf_numsrc) {
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
				      GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i = 0; i < newpsl->sl_count; ++i) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
			newpsl->sl_addr[i] = psin6->sin6_addr;
		}
		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
				     newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
	}

	write_lock(&pmc->sflock);
	psl = pmc->sflist;
	if (psl) {
		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
				      psl->sl_count, psl->sl_addr, 0);
		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
	} else
		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	pmc->sflist = newpsl;
	pmc->sfmode = gsf->gf_fmode;
	write_unlock(&pmc->sflock);
	err = 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
	return err;
}
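
/* getsockopt(MCAST_MSFILTER): report the current filter mode, copy up
 * to gf_numsrc sources to userspace and return the full source count
 * in gf_numsrc so the caller can retry with a bigger buffer.
 */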
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;
	/* changes to the ipv6_mc_list require the socket lock and
	 * rtnl lock. We have the socket lock and rcu read lock,
	 * so reading the list is safe.
	 */

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(group, &pmc->addr))
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = pmc->sflist;
	count = psl ? psl->sl_count : 0;
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();

	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	/* changes to psl require the socket lock, and a write lock
	 * on pmc->sflock. We have the socket lock so reading here is safe.
	 */
	for (i = 0; i < copycount; i++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	return err;
}

bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
		    const struct in6_addr *src_addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc;
	struct ip6_sf_socklist *psl;
	bool rv = true;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))
			break;
	}
	if (!mc) {
		rcu_read_unlock();
		return np->mc_all;
	}
	read_lock(&mc->sflock);
	psl = mc->sflist;
	if (!psl) {
		rv = mc->sfmode == MCAST_EXCLUDE;
	} else {
		int i;

		for (i = 0; i < psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
				break;
		}
		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
			rv = false;
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = false;
	}
	read_unlock(&mc->sflock);
	rcu_read_unlock();

	return rv;
}
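
/* From here on down, per-interface state: the socket joins above are
 * aggregated into struct ifmcaddr6 entries on idev->mc_list, and it is
 * this per-interface state that the MLD machinery reports on the wire.
 */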
static void igmp6_group_added(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (!(mc->mca_flags&MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf);
	}
	spin_unlock_bh(&mc->mca_lock);

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
		return;

	if (mld_in_v1_mode(mc->idev)) {
		igmp6_join_group(mc);
		return;
	}
	/* else v2 */

	/* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
	 * should not send filter-mode change record as the mode
	 * should be from IN() to IN(A).
	 */
	if (mc->mca_sfmode == MCAST_EXCLUDE)
		mc->mca_crcount = mc->idev->mc_qrv;

	mld_ifc_event(mc->idev);
}

static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);
	}

	spin_unlock_bh(&mc->mca_lock);
	if (mc->mca_flags & MAF_NOREPORT)
		return;

	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	spin_lock_bh(&mc->mca_lock);
	if (del_timer(&mc->mca_timer))
		refcount_dec(&mc->mca_refcnt);
	spin_unlock_bh(&mc->mca_lock);
}

/*
 * deleted ifmcaddr6 manipulation
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
	if (!pmc)
		return;

	spin_lock_bh(&im->mca_lock);
	spin_lock_init(&pmc->mca_lock);
	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		pmc->mca_tomb = im->mca_tomb;
		pmc->mca_sources = im->mca_sources;
		im->mca_tomb = im->mca_sources = NULL;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->mca_crcount;
	}
	spin_unlock_bh(&im->mca_lock);

	spin_lock_bh(&idev->mc_lock);
	pmc->next = idev->mc_tomb;
	idev->mc_tomb = pmc;
	spin_unlock_bh(&idev->mc_lock);
}

static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc, *pmc_prev;
	struct ip6_sf_list *psf;
	struct in6_addr *pmca = &im->mca_addr;

	spin_lock_bh(&idev->mc_lock);
	pmc_prev = NULL;
	for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			idev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&idev->mc_lock);

	spin_lock_bh(&im->mca_lock);
	if (pmc) {
		im->idev = pmc->idev;
		if (im->mca_sfmode == MCAST_INCLUDE) {
			im->mca_tomb = pmc->mca_tomb;
			im->mca_sources = pmc->mca_sources;
			for (psf = im->mca_sources; psf; psf = psf->sf_next)
				psf->sf_crcount = idev->mc_qrv;
		} else {
			im->mca_crcount = idev->mc_qrv;
		}
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}
	spin_unlock_bh(&im->mca_lock);
}
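
/* Flush all tombstones at once.  Deleted groups normally stay on
 * idev->mc_tomb only until their change records have been sent
 * mc_qrv times; this drops them early, e.g. when an MLDv1 querier
 * appears and pending MLDv2 state-change reports become meaningless.
 */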
static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	spin_lock_bh(&idev->mc_lock);
	pmc = idev->mc_tomb;
	idev->mc_tomb = NULL;
	spin_unlock_bh(&idev->mc_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}

	/* clear dead sources, too */
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		struct ip6_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->mca_lock);
		psf = pmc->mca_tomb;
		pmc->mca_tomb = NULL;
		spin_unlock_bh(&pmc->mca_lock);
		for (; psf; psf = psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	read_unlock_bh(&idev->lock);
}

static void mca_get(struct ifmcaddr6 *mc)
{
	refcount_inc(&mc->mca_refcnt);
}

static void ma_put(struct ifmcaddr6 *mc)
{
	if (refcount_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);
		kfree(mc);
	}
}

static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
				   const struct in6_addr *addr,
				   unsigned int mode)
{
	struct ifmcaddr6 *mc;

	mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
	if (!mc)
		return NULL;

	timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);

	mc->mca_addr = *addr;
	mc->idev = idev;	/* reference taken by caller */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	refcount_set(&mc->mca_refcnt, 1);
	spin_lock_init(&mc->mca_lock);

	mc->mca_sfmode = mode;
	mc->mca_sfcount[mode] = 1;

	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	return mc;
}
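
/* Lifetime of an ifmcaddr6 is governed by two counters: mca_users
 * counts joins (managed under idev->lock), while mca_refcnt
 * (mca_get()/ma_put()) keeps the object alive while timers or
 * in-flight work still reference it.  The final ma_put() also drops
 * the idev reference the entry holds.
 */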
/*
 *	device multicast group inc (add if not found)
 */
static int __ipv6_dev_mc_inc(struct net_device *dev,
			     const struct in6_addr *addr, unsigned int mode)
{
	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	ASSERT_RTNL();

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);

	if (!idev)
		return -EINVAL;

	write_lock_bh(&idev->lock);
	if (idev->dead) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENODEV;
	}

	for (mc = idev->mc_list; mc; mc = mc->next) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			mc->mca_users++;
			write_unlock_bh(&idev->lock);
			ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
			in6_dev_put(idev);
			return 0;
		}
	}

	mc = mca_alloc(idev, addr, mode);
	if (!mc) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	mc->next = idev->mc_list;
	idev->mc_list = mc;

	/* Hold this for the code below before we unlock,
	 * it is already exposed via idev->mc_list.
	 */
	mca_get(mc);
	write_unlock_bh(&idev->lock);

	mld_del_delrec(idev, mc);
	igmp6_group_added(mc);
	ma_put(mc);
	return 0;
}

int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
	return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_dev_mc_inc);

/*
 *	device multicast group del
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *ma, **map;

	ASSERT_RTNL();

	write_lock_bh(&idev->lock);
	for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
			if (--ma->mca_users == 0) {
				*map = ma->next;
				write_unlock_bh(&idev->lock);

				igmp6_group_dropped(ma);
				ip6_mc_clear_src(ma);

				ma_put(ma);
				return 0;
			}
			write_unlock_bh(&idev->lock);
			return 0;
		}
	}
	write_unlock_bh(&idev->lock);

	return -ENOENT;
}

int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
{
	struct inet6_dev *idev;
	int err;

	ASSERT_RTNL();

	idev = __in6_dev_get(dev);
	if (!idev)
		err = -ENODEV;
	else
		err = __ipv6_dev_mc_dec(idev, addr);

	return err;
}
EXPORT_SYMBOL(ipv6_dev_mc_dec);

/*
 *	check if the interface/address pair is valid
 */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
			 const struct in6_addr *src_addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;
	bool rv = false;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev) {
		read_lock_bh(&idev->lock);
		for (mc = idev->mc_list; mc; mc = mc->next) {
			if (ipv6_addr_equal(&mc->mca_addr, group))
				break;
		}
		if (mc) {
			if (src_addr && !ipv6_addr_any(src_addr)) {
				struct ip6_sf_list *psf;

				spin_lock_bh(&mc->mca_lock);
				for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
						break;
				}
				if (psf)
					rv = psf->sf_count[MCAST_INCLUDE] ||
						psf->sf_count[MCAST_EXCLUDE] !=
						mc->mca_sfcount[MCAST_EXCLUDE];
				else
					rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
				spin_unlock_bh(&mc->mca_lock);
			} else
				rv = true; /* don't filter unspecified source */
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return rv;
}
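
/* The timer helpers below share one refcounting convention:
 * mod_timer() returning 0 means the timer was not pending, so a new
 * idev reference is taken on its behalf; del_timer() returning
 * nonzero means a pending timer was killed, so its reference is
 * dropped.
 */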
static void mld_gq_start_timer(struct inet6_dev *idev)
{
	unsigned long tv = prandom_u32() % idev->mc_maxdelay;

	idev->mc_gq_running = 1;
	if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

static void mld_gq_stop_timer(struct inet6_dev *idev)
{
	idev->mc_gq_running = 0;
	if (del_timer(&idev->mc_gq_timer))
		__in6_dev_put(idev);
}

static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = prandom_u32() % delay;

	if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

static void mld_ifc_stop_timer(struct inet6_dev *idev)
{
	idev->mc_ifc_count = 0;
	if (del_timer(&idev->mc_ifc_timer))
		__in6_dev_put(idev);
}

static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = prandom_u32() % delay;

	if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

static void mld_dad_stop_timer(struct inet6_dev *idev)
{
	if (del_timer(&idev->mc_dad_timer))
		__in6_dev_put(idev);
}

/*
 *	IGMP handling (alias multicast ICMPv6 messages)
 */

static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start timer for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (del_timer(&ma->mca_timer)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (delay >= resptime)
		delay = prandom_u32() % resptime;

	ma->mca_timer.expires = jiffies + delay;
	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING;
}

/* mark EXCLUDE-mode sources */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
			     const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				scount++;
				break;
			}
		}
	}
	pmc->mca_flags &= ~MAF_GSQUERY;
	if (scount == nsrcs)	/* all sources excluded */
		return false;
	return true;
}

static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
			    const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	if (pmc->mca_sfmode == MCAST_EXCLUDE)
		return mld_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */

	scount = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
		}
	}
	if (!scount) {
		pmc->mca_flags &= ~MAF_GSQUERY;
		return false;
	}
	pmc->mca_flags |= MAF_GSQUERY;
	return true;
}
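
/* A fixed MLD version can be forced per device or globally via sysctl,
 * e.g. (hypothetical interface name):
 *	echo 1 > /proc/sys/net/ipv6/conf/eth0/force_mld_version
 * As the lookup below shows, a nonzero 'all' setting overrides the
 * per-device one.
 */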
static int mld_force_mld_version(const struct inet6_dev *idev)
{
	/* Normally, both are 0 here. If enforcement of a particular
	 * version is being used, the individual device setting has
	 * lower precedence than the 'all' device one
	 * (.../conf/all/force_mld_version).
	 */
	if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
		return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
	else
		return idev->cnf.force_mld_version;
}

static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
{
	return mld_force_mld_version(idev) == 2;
}

static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
{
	return mld_force_mld_version(idev) == 1;
}

static bool mld_in_v1_mode(const struct inet6_dev *idev)
{
	if (mld_in_v2_mode_only(idev))
		return false;
	if (mld_in_v1_mode_only(idev))
		return true;
	if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
		return true;

	return false;
}

static void mld_set_v1_mode(struct inet6_dev *idev)
{
	/* RFC3810, relevant sections:
	 *  - 9.1. Robustness Variable
	 *  - 9.2. Query Interval
	 *  - 9.3. Query Response Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 */
	unsigned long switchback;

	switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;

	idev->mc_v1_seen = jiffies + switchback;
}

static void mld_update_qrv(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.8. QRV (Querier's Robustness Variable)
	 *  - 9.1. Robustness Variable
	 */

	/* The value of the Robustness Variable MUST NOT be zero,
	 * and SHOULD NOT be one. Catch this here if we ever run
	 * into such a case in future.
	 */
	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
	WARN_ON(idev->mc_qrv == 0);

	if (mlh2->mld2q_qrv > 0)
		idev->mc_qrv = mlh2->mld2q_qrv;

	if (unlikely(idev->mc_qrv < min_qrv)) {
		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
				     idev->mc_qrv, min_qrv);
		idev->mc_qrv = min_qrv;
	}
}

static void mld_update_qi(struct inet6_dev *idev,
			  const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
	 *  - 9.2. Query Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 *    (the [Query Interval] in the last Query received)
	 */
	unsigned long mc_qqi;

	if (mlh2->mld2q_qqic < 128) {
		mc_qqi = mlh2->mld2q_qqic;
	} else {
		unsigned long mc_man, mc_exp;

		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);

		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
		/* e.g. QQIC == 0x8f: exp = 0 and mant = 15, so
		 * mc_qqi = (15 | 0x10) << (0 + 3) = 248 seconds
		 */
	}

	idev->mc_qi = mc_qqi * HZ;
}

static void mld_update_qri(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.3. Maximum Response Code
	 *  - 9.3. Query Response Interval
	 */
	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
}

static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
			  unsigned long *max_delay, bool v1_query)
{
	unsigned long mldv1_md;

	/* Ignore v1 queries */
	if (mld_in_v2_mode_only(idev))
		return -EINVAL;

	mldv1_md = ntohs(mld->mld_maxdelay);

	/* When in MLDv1 fallback and a MLDv2 router start-up being
	 * unaware of current MLDv1 operation, the MRC == MRD mapping
	 * only works when the exponential algorithm is not being
	 * used (as MLDv1 is unaware of such things).
	 *
	 * According to the RFC author, the MLDv2 implementations
	 * he's aware of all use a MRC < 32768 on start up queries.
	 *
	 * Thus, should we *ever* encounter something else larger
	 * than that, just assume the maximum possible within our
	 * reach.
	 */
	if (!v1_query)
		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);

	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);

	/* MLDv1 router present: we need to go into v1 mode *only*
	 * when an MLDv1 query is received as per section 9.12. of
	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
	 * queries MUST be of exactly 24 octets.
	 */
	if (v1_query)
		mld_set_v1_mode(idev);

	/* cancel MLDv2 report timer */
	mld_gq_stop_timer(idev);
	/* cancel the interface change timer */
	mld_ifc_stop_timer(idev);
	/* clear deleted report items */
	mld_clear_delrec(idev);

	return 0;
}

static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
			  unsigned long *max_delay)
{
	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);

	mld_update_qrv(idev, mld);
	mld_update_qi(idev, mld);
	mld_update_qri(idev, mld);

	idev->mc_maxdelay = *max_delay;

	return 0;
}
/* called with rcu_read_lock() */
int igmp6_event_query(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = NULL;
	struct ifmcaddr6 *ma;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int group_type;
	int mark = 0;
	int len, err;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		return -EINVAL;

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -= skb_network_header_len(skb);

	/* RFC3810 6.2
	 * Upon reception of an MLD message that contains a Query, the node
	 * checks if the source address of the message is a valid link-local
	 * address, if the Hop Limit is set to 1, and if the Router Alert
	 * option is present in the Hop-By-Hop Options header of the IPv6
	 * packet.  If any of these checks fails, the packet is dropped.
	 */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
	    ipv6_hdr(skb)->hop_limit != 1 ||
	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);
	if (!idev)
		return 0;

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST))
		return -EINVAL;

	if (len < MLD_V1_QUERY_LEN) {
		return -EINVAL;
	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
		err = mld_process_v1(idev, mld, &max_delay,
				     len == MLD_V1_QUERY_LEN);
		if (err < 0)
			return err;
	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);

		if (!pskb_may_pull(skb, srcs_offset))
			return -EINVAL;

		mlh2 = (struct mld2_query *)skb_transport_header(skb);

		err = mld_process_v2(idev, mlh2, &max_delay);
		if (err < 0)
			return err;

		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				return -EINVAL; /* no sources allowed */

			mld_gq_start_timer(idev);
			return 0;
		}
		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
				return -EINVAL;

			mlh2 = (struct mld2_query *)skb_transport_header(skb);
			mark = 1;
		}
	} else {
		return -EINVAL;
	}

	read_lock_bh(&idev->lock);
	if (group_type == IPV6_ADDR_ANY) {
		for (ma = idev->mc_list; ma; ma = ma->next) {
			spin_lock_bh(&ma->mca_lock);
			igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
		}
	} else {
		for (ma = idev->mc_list; ma; ma = ma->next) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))
				continue;
			spin_lock_bh(&ma->mca_lock);
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);

	return 0;
}
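
/* MLDv1-style report suppression: when another node reports a group we
 * are a member of, the pending report timer for that group is stopped
 * below, so that only one member per link answers a query.
 */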
/* called with rcu_read_lock() */
int igmp6_event_report(struct sk_buff *skb)
{
	struct ifmcaddr6 *ma;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		return 0;

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)
		return 0;

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
		return -EINVAL;

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with not link local source */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);
	if (!idev)
		return -ENODEV;

	/*
	 *	Cancel the timer for this group
	 */

	read_lock_bh(&idev->lock);
	for (ma = idev->mc_list; ma; ma = ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			spin_lock(&ma->mca_lock);
			if (del_timer(&ma->mca_timer))
				refcount_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
			spin_unlock(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);
	return 0;
}

static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
		  int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
				return true;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return false;
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return false;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return false;
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return false;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return false;
}

static int
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip6_sf_list *psf;
	int scount = 0;

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
		       struct net_device *dev,
		       const struct in6_addr *saddr,
		       const struct in6_addr *daddr,
		       int proto, int len)
{
	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, 0, 0);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = inet6_sk(sk)->hop_limit;

	hdr->saddr = *saddr;
	hdr->daddr = *daddr;
}
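
/* On-the-wire layout of the MLDv2 report assembled by mld_newpack()
 * and add_grec()/add_grhead() below:
 *
 *	[ IPv6 header | Hop-by-Hop ext header carrying the Router
 *	  Alert option (ra[]) | MLD2 report header | group records ]
 *
 * Reports are sent to the all-MLDv2-routers address (mld2_all_mcr).
 */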
static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
{
	struct net_device *dev = idev->dev;
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct sk_buff *skb;
	struct mld2_report *pmr;
	struct in6_addr addr_buf;
	const struct in6_addr *saddr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	unsigned int size = mtu + hlen + tlen;
	int err;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };

	/* we assume size > sizeof(ra) here */
	/* limit our allocations to order-0 page */
	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
	skb = sock_alloc_send_skb(sk, size, 1, &err);

	if (!skb)
		return NULL;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	skb_put_data(skb, ra, sizeof(ra));

	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;
	return skb;
}

static void mld_sendpack(struct sk_buff *skb)
{
	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	int err;
	struct flowi6 fl6;
	struct dst_entry *dst;

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);
	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);

	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
		      sizeof(*pip6);
	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	err = 0;
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	if (err)
		goto err_out;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
	}

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
}
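
/* Append a fresh group record header for pmc to the report in *skb,
 * allocating a new packet when skb is NULL, and bump mld2r_ngrec.
 */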
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
				  int type, struct mld2_grec **ppgr, unsigned int mtu)
{
	struct mld2_report *pmr;
	struct mld2_grec *pgr;

	if (!skb) {
		skb = mld_newpack(pmc->idev, mtu);
		if (!skb)
			return NULL;
	}
	pgr = skb_put(skb, sizeof(struct mld2_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
	*ppgr = pgr;
	return skb;
}

#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
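
/* Build one group record of the given type into *skb.  gdeleted and
 * sdeleted select the tombstone variants (whole group resp. single
 * sources already removed); crsend forces a record out even for
 * sources that is_in() would skip, which the initial unsolicited
 * reports rely on.
 */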
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
				int type, int gdeleted, int sdeleted, int crsend)
{
	struct inet6_dev *idev = pmc->idev;
	struct net_device *dev = idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr = NULL;
	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;
	unsigned int mtu;

	if (pmc->mca_flags & MAF_NOREPORT)
		return skb;

	mtu = READ_ONCE(dev->mtu);
	if (mtu < IPV6_MIN_MTU)
		return skb;

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		   type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	if (!*psf_list)
		goto empty_source;

	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = *psf_list; psf; psf = psf_next) {
		struct in6_addr *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC3810 6.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
		     (!gdeleted && pmc->mca_crcount)) &&
		    (type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first*sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	/* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;
		scount++; stotal++;
		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->mca_crcount || isquery || crsend) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				mld_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
	return skb;
}

static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	read_lock_bh(&idev->lock);
	if (!pmc) {
		for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
			if (pmc->mca_flags & MAF_NOREPORT)
				continue;
			spin_lock_bh(&pmc->mca_lock);
			if (pmc->mca_sfcount[MCAST_EXCLUDE])
				type = MLD2_MODE_IS_EXCLUDE;
			else
				type = MLD2_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
			spin_unlock_bh(&pmc->mca_lock);
		}
	} else {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_MODE_IS_EXCLUDE;
		else
			type = MLD2_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0, 0);
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (skb)
		mld_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void mld_clear_zeros(struct ip6_sf_list **ppsf)
{
	struct ip6_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = *ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}
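
/* Emit the pending state-change report: BLOCK/TO_IN records for
 * tombstoned groups and sources first, then ALLOW/BLOCK and filter
 * mode change records for live groups.  Each record's crcount is
 * decremented as it goes out, and entries that reach zero are reaped.
 */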
static void mld_send_cr(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	read_lock_bh(&idev->lock);
	spin_lock(&idev->mc_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
		pmc_next = pmc->next;
		if (pmc->mca_sfmode == MCAST_INCLUDE) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
		}
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
				type = MLD2_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0, 0);
			}
			pmc->mca_crcount--;
			if (pmc->mca_crcount == 0) {
				mld_clear_zeros(&pmc->mca_tomb);
				mld_clear_zeros(&pmc->mca_sources);
			}
		}
		if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
		    !pmc->mca_sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				idev->mc_tomb = pmc_next;
			in6_dev_put(pmc->idev);
			kfree(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock(&idev->mc_lock);

	/* change recs */
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_ALLOW_NEW_SOURCES;
		} else {
			type = MLD2_ALLOW_NEW_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */

		/* filter mode changes */
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE)
				type = MLD2_CHANGE_TO_EXCLUDE;
			else
				type = MLD2_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
			pmc->mca_crcount--;
		}
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (!skb)
		return;
	(void) mld_sendpack(skb);
}
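
/* MLDv1 transmit path.  type is ICMPV6_MGM_REPORT or, for leaves,
 * ICMPV6_MGM_REDUCTION (a "done" message), the latter being sent to
 * the link-local all-routers address rather than to the group itself.
 */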
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	const struct in6_addr *snd_addr, *saddr;
	struct in6_addr addr_buf;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err, len, payload_len, full_len;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
	struct flowi6 fl6;
	struct dst_entry *dst;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	rcu_read_lock();
	IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
			 IPSTATS_MIB_OUT, full_len);
	rcu_read_unlock();

	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);

	if (!skb) {
		rcu_read_lock();
		IP6_INC_STATS(net, __in6_dev_get(dev),
			      IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		return;
	}
	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	skb_put_data(skb, ra, sizeof(ra));

	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 IPPROTO_ICMPV6,
					 csum_partial(hdr, len, 0));

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto err_out;
	}

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

static void mld_send_initial_cr(struct inet6_dev *idev)
{
	struct sk_buff *skb;
	struct ifmcaddr6 *pmc;
	int type;

	if (mld_in_v1_mode(idev))
		return;

	skb = NULL;
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_CHANGE_TO_EXCLUDE;
		else
			type = MLD2_ALLOW_NEW_SOURCES;
		skb = add_grec(skb, pmc, type, 0, 0, 1);
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (skb)
		mld_sendpack(skb);
}

void ipv6_mc_dad_complete(struct inet6_dev *idev)
{
	idev->mc_dad_count = idev->mc_qrv;
	if (idev->mc_dad_count) {
		mld_send_initial_cr(idev);
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_timer(idev,
					    unsolicited_report_interval(idev));
	}
}

static void mld_dad_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);

	mld_send_initial_cr(idev);
	if (idev->mc_dad_count) {
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_timer(idev,
					    unsolicited_report_interval(idev));
	}
	in6_dev_put(idev);
}

static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->mca_sources = psf->sf_next;
		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !mld_in_v1_mode(idev)) {
			psf->sf_crcount = idev->mc_qrv;
			psf->sf_next = pmc->mca_tomb;
			pmc->mca_tomb = psf;
			rv = 1;
		} else
			kfree(psf);
	}
	return rv;
}
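
/* Remove sfcount sources (psfsrc[]) in mode sfmode from the interface
 * filter for group *pmca.  delta == 0 means a whole-filter update that
 * also drops one reference on mca_sfcount[sfmode]; delta != 0 means
 * only the source list changes, as in the single-source paths above.
 */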
static int
ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
	       int sfmode, int sfcount, const struct in6_addr *psfsrc,
	       int delta)
{
	struct ifmcaddr6 *pmc;
	int changerec = 0;
	int i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);
	sf_markstate(pmc);
	if (!delta) {
		if (!pmc->mca_sfcount[sfmode]) {
			spin_unlock_bh(&pmc->mca_lock);
			read_unlock_bh(&idev->lock);
			return -EINVAL;
		}
		pmc->mca_sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec)
		mld_ifc_event(pmc->idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
			   const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;

		psf->sf_addr = *psfsrc;
		if (psf_prev)
			psf_prev->sf_next = psf;
		else
			pmc->mca_sources = psf;
	}
	psf->sf_count[sfmode]++;
	return 0;
}

static void sf_markstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}

static int sf_setstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *dpsf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	int qrv = pmc->idev->mc_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip6_sf_list *prev = NULL;

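				/* The source was inactive and is active
				 * again: remove any pending "delete"
				 * record for it before scheduling the
				 * change report (sf_crcount below).
				 */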
				for (dpsf = pmc->mca_tomb; dpsf;
				     dpsf = dpsf->sf_next) {
					if (ipv6_addr_equal(&dpsf->sf_addr,
							    &psf->sf_addr))
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->mca_tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
				if (ipv6_addr_equal(&dpsf->sf_addr,
						    &psf->sf_addr))
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->mca_lock held by callers */
				dpsf->sf_next = pmc->mca_tomb;
				pmc->mca_tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}

/*
 * Add multicast source filter list to the interface list
 */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int isexclude;
	int i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);

	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->mca_sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->mca_sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc))
		mld_ifc_event(idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}

static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *nextpsf;

	for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->mca_tomb = NULL;
	for (psf = pmc->mca_sources; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->mca_sources = NULL;
	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
}
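/*
 * MLDv1-style join: send an unsolicited report right away, then arm
 * mca_timer with a random delay within the unsolicited report interval
 * so the report is retransmitted in case the first one is lost.
 * Groups flagged MAF_NOREPORT (such as the link-local all-nodes group)
 * are never reported.
 */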
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = prandom_u32() % unsolicited_report_interval(ma->idev);

	spin_lock_bh(&ma->mca_lock);
	if (del_timer(&ma->mca_timer)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	spin_unlock_bh(&ma->mca_lock);
}

static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev)
{
	int err;

	write_lock_bh(&iml->sflock);
	if (!iml->sflist) {
		/* any-source empty exclude case */
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	} else {
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
				     iml->sflist->sl_count,
				     iml->sflist->sl_addr, 0);
		sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
		iml->sflist = NULL;
	}
	write_unlock_bh(&iml->sflock);
	return err;
}

static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
	if (mld_in_v1_mode(ma->idev)) {
		if (ma->mca_flags & MAF_LAST_REPORTER)
			igmp6_send(&ma->mca_addr, ma->idev->dev,
				   ICMPV6_MGM_REDUCTION);
	} else {
		mld_add_delrec(ma->idev, ma);
		mld_ifc_event(ma->idev);
	}
}

static void mld_gq_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);

	idev->mc_gq_running = 0;
	mld_send_report(idev, NULL);
	in6_dev_put(idev);
}

static void mld_ifc_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);

	mld_send_cr(idev);
	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_timer(idev,
					    unsolicited_report_interval(idev));
	}
	in6_dev_put(idev);
}

static void mld_ifc_event(struct inet6_dev *idev)
{
	if (mld_in_v1_mode(idev))
		return;
	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_timer(idev, 1);
}

static void igmp6_timer_handler(struct timer_list *t)
{
	struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);

	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);

	spin_lock(&ma->mca_lock);
	ma->mca_flags |= MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	spin_unlock(&ma->mca_lock);
	ma_put(ma);
}

/* Device changing type */

void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);
}

void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}

/* Device going down */

void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);

	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);

	/* Stop the timers only after the groups have been dropped,
	 * or we would start them again from mld_ifc_event().
	 */
	mld_ifc_stop_timer(idev);
	mld_gq_stop_timer(idev);
	mld_dad_stop_timer(idev);
	read_unlock_bh(&idev->lock);
}

/* Reset MLD parameters to their RFC 3810 defaults and forget any
 * MLDv1 querier previously seen on the link.
 */
static void ipv6_mc_reset(struct inet6_dev *idev)
{
	idev->mc_qrv = sysctl_mld_qrv;
	idev->mc_qi = MLD_QI_DEFAULT;
	idev->mc_qri = MLD_QRI_DEFAULT;
	idev->mc_v1_seen = 0;
	idev->mc_maxdelay = unsolicited_report_interval(idev);
}

/* Device going up */

void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	read_lock_bh(&idev->lock);
	ipv6_mc_reset(idev);
	for (i = idev->mc_list; i; i = i->next) {
		mld_del_delrec(idev, i);
		igmp6_group_added(i);
	}
	read_unlock_bh(&idev->lock);
}

/* IPv6 device initialization. */

void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	write_lock_bh(&idev->lock);
	spin_lock_init(&idev->mc_lock);
	idev->mc_gq_running = 0;
	timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
	idev->mc_tomb = NULL;
	idev->mc_ifc_count = 0;
	timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
	timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
	ipv6_mc_reset(idev);
	write_unlock_bh(&idev->lock);
}

/*
 * Device is about to be destroyed: clean up.
 */

void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate timers */
	ipv6_mc_down(idev);
	mld_clear_delrec(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	write_lock_bh(&idev->lock);
	while ((i = idev->mc_list) != NULL) {
		idev->mc_list = i->next;

		write_unlock_bh(&idev->lock);
		ma_put(i);
		write_lock_bh(&idev->lock);
	}
	write_unlock_bh(&idev->lock);
}

static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc;

	ASSERT_RTNL();

	if (mld_in_v1_mode(idev)) {
		read_lock_bh(&idev->lock);
		for (pmc = idev->mc_list; pmc; pmc = pmc->next)
			igmp6_join_group(pmc);
		read_unlock_bh(&idev->lock);
	} else
		mld_send_report(idev, NULL);
}

static int ipv6_mc_netdev_event(struct notifier_block *this,
				unsigned long event,
				void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct inet6_dev *idev = __in6_dev_get(dev);

	switch (event) {
	case NETDEV_RESEND_IGMP:
		if (idev)
			ipv6_mc_rejoin_groups(idev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block igmp6_netdev_notifier = {
	.notifier_call = ipv6_mc_netdev_event,
};

#ifdef CONFIG_PROC_FS
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)

static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;

		idev = __in6_dev_get(state->dev);
		if (!idev)
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (im) {
			state->idev = idev;
			break;
		}
		read_unlock_bh(&idev->lock);
	}
	return im;
}

static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = im->next;
	while (!im) {
		if (likely(state->idev))
			read_unlock_bh(&state->idev->lock);

		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		read_lock_bh(&state->idev->lock);
		im = state->idev->mc_list;
	}
	return im;
}
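/*
 * /proc/net/igmp6 iteration: walk the netdev list under RCU and keep
 * the current device's idev->lock read-held between ->start and ->stop
 * so that ->show can safely dereference the ifmcaddr6 entries.
 */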
static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);

	if (im)
		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}

static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);

	++*pos;
	return im;
}

static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	if (likely(state->idev)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_timer.expires - jiffies) : 0);
	return 0;
}

static const struct seq_operations igmp6_mc_seq_ops = {
	.start	= igmp6_mc_seq_start,
	.next	= igmp6_mc_seq_next,
	.stop	= igmp6_mc_seq_stop,
	.show	= igmp6_mc_seq_show,
};

struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;
};

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)

static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;

		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (likely(im)) {
			spin_lock_bh(&im->mca_lock);
			psf = im->mca_sources;
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->mca_lock);
		}
		read_unlock_bh(&idev->lock);
	}
	return psf;
}

static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = state->im->next;
		while (!state->im) {
			if (likely(state->idev))
				read_unlock_bh(&state->idev->lock);

			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;
			read_lock_bh(&state->idev->lock);
			state->im = state->idev->mc_list;
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->mca_lock);
		psf = state->im->mca_sources;
	}
out:
	return psf;
}
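/*
 * /proc/net/mcfilter6 iterates one level deeper than igmp6:
 * device -> group -> source.  The device's idev->lock and the current
 * group's mca_lock are both held while a source list is being walked.
 */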
static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);

	if (psf)
		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_sf_list *psf;

	if (v == SEQ_START_TOKEN)
		psf = igmp6_mcf_get_first(seq);
	else
		psf = igmp6_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (likely(state->im)) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = NULL;
	}
	if (likely(state->idev)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp6_mcf_seq_ops = {
	.start	= igmp6_mcf_seq_start,
	.next	= igmp6_mcf_seq_next,
	.stop	= igmp6_mcf_seq_stop,
	.show	= igmp6_mcf_seq_show,
};

static int __net_init igmp6_proc_init(struct net *net)
{
	int err;

	err = -ENOMEM;
	if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
			     sizeof(struct igmp6_mc_iter_state)))
		goto out;
	if (!proc_create_net("mcfilter6", 0444, net->proc_net,
			     &igmp6_mcf_seq_ops,
			     sizeof(struct igmp6_mcf_iter_state)))
		goto out_proc_net_igmp6;

	err = 0;
out:
	return err;

out_proc_net_igmp6:
	remove_proc_entry("igmp6", net->proc_net);
	goto out;
}

static void __net_exit igmp6_proc_exit(struct net *net)
{
	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);
}
#else
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
static inline void igmp6_proc_exit(struct net *net)
{
}
#endif

static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	/* MLD messages are always sent with a hop limit of 1 (RFC 2710) */
	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;

	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
		       err);
		goto out_sock_create;
	}

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create_autojoin;

	return 0;

out_sock_create_autojoin:
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
out:
	return err;
}
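/*
 * Per-namespace teardown mirrors igmp6_net_init().  The netdev notifier
 * used for NETDEV_RESEND_IGMP is global rather than per-net, so it is
 * registered and unregistered separately in igmp6_late_init() and
 * igmp6_late_cleanup() below.
 */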
static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
	igmp6_proc_exit(net);
}

static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};

int __init igmp6_init(void)
{
	return register_pernet_subsys(&igmp6_net_ops);
}

int __init igmp6_late_init(void)
{
	return register_netdevice_notifier(&igmp6_netdev_notifier);
}

void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
}

void igmp6_late_cleanup(void)
{
	unregister_netdevice_notifier(&igmp6_netdev_notifier);
}