/*
 *	Multicast support for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *
 *	yoshfuji	: fix format of router-alert option
 *	YOSHIFUJI Hideaki @USAGI:
 *		Fixed source address for MLD message based on
 *		<draft-ietf-magma-mld-source-05.txt>.
 *	YOSHIFUJI Hideaki @USAGI:
 *		- Ignore Queries for invalid addresses.
 *		- MLD for link-local addresses.
 *	David L Stevens <dlstevens@us.ibm.com>:
 *		- MLDv2 support
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/times.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/route.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <net/mld.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>

#include <net/ip6_checksum.h>

/* Set to 3 to get tracing... */
#define MCAST_DEBUG 2

#if MCAST_DEBUG >= 3
#define MDBG(x) printk x
#else
#define MDBG(x)
#endif

/* Ensure that we have struct in6_addr aligned on 32bit word. */
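/* BUILD_BUG_ON_NULL() folds to a null pointer when its argument is zero
 * and fails to compile otherwise, so a misaligned field below is caught
 * at build time instead of faulting at runtime.
 */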
static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
	BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
	BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
};

static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;

/* Big mc list lock for all the sockets */
static DEFINE_SPINLOCK(ipv6_sk_mc_lock);

static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void igmp6_timer_handler(unsigned long data);

static void mld_gq_timer_expire(unsigned long data);
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
static void mld_clear_delrec(struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev);


#define IGMP6_UNSOLICITED_IVAL	(10*HZ)
#define MLD_QRV_DEFAULT		2

#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
		(idev)->cnf.force_mld_version == 1 || \
		((idev)->mc_v1_seen && \
		 time_before(jiffies, (idev)->mc_v1_seen)))

#define IPV6_MLD_MAX_MSF	64

int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;

/*
 *	socket join on multicast group
 */

#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next))

int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct net_device *dev = NULL;
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int err;

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc_lst) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			rcu_read_unlock();
			return -EADDRINUSE;
		}
	}
	rcu_read_unlock();

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);

	if (mc_lst == NULL)
		return -ENOMEM;

	mc_lst->next = NULL;
	mc_lst->addr = *addr;

	rcu_read_lock();
	if (ifindex == 0) {
		struct rt6_info *rt;
		rt = rt6_lookup(net, addr, NULL, 0, 0);
		if (rt) {
			dev = rt->dst.dev;
			dst_release(&rt->dst);
		}
	} else
		dev = dev_get_by_index_rcu(net, ifindex);

	if (dev == NULL) {
		rcu_read_unlock();
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return -ENODEV;
	}

	mc_lst->ifindex = dev->ifindex;
	mc_lst->sfmode = MCAST_EXCLUDE;
	rwlock_init(&mc_lst->sflock);
	mc_lst->sflist = NULL;

	/*
	 *	now add/increase the group membership on the device
	 */

	err = ipv6_dev_mc_inc(dev, addr);

	if (err) {
		rcu_read_unlock();
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return err;
	}

	spin_lock(&ipv6_sk_mc_lock);
	mc_lst->next = np->ipv6_mc_list;
	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
	spin_unlock(&ipv6_sk_mc_lock);

	rcu_read_unlock();

	return 0;
}
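/* Typical entry point, sketched for orientation (not part of this file;
 * field names as exposed by glibc): userspace joins with
 *
 *	struct ipv6_mreq mreq = { .ipv6mr_multiaddr = grp,
 *				  .ipv6mr_interface = ifindex };
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
 *
 * which reaches ipv6_sock_mc_join() through ipv6_setsockopt().
 */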
/*
 *	socket leave on multicast group
 */
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_mc_socklist __rcu **lnk;
	struct net *net = sock_net(sk);

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	spin_lock(&ipv6_sk_mc_lock);
	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = rcu_dereference_protected(*lnk,
			lockdep_is_held(&ipv6_sk_mc_lock))) != NULL;
	     lnk = &mc_lst->next) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			struct net_device *dev;

			*lnk = mc_lst->next;
			spin_unlock(&ipv6_sk_mc_lock);

			rcu_read_lock();
			dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
			if (dev != NULL) {
				struct inet6_dev *idev = __in6_dev_get(dev);

				(void) ip6_mc_leave_src(sk, mc_lst, idev);
				if (idev)
					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
			} else
				(void) ip6_mc_leave_src(sk, mc_lst, NULL);
			rcu_read_unlock();
			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
			kfree_rcu(mc_lst, rcu);
			return 0;
		}
	}
	spin_unlock(&ipv6_sk_mc_lock);

	return -EADDRNOTAVAIL;
}

/* called with rcu_read_lock(); on success, returns with idev->lock
 * held for read (the caller must read_unlock_bh() it)
 */
static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
					     const struct in6_addr *group,
					     int ifindex)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;

	if (ifindex == 0) {
		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);

		if (rt) {
			dev = rt->dst.dev;
			dst_release(&rt->dst);
		}
	} else
		dev = dev_get_by_index_rcu(net, ifindex);

	if (!dev)
		return NULL;
	idev = __in6_dev_get(dev);
	if (!idev)
		return NULL;
	read_lock_bh(&idev->lock);
	if (idev->dead) {
		read_unlock_bh(&idev->lock);
		return NULL;
	}
	return idev;
}

void ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct net *net = sock_net(sk);

	spin_lock(&ipv6_sk_mc_lock);
	while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
			lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
		struct net_device *dev;

		np->ipv6_mc_list = mc_lst->next;
		spin_unlock(&ipv6_sk_mc_lock);

		rcu_read_lock();
		dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
		if (dev) {
			struct inet6_dev *idev = __in6_dev_get(dev);

			(void) ip6_mc_leave_src(sk, mc_lst, idev);
			if (idev)
				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
		} else
			(void) ip6_mc_leave_src(sk, mc_lst, NULL);
		rcu_read_unlock();

		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
		kfree_rcu(mc_lst, rcu);

		spin_lock(&ipv6_sk_mc_lock);
	}
	spin_unlock(&ipv6_sk_mc_lock);
}
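/* Delta API: one source at a time. ip6_mc_source() backs the
 * MCAST_JOIN_SOURCE_GROUP / MCAST_LEAVE_SOURCE_GROUP and
 * MCAST_(UN)BLOCK_SOURCE socket options; "add" selects insert vs
 * delete, and "omode" is the filter mode implied by the option.
 */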
int ip6_mc_source(int add, int omode, struct sock *sk,
	struct group_source_req *pgsr)
{
	struct in6_addr *source, *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int i, j, rv;
	int leavegroup = 0;
	int pmclocked = 0;
	int err;

	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;

	for_each_pmc_rcu(inet6, pmc) {
		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
		pmc->sfmode = omode;
	}

	write_lock(&pmc->sflock);
	pmclocked = 1;

	psl = pmc->sflist;
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;	/* nonzero means "not found" until proven otherwise */
		for (i = 0; i < psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], source,
				sizeof(struct in6_addr));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip6_mc_del_src(idev, group, omode, 1, source, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip6_sf_socklist *newpsl;
		int count = IP6_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP6_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
		}
		pmc->sflist = psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], source, sizeof(struct in6_addr));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = *source;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
	if (pmclocked)
		write_unlock(&pmc->sflock);
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	if (leavegroup)
		return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
	return err;
}
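/* Full-state API: ip6_mc_msfilter() backs setsockopt(MCAST_MSFILTER)
 * and replaces the socket's whole source list in one call, adding the
 * new filter on the interface before deleting the old one so the group
 * is never left unfiltered in between.
 */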
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
{
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, err;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;
	if (gsf->gf_fmode != MCAST_INCLUDE &&
	    gsf->gf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = 0;

	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (gsf->gf_numsrc) {
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
							  GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i = 0; i < newpsl->sl_count; ++i) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
			newpsl->sl_addr[i] = psin6->sin6_addr;
		}
		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
			newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
	}

	write_lock(&pmc->sflock);
	psl = pmc->sflist;
	if (psl) {
		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
	} else
		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	pmc->sflist = newpsl;
	pmc->sfmode = gsf->gf_fmode;
	write_unlock(&pmc->sflock);
	err = 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
	return err;
}
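/* getsockopt(MCAST_MSFILTER) reader. Note the two-step copy below: the
 * header goes out first with the full source count in gf_numsrc, then
 * at most the caller-supplied number of sockaddr_storage entries, so
 * userspace can size a second call from a truncated first one.
 */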
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;
	/*
	 * changes to the ipv6_mc_list require the socket lock and
	 * a read lock on ipv6_sk_mc_lock. We have the socket lock,
	 * so reading the list is safe.
	 */

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(group, &pmc->addr))
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = pmc->sflist;
	count = psl ? psl->sl_count : 0;
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();

	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	/* changes to psl require the socket lock, a read lock on
	 * ipv6_sk_mc_lock and a write lock on pmc->sflock. We have
	 * the socket lock, so reading here is safe.
	 */
	for (i = 0; i < copycount; i++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	return err;
}

bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
		    const struct in6_addr *src_addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc;
	struct ip6_sf_socklist *psl;
	bool rv = true;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))
			break;
	}
	if (!mc) {
		rcu_read_unlock();
		return true;
	}
	read_lock(&mc->sflock);
	psl = mc->sflist;
	if (!psl) {
		rv = mc->sfmode == MCAST_EXCLUDE;
	} else {
		int i;

		for (i = 0; i < psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
				break;
		}
		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
			rv = false;
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = false;
	}
	read_unlock(&mc->sflock);
	rcu_read_unlock();

	return rv;
}

static void ma_put(struct ifmcaddr6 *mc)
{
	if (atomic_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);
		kfree(mc);
	}
}

static void igmp6_group_added(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	spin_lock_bh(&mc->mca_lock);
	if (!(mc->mca_flags&MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf);
	}
	spin_unlock_bh(&mc->mca_lock);

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
		return;

	if (MLD_V1_SEEN(mc->idev)) {
		igmp6_join_group(mc);
		return;
	}
	/* else v2 */

	mc->mca_crcount = mc->idev->mc_qrv;
	mld_ifc_event(mc->idev);
}

static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	spin_lock_bh(&mc->mca_lock);
	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);
	}

	if (mc->mca_flags & MAF_NOREPORT)
		goto done;
	spin_unlock_bh(&mc->mca_lock);

	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	spin_lock_bh(&mc->mca_lock);
	if (del_timer(&mc->mca_timer))
		atomic_dec(&mc->mca_refcnt);
done:
	ip6_mc_clear_src(mc);
	spin_unlock_bh(&mc->mca_lock);
}
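/* MLDv2 state-change reports must be retransmitted [Robustness Variable]
 * times (RFC 3810). Groups and sources that have been deleted but still
 * owe retransmissions are parked on idev->mc_tomb / pmc->mca_tomb with
 * mca_crcount / sf_crcount as the remaining-transmission counters; the
 * helpers below maintain those "tomb" lists.
 */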
/*
 *	deleted ifmcaddr6 manipulation
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
	if (!pmc)
		return;

	spin_lock_bh(&im->mca_lock);
	spin_lock_init(&pmc->mca_lock);
	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		pmc->mca_tomb = im->mca_tomb;
		pmc->mca_sources = im->mca_sources;
		im->mca_tomb = im->mca_sources = NULL;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->mca_crcount;
	}
	spin_unlock_bh(&im->mca_lock);

	spin_lock_bh(&idev->mc_lock);
	pmc->next = idev->mc_tomb;
	idev->mc_tomb = pmc;
	spin_unlock_bh(&idev->mc_lock);
}

static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
{
	struct ifmcaddr6 *pmc, *pmc_prev;
	struct ip6_sf_list *psf, *psf_next;

	spin_lock_bh(&idev->mc_lock);
	pmc_prev = NULL;
	for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			idev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&idev->mc_lock);

	if (pmc) {
		for (psf = pmc->mca_tomb; psf; psf = psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}
}

static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	spin_lock_bh(&idev->mc_lock);
	pmc = idev->mc_tomb;
	idev->mc_tomb = NULL;
	spin_unlock_bh(&idev->mc_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}

	/* clear dead sources, too */
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		struct ip6_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->mca_lock);
		psf = pmc->mca_tomb;
		pmc->mca_tomb = NULL;
		spin_unlock_bh(&pmc->mca_lock);
		for (; psf; psf = psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	read_unlock_bh(&idev->lock);
}
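/* Device-level membership below is refcounted via mca_users, so several
 * sockets (and in-kernel users such as addrconf's solicited-node joins)
 * can share one ifmcaddr6 per group and interface.
 */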
/*
 *	device multicast group inc (add if not found)
 */
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);

	if (idev == NULL)
		return -EINVAL;

	write_lock_bh(&idev->lock);
	if (idev->dead) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENODEV;
	}

	for (mc = idev->mc_list; mc; mc = mc->next) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			mc->mca_users++;
			write_unlock_bh(&idev->lock);
			ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
				NULL, 0);
			in6_dev_put(idev);
			return 0;
		}
	}

	/*
	 *	not found: create a new one.
	 */

	mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);

	if (mc == NULL) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);

	mc->mca_addr = *addr;
	mc->idev = idev;	/* (reference taken) */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	/* one reference for idev->mc_list, one for the ma_put() below */
	atomic_set(&mc->mca_refcnt, 2);
	spin_lock_init(&mc->mca_lock);

	/* initial mode is (EX, empty) */
	mc->mca_sfmode = MCAST_EXCLUDE;
	mc->mca_sfcount[MCAST_EXCLUDE] = 1;

	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	mc->next = idev->mc_list;
	idev->mc_list = mc;
	write_unlock_bh(&idev->lock);

	mld_del_delrec(idev, &mc->mca_addr);
	igmp6_group_added(mc);
	ma_put(mc);
	return 0;
}

/*
 *	device multicast group del
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *ma, **map;

	write_lock_bh(&idev->lock);
	for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
			if (--ma->mca_users == 0) {
				*map = ma->next;
				write_unlock_bh(&idev->lock);

				igmp6_group_dropped(ma);

				ma_put(ma);
				return 0;
			}
			write_unlock_bh(&idev->lock);
			return 0;
		}
	}
	write_unlock_bh(&idev->lock);

	return -ENOENT;
}

int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
{
	struct inet6_dev *idev;
	int err;

	rcu_read_lock();

	idev = __in6_dev_get(dev);
	if (!idev)
		err = -ENODEV;
	else
		err = __ipv6_dev_mc_dec(idev, addr);

	rcu_read_unlock();
	return err;
}

/*
 *	identify MLD packets for MLD filter exceptions
 */
bool ipv6_is_mld(struct sk_buff *skb, int nexthdr)
{
	struct icmp6hdr *pic;

	if (nexthdr != IPPROTO_ICMPV6)
		return false;

	if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
		return false;

	pic = icmp6_hdr(skb);

	switch (pic->icmp6_type) {
	case ICMPV6_MGM_QUERY:
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MLD2_REPORT:
		return true;
	default:
		break;
	}
	return false;
}

/*
 *	check if the interface/address pair is valid
 */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
			 const struct in6_addr *src_addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;
	bool rv = false;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev) {
		read_lock_bh(&idev->lock);
		for (mc = idev->mc_list; mc; mc = mc->next) {
			if (ipv6_addr_equal(&mc->mca_addr, group))
				break;
		}
		if (mc) {
			if (src_addr && !ipv6_addr_any(src_addr)) {
				struct ip6_sf_list *psf;

				spin_lock_bh(&mc->mca_lock);
				for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
						break;
				}
				if (psf)
					rv = psf->sf_count[MCAST_INCLUDE] ||
						psf->sf_count[MCAST_EXCLUDE] !=
						mc->mca_sfcount[MCAST_EXCLUDE];
				else
					rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
				spin_unlock_bh(&mc->mca_lock);
			} else
				rv = true; /* don't filter unspecified source */
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return rv;
}
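/* Both timers fire after a random delay, as required by the MLD specs
 * to spread responses from different hosts; the extra two jiffies give
 * a small margin beyond the computed expiry.
 */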
static void mld_gq_start_timer(struct inet6_dev *idev)
{
	int tv = net_random() % idev->mc_maxdelay;

	idev->mc_gq_running = 1;
	if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
{
	int tv = net_random() % delay;

	if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

/*
 *	IGMP handling (a.k.a. multicast ICMPv6 messages)
 */

static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start timer for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (delay >= resptime) {
		if (resptime)
			delay = net_random() % resptime;
		else
			delay = 1;
	}
	ma->mca_timer.expires = jiffies + delay;
	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING;
}

/* mark EXCLUDE-mode sources */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
			     const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				scount++;
				break;
			}
		}
	}
	pmc->mca_flags &= ~MAF_GSQUERY;
	if (scount == nsrcs)	/* all sources excluded */
		return false;
	return true;
}

static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
			    const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	if (pmc->mca_sfmode == MCAST_EXCLUDE)
		return mld_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */

	scount = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
		}
	}
	if (!scount) {
		pmc->mca_flags &= ~MAF_GSQUERY;
		return false;
	}
	pmc->mca_flags |= MAF_GSQUERY;
	return true;
}
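/* Query parsing below discriminates on ICMPv6 payload length: exactly
 * 24 octets is an MLDv1 query, 28 or more is MLDv2 (RFC 3810, section
 * 8.3.1); anything else is malformed. Seeing an MLDv1 query puts the
 * interface into v1 compatibility mode until mc_v1_seen expires.
 */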
/* called with rcu_read_lock() */
int igmp6_event_query(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = NULL;
	struct ifmcaddr6 *ma;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int group_type;
	int mark = 0;
	int len;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		return -EINVAL;

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -= skb_network_header_len(skb);

	/* Drop queries with a non-link-local source address */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);

	if (idev == NULL)
		return 0;

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST))
		return -EINVAL;

	if (len == 24) {
		int switchback;
		/* MLDv1 router present */

		/* Translate milliseconds to jiffies */
		max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;

		switchback = (idev->mc_qrv + 1) * max_delay;
		idev->mc_v1_seen = jiffies + switchback;

		/* cancel the interface change timer */
		idev->mc_ifc_count = 0;
		if (del_timer(&idev->mc_ifc_timer))
			__in6_dev_put(idev);
		/* clear deleted report items */
		mld_clear_delrec(idev);
	} else if (len >= 28) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);
		if (!pskb_may_pull(skb, srcs_offset))
			return -EINVAL;

		mlh2 = (struct mld2_query *)skb_transport_header(skb);
		max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
		if (!max_delay)
			max_delay = 1;
		idev->mc_maxdelay = max_delay;
		if (mlh2->mld2q_qrv)
			idev->mc_qrv = mlh2->mld2q_qrv;
		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				return -EINVAL; /* no sources allowed */

			mld_gq_start_timer(idev);
			return 0;
		}
		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
				return -EINVAL;

			mlh2 = (struct mld2_query *)skb_transport_header(skb);
			mark = 1;
		}
	} else
		return -EINVAL;

	read_lock_bh(&idev->lock);
	if (group_type == IPV6_ADDR_ANY) {
		for (ma = idev->mc_list; ma; ma = ma->next) {
			spin_lock_bh(&ma->mca_lock);
			igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
		}
	} else {
		for (ma = idev->mc_list; ma; ma = ma->next) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))
				continue;
			spin_lock_bh(&ma->mca_lock);
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);

	return 0;
}
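/* MLDv1 report suppression: hearing another listener's report for a
 * group cancels our own pending report timer for it, and clearing
 * MAF_LAST_REPORTER means we will not send the Done message on leave.
 */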
/* called with rcu_read_lock() */
int igmp6_event_report(struct sk_buff *skb)
{
	struct ifmcaddr6 *ma;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		return 0;

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)
		return 0;

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
		return -EINVAL;

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with a non-link-local source address */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);
	if (idev == NULL)
		return -ENODEV;

	/*
	 *	Cancel the timer for this group
	 */

	read_lock_bh(&idev->lock);
	for (ma = idev->mc_list; ma; ma = ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			spin_lock(&ma->mca_lock);
			if (del_timer(&ma->mca_timer))
				atomic_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
			spin_unlock(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);
	return 0;
}

static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
		  int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
				return true;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return false;
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return false;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return false;
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return false;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return false;
}

static int
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip6_sf_list *psf;
	int scount = 0;

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}
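/* The ra[] array below is a complete Hop-by-Hop extension header:
 * next-header ICMPv6, length 0 (i.e. 8 octets), a Router Alert option
 * with value 0 ("contains MLD message", RFC 2711), and PadN filling
 * the remaining two octets.
 */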
static struct sk_buff *mld_newpack(struct net_device *dev, int size)
{
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct sk_buff *skb;
	struct mld2_report *pmr;
	struct in6_addr addr_buf;
	const struct in6_addr *saddr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };

	/* we assume size > sizeof(ra) here */
	size += hlen + tlen;
	/* limit our allocations to order-0 page */
	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
	skb = sock_alloc_send_skb(sk, size, 1, &err);

	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_nd_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));

	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;
	return skb;
}

static void mld_sendpack(struct sk_buff *skb)
{
	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	int err;
	struct flowi6 fl6;
	struct dst_entry *dst;

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);
	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);

	payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
	mldlen = skb->tail - skb->transport_header;
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);

	err = 0;
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	if (err)
		goto err_out;

	payload_len = skb->len;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
		IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
	} else
		IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc, type, gdel, sdel);
}

static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, struct mld2_grec **ppgr)
{
	struct net_device *dev = pmc->idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr;

	if (!skb)
		skb = mld_newpack(dev, dev->mtu);
	if (!skb)
		return NULL;
	pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
	*ppgr = pgr;
	return skb;
}
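/* AVAILABLE() is the room left in the pending report: bounded by the
 * device MTU once the skb has a device, else by the skb tailroom.
 * add_grec() uses it to decide when to flush and start a new packet.
 */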
#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
	skb_tailroom(skb)) : 0)

static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct net_device *dev = pmc->idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr = NULL;
	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;

	if (pmc->mca_flags & MAF_NOREPORT)
		return skb;

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		   type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	if (!*psf_list)
		goto empty_source;

	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(dev, dev->mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = *psf_list; psf; psf = psf_next) {
		struct in6_addr *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first*sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(dev, dev->mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;
		scount++; stotal++;
		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->mca_crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				mld_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
	return skb;
}
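/* Current-state records in answer to queries: a NULL pmc means a
 * general query was received, so every reportable group on the
 * interface is included; otherwise only the queried group is reported.
 */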
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	if (!pmc) {
		read_lock_bh(&idev->lock);
		for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
			if (pmc->mca_flags & MAF_NOREPORT)
				continue;
			spin_lock_bh(&pmc->mca_lock);
			if (pmc->mca_sfcount[MCAST_EXCLUDE])
				type = MLD2_MODE_IS_EXCLUDE;
			else
				type = MLD2_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->mca_lock);
		}
		read_unlock_bh(&idev->lock);
	} else {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_MODE_IS_EXCLUDE;
		else
			type = MLD2_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->mca_lock);
	}
	if (skb)
		mld_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void mld_clear_zeros(struct ip6_sf_list **ppsf)
{
	struct ip6_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = *ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}

static void mld_send_cr(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	read_lock_bh(&idev->lock);
	spin_lock(&idev->mc_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
		pmc_next = pmc->next;
		if (pmc->mca_sfmode == MCAST_INCLUDE) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
				type = MLD2_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->mca_crcount--;
			if (pmc->mca_crcount == 0) {
				mld_clear_zeros(&pmc->mca_tomb);
				mld_clear_zeros(&pmc->mca_sources);
			}
		}
		if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
		    !pmc->mca_sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				idev->mc_tomb = pmc_next;
			in6_dev_put(pmc->idev);
			kfree(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock(&idev->mc_lock);

	/* change recs */
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_ALLOW_NEW_SOURCES;
		} else {
			type = MLD2_ALLOW_NEW_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE)
				type = MLD2_CHANGE_TO_EXCLUDE;
			else
				type = MLD2_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->mca_crcount--;
		}
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (!skb)
		return;
	mld_sendpack(skb);
}
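/* MLDv1 transmit path. Report and Done messages are built by hand
 * (IPv6 header, Hop-by-Hop Router Alert, MLD message); a Done
 * (ICMPV6_MGM_REDUCTION) is addressed to the all-routers group,
 * everything else to the group being reported.
 */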
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	const struct in6_addr *snd_addr, *saddr;
	struct in6_addr addr_buf;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err, len, payload_len, full_len;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
	struct flowi6 fl6;
	struct dst_entry *dst;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	rcu_read_lock();
	IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
		      IPSTATS_MIB_OUT, full_len);
	rcu_read_unlock();

	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);

	if (skb == NULL) {
		rcu_read_lock();
		IP6_INC_STATS(net, __in6_dev_get(dev),
			      IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		return;
	}

	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_nd_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));

	hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
	memset(hdr, 0, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 IPPROTO_ICMPV6,
					 csum_partial(hdr, len, 0));

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto err_out;
	}

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
	} else
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->mca_sources = psf->sf_next;
		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !MLD_V1_SEEN(idev)) {
			psf->sf_crcount = idev->mc_qrv;
			psf->sf_next = pmc->mca_tomb;
			pmc->mca_tomb = psf;
			rv = 1;
		} else
			kfree(psf);
	}
	return rv;
}
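/* Removing sources can flip the interface filter mode: when the last
 * EXCLUDE reference disappears but INCLUDE references remain, the group
 * switches to INCLUDE mode and a change report is scheduled.
 */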
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int changerec = 0;
	int i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);
	sf_markstate(pmc);
	if (!delta) {
		if (!pmc->mca_sfcount[sfmode]) {
			spin_unlock_bh(&pmc->mca_lock);
			read_unlock_bh(&idev->lock);
			return -EINVAL;
		}
		pmc->mca_sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec)
		mld_ifc_event(pmc->idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;

		psf->sf_addr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->mca_sources = psf;
	}
	psf->sf_count[sfmode]++;
	return 0;
}

static void sf_markstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}
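/* sf_markstate() snapshots each source's "was it active" bit (sf_oldin)
 * before a filter update; sf_setstate() then compares old and new
 * activity and arms sf_crcount retransmissions for every source whose
 * state changed, returning the number of such sources.
 */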
static int sf_setstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *dpsf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	int qrv = pmc->idev->mc_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip6_sf_list *prev = NULL;

				for (dpsf = pmc->mca_tomb; dpsf;
				     dpsf = dpsf->sf_next) {
					if (ipv6_addr_equal(&dpsf->sf_addr,
					    &psf->sf_addr))
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->mca_tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
				if (ipv6_addr_equal(&dpsf->sf_addr,
				    &psf->sf_addr))
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->mca_lock held by callers */
				dpsf->sf_next = pmc->mca_tomb;
				pmc->mca_tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}

/*
 * Add multicast source filter list to the interface list
 */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int isexclude;
	int i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);

	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->mca_sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->mca_sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc))
		mld_ifc_event(idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}

static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *nextpsf;

	for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->mca_tomb = NULL;
	for (psf = pmc->mca_sources; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->mca_sources = NULL;
	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
}


static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = net_random() % IGMP6_UNSOLICITED_IVAL;

	spin_lock_bh(&ma->mca_lock);
	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	spin_unlock_bh(&ma->mca_lock);
}
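/* Per-socket teardown: ip6_mc_leave_src() drops the socket's source
 * filter references from the interface list and frees the sflist; with
 * no filter set, the plain any-source EXCLUDE join is released instead.
 */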
		return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	}
	err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
			     iml->sflist->sl_count, iml->sflist->sl_addr, 0);
	sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
	iml->sflist = NULL;
	return err;
}

static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
	if (MLD_V1_SEEN(ma->idev)) {
		if (ma->mca_flags & MAF_LAST_REPORTER)
			igmp6_send(&ma->mca_addr, ma->idev->dev,
				   ICMPV6_MGM_REDUCTION);
	} else {
		mld_add_delrec(ma->idev, ma);
		mld_ifc_event(ma->idev);
	}
}

static void mld_gq_timer_expire(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *)data;

	idev->mc_gq_running = 0;
	mld_send_report(idev, NULL);
	__in6_dev_put(idev);
}

static void mld_ifc_timer_expire(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *)data;

	mld_send_cr(idev);
	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_timer(idev, idev->mc_maxdelay);
	}
	__in6_dev_put(idev);
}

static void mld_ifc_event(struct inet6_dev *idev)
{
	if (MLD_V1_SEEN(idev))
		return;
	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_timer(idev, 1);
}


static void igmp6_timer_handler(unsigned long data)
{
	struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;

	if (MLD_V1_SEEN(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);

	spin_lock(&ma->mca_lock);
	ma->mca_flags |= MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	spin_unlock(&ma->mca_lock);
	ma_put(ma);
}

/* Device changing type */

void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);
}

void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}

/* Device going down */

void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);
	idev->mc_ifc_count = 0;
	if (del_timer(&idev->mc_ifc_timer))
		__in6_dev_put(idev);
	idev->mc_gq_running = 0;
	if (del_timer(&idev->mc_gq_timer))
		__in6_dev_put(idev);

	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);

	mld_clear_delrec(idev);
}


/* Device going up */

void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_added(i);
	read_unlock_bh(&idev->lock);
}

/* IPv6 device initialization. */
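/* Seeds the per-device MLD state: both report timers are set up but
 * not armed, the tomb list starts empty, and the query robustness
 * variable (mc_qrv) starts at MLD_QRV_DEFAULT (2, the RFC 3810
 * default Robustness Variable).
 */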

void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	write_lock_bh(&idev->lock);
	spin_lock_init(&idev->mc_lock);
	idev->mc_gq_running = 0;
	setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
		    (unsigned long)idev);
	idev->mc_tomb = NULL;
	idev->mc_ifc_count = 0;
	setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
		    (unsigned long)idev);
	idev->mc_qrv = MLD_QRV_DEFAULT;
	idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
	idev->mc_v1_seen = 0;
	write_unlock_bh(&idev->lock);
}

/*
 * Device is about to be destroyed: clean up.
 */

void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate timers */
	ipv6_mc_down(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	write_lock_bh(&idev->lock);
	while ((i = idev->mc_list) != NULL) {
		idev->mc_list = i->next;
		write_unlock_bh(&idev->lock);

		igmp6_group_dropped(i);
		ma_put(i);

		write_lock_bh(&idev->lock);
	}
	write_unlock_bh(&idev->lock);
}

#ifdef CONFIG_PROC_FS
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)

static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (!idev)
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (im) {
			state->idev = idev;
			break;
		}
		read_unlock_bh(&idev->lock);
	}
	return im;
}

static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = im->next;
	while (!im) {
		if (likely(state->idev != NULL))
			read_unlock_bh(&state->idev->lock);

		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		read_lock_bh(&state->idev->lock);
		im = state->idev->mc_list;
	}
	return im;
}

static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
	if (im)
		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}
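
/* Locking note: igmp6_mc_get_first()/igmp6_mc_get_next() return with
 * the current device's idev->lock read-held; it is released when the
 * walk advances to the next device, or by igmp6_mc_seq_stop() when
 * the sequence ends.
 */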

static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}

static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);

	++*pos;
	return im;
}

static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	if (likely(state->idev != NULL)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_timer.expires - jiffies) : 0);
	return 0;
}

static const struct seq_operations igmp6_mc_seq_ops = {
	.start	= igmp6_mc_seq_start,
	.next	= igmp6_mc_seq_next,
	.stop	= igmp6_mc_seq_stop,
	.show	= igmp6_mc_seq_show,
};

static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp6_mc_seq_ops,
			    sizeof(struct igmp6_mc_iter_state));
}

static const struct file_operations igmp6_mc_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= igmp6_mc_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;
};

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)

static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (likely(im != NULL)) {
			spin_lock_bh(&im->mca_lock);
			psf = im->mca_sources;
			if (likely(psf != NULL)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->mca_lock);
		}
		read_unlock_bh(&idev->lock);
	}
	return psf;
}
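
/* The mcfilter6 walk nests two locks: idev->lock (read) protects the
 * device's group list and im->mca_lock protects each group's source
 * list.  igmp6_mcf_get_next() drops and re-takes them as the walk
 * crosses group and device boundaries; igmp6_mcf_seq_stop() releases
 * whatever is still held.
 */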

static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = state->im->next;
		while (!state->im) {
			if (likely(state->idev != NULL))
				read_unlock_bh(&state->idev->lock);

			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;
			read_lock_bh(&state->idev->lock);
			state->im = state->idev->mc_list;
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->mca_lock);
		psf = state->im->mca_sources;
	}
out:
	return psf;
}

static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
	if (psf)
		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp6_mcf_get_first(seq);
	else
		psf = igmp6_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	if (likely(state->im != NULL)) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = NULL;
	}
	if (likely(state->idev != NULL)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%3s %6s "
			   "%32s %32s %6s %6s\n", "Idx",
			   "Device", "Multicast Address",
			   "Source Address", "INC", "EXC");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp6_mcf_seq_ops = {
	.start	= igmp6_mcf_seq_start,
	.next	= igmp6_mcf_seq_next,
	.stop	= igmp6_mcf_seq_stop,
	.show	= igmp6_mcf_seq_show,
};

static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
			    sizeof(struct igmp6_mcf_iter_state));
}

static const struct file_operations igmp6_mcf_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= igmp6_mcf_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static int __net_init igmp6_proc_init(struct net *net)
{
	int err;

	err = -ENOMEM;
	if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO,
				  &igmp6_mcf_seq_fops))
		goto out_proc_net_igmp6;

	err = 0;
out:
	return err;

out_proc_net_igmp6:
	proc_net_remove(net, "igmp6");
	goto out;
}

static void __net_exit igmp6_proc_exit(struct net *net)
{
	proc_net_remove(net, "mcfilter6");
	proc_net_remove(net, "igmp6");
}
#else
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
static inline void igmp6_proc_exit(struct net *net)
{
}
#endif
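
/* Per-namespace setup: each netns gets its own raw ICMPv6 control
 * socket for transmitting MLD packets.  RFC 2710/RFC 3810 require MLD
 * messages to be sent with an IPv6 hop limit of 1, hence the
 * hop_limit override after the socket is created.
 */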

static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create;
out:
	return err;

out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	goto out;
}

static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	igmp6_proc_exit(net);
}

static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};

int __init igmp6_init(void)
{
	return register_pernet_subsys(&igmp6_net_ops);
}

void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
}
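
/* Note: there is no module_init() here; igmp6_init()/igmp6_cleanup()
 * are invoked from the IPv6 stack's own bring-up and teardown paths
 * (inet6_init()/inet6_exit() in net/ipv6/af_inet6.c).
 */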