/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

struct mr_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock __rcu	*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	int			mroute_do_assert;
	int			mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */
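
/* Concretely: resolved-cache lookups on the receive path run under
 * rcu_read_lock() (ipmr_cache_find() walks mfc_cache_array[] with
 * list_for_each_entry_rcu()), the vif table is read under
 * read_lock(&mrt_lock), and configuration changes take
 * write_lock_bh(&mrt_lock) or mfc_unres_lock as appropriate.
 */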

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	struct ipmr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	int err;

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	ipmr_free_table(net->ipv4.mrt);
}
#endif

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	mrt = ipmr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);

	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering.
*/ 529 rtnl_unlock(); 530 rtnl_lock(); 531 532 unregister_netdevice(dev); 533 return NULL; 534 } 535 #endif 536 537 /** 538 * vif_delete - Delete a VIF entry 539 * @notify: Set to 1, if the caller is a notifier_call 540 */ 541 542 static int vif_delete(struct mr_table *mrt, int vifi, int notify, 543 struct list_head *head) 544 { 545 struct vif_device *v; 546 struct net_device *dev; 547 struct in_device *in_dev; 548 549 if (vifi < 0 || vifi >= mrt->maxvif) 550 return -EADDRNOTAVAIL; 551 552 v = &mrt->vif_table[vifi]; 553 554 write_lock_bh(&mrt_lock); 555 dev = v->dev; 556 v->dev = NULL; 557 558 if (!dev) { 559 write_unlock_bh(&mrt_lock); 560 return -EADDRNOTAVAIL; 561 } 562 563 #ifdef CONFIG_IP_PIMSM 564 if (vifi == mrt->mroute_reg_vif_num) 565 mrt->mroute_reg_vif_num = -1; 566 #endif 567 568 if (vifi + 1 == mrt->maxvif) { 569 int tmp; 570 571 for (tmp = vifi - 1; tmp >= 0; tmp--) { 572 if (VIF_EXISTS(mrt, tmp)) 573 break; 574 } 575 mrt->maxvif = tmp+1; 576 } 577 578 write_unlock_bh(&mrt_lock); 579 580 dev_set_allmulti(dev, -1); 581 582 in_dev = __in_dev_get_rtnl(dev); 583 if (in_dev) { 584 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--; 585 ip_rt_multicast_event(in_dev); 586 } 587 588 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify) 589 unregister_netdevice_queue(dev, head); 590 591 dev_put(dev); 592 return 0; 593 } 594 595 static void ipmr_cache_free_rcu(struct rcu_head *head) 596 { 597 struct mfc_cache *c = container_of(head, struct mfc_cache, rcu); 598 599 kmem_cache_free(mrt_cachep, c); 600 } 601 602 static inline void ipmr_cache_free(struct mfc_cache *c) 603 { 604 call_rcu(&c->rcu, ipmr_cache_free_rcu); 605 } 606 607 /* Destroy an unresolved cache entry, killing queued skbs 608 * and reporting error to netlink readers. 609 */ 610 611 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) 612 { 613 struct net *net = read_pnet(&mrt->net); 614 struct sk_buff *skb; 615 struct nlmsgerr *e; 616 617 atomic_dec(&mrt->cache_resolve_queue_len); 618 619 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { 620 if (ip_hdr(skb)->version == 0) { 621 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 622 nlh->nlmsg_type = NLMSG_ERROR; 623 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 624 skb_trim(skb, nlh->nlmsg_len); 625 e = NLMSG_DATA(nlh); 626 e->error = -ETIMEDOUT; 627 memset(&e->msg, 0, sizeof(e->msg)); 628 629 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 630 } else { 631 kfree_skb(skb); 632 } 633 } 634 635 ipmr_cache_free(c); 636 } 637 638 639 /* Timer process for the unresolved queue. */ 640 641 static void ipmr_expire_process(unsigned long arg) 642 { 643 struct mr_table *mrt = (struct mr_table *)arg; 644 unsigned long now; 645 unsigned long expires; 646 struct mfc_cache *c, *next; 647 648 if (!spin_trylock(&mfc_unres_lock)) { 649 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); 650 return; 651 } 652 653 if (list_empty(&mrt->mfc_unres_queue)) 654 goto out; 655 656 now = jiffies; 657 expires = 10*HZ; 658 659 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 660 if (time_after(c->mfc_un.unres.expires, now)) { 661 unsigned long interval = c->mfc_un.unres.expires - now; 662 if (interval < expires) 663 expires = interval; 664 continue; 665 } 666 667 list_del(&c->list); 668 ipmr_destroy_unres(mrt, c); 669 } 670 671 if (!list_empty(&mrt->mfc_unres_queue)) 672 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); 673 674 out: 675 spin_unlock(&mfc_unres_lock); 676 } 677 678 /* Fill oifs list. 
It is called under write locked mrt_lock. */ 679 680 static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, 681 unsigned char *ttls) 682 { 683 int vifi; 684 685 cache->mfc_un.res.minvif = MAXVIFS; 686 cache->mfc_un.res.maxvif = 0; 687 memset(cache->mfc_un.res.ttls, 255, MAXVIFS); 688 689 for (vifi = 0; vifi < mrt->maxvif; vifi++) { 690 if (VIF_EXISTS(mrt, vifi) && 691 ttls[vifi] && ttls[vifi] < 255) { 692 cache->mfc_un.res.ttls[vifi] = ttls[vifi]; 693 if (cache->mfc_un.res.minvif > vifi) 694 cache->mfc_un.res.minvif = vifi; 695 if (cache->mfc_un.res.maxvif <= vifi) 696 cache->mfc_un.res.maxvif = vifi + 1; 697 } 698 } 699 } 700 701 static int vif_add(struct net *net, struct mr_table *mrt, 702 struct vifctl *vifc, int mrtsock) 703 { 704 int vifi = vifc->vifc_vifi; 705 struct vif_device *v = &mrt->vif_table[vifi]; 706 struct net_device *dev; 707 struct in_device *in_dev; 708 int err; 709 710 /* Is vif busy ? */ 711 if (VIF_EXISTS(mrt, vifi)) 712 return -EADDRINUSE; 713 714 switch (vifc->vifc_flags) { 715 #ifdef CONFIG_IP_PIMSM 716 case VIFF_REGISTER: 717 /* 718 * Special Purpose VIF in PIM 719 * All the packets will be sent to the daemon 720 */ 721 if (mrt->mroute_reg_vif_num >= 0) 722 return -EADDRINUSE; 723 dev = ipmr_reg_vif(net, mrt); 724 if (!dev) 725 return -ENOBUFS; 726 err = dev_set_allmulti(dev, 1); 727 if (err) { 728 unregister_netdevice(dev); 729 dev_put(dev); 730 return err; 731 } 732 break; 733 #endif 734 case VIFF_TUNNEL: 735 dev = ipmr_new_tunnel(net, vifc); 736 if (!dev) 737 return -ENOBUFS; 738 err = dev_set_allmulti(dev, 1); 739 if (err) { 740 ipmr_del_tunnel(dev, vifc); 741 dev_put(dev); 742 return err; 743 } 744 break; 745 746 case VIFF_USE_IFINDEX: 747 case 0: 748 if (vifc->vifc_flags == VIFF_USE_IFINDEX) { 749 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); 750 if (dev && __in_dev_get_rtnl(dev) == NULL) { 751 dev_put(dev); 752 return -EADDRNOTAVAIL; 753 } 754 } else { 755 dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); 756 } 757 if (!dev) 758 return -EADDRNOTAVAIL; 759 err = dev_set_allmulti(dev, 1); 760 if (err) { 761 dev_put(dev); 762 return err; 763 } 764 break; 765 default: 766 return -EINVAL; 767 } 768 769 in_dev = __in_dev_get_rtnl(dev); 770 if (!in_dev) { 771 dev_put(dev); 772 return -EADDRNOTAVAIL; 773 } 774 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; 775 ip_rt_multicast_event(in_dev); 776 777 /* Fill in the VIF structures */ 778 779 v->rate_limit = vifc->vifc_rate_limit; 780 v->local = vifc->vifc_lcl_addr.s_addr; 781 v->remote = vifc->vifc_rmt_addr.s_addr; 782 v->flags = vifc->vifc_flags; 783 if (!mrtsock) 784 v->flags |= VIFF_STATIC; 785 v->threshold = vifc->vifc_threshold; 786 v->bytes_in = 0; 787 v->bytes_out = 0; 788 v->pkt_in = 0; 789 v->pkt_out = 0; 790 v->link = dev->ifindex; 791 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) 792 v->link = dev->iflink; 793 794 /* And finish update writing critical data */ 795 write_lock_bh(&mrt_lock); 796 v->dev = dev; 797 #ifdef CONFIG_IP_PIMSM 798 if (v->flags & VIFF_REGISTER) 799 mrt->mroute_reg_vif_num = vifi; 800 #endif 801 if (vifi+1 > mrt->maxvif) 802 mrt->maxvif = vifi+1; 803 write_unlock_bh(&mrt_lock); 804 return 0; 805 } 806 807 /* called with rcu_read_lock() */ 808 static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, 809 __be32 origin, 810 __be32 mcastgrp) 811 { 812 int line = MFC_HASH(mcastgrp, origin); 813 struct mfc_cache *c; 814 815 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) { 816 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) 817 
return c; 818 } 819 return NULL; 820 } 821 822 /* 823 * Allocate a multicast cache entry 824 */ 825 static struct mfc_cache *ipmr_cache_alloc(void) 826 { 827 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 828 829 if (c) 830 c->mfc_un.res.minvif = MAXVIFS; 831 return c; 832 } 833 834 static struct mfc_cache *ipmr_cache_alloc_unres(void) 835 { 836 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); 837 838 if (c) { 839 skb_queue_head_init(&c->mfc_un.unres.unresolved); 840 c->mfc_un.unres.expires = jiffies + 10*HZ; 841 } 842 return c; 843 } 844 845 /* 846 * A cache entry has gone into a resolved state from queued 847 */ 848 849 static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, 850 struct mfc_cache *uc, struct mfc_cache *c) 851 { 852 struct sk_buff *skb; 853 struct nlmsgerr *e; 854 855 /* Play the pending entries through our router */ 856 857 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { 858 if (ip_hdr(skb)->version == 0) { 859 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 860 861 if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { 862 nlh->nlmsg_len = skb_tail_pointer(skb) - 863 (u8 *)nlh; 864 } else { 865 nlh->nlmsg_type = NLMSG_ERROR; 866 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 867 skb_trim(skb, nlh->nlmsg_len); 868 e = NLMSG_DATA(nlh); 869 e->error = -EMSGSIZE; 870 memset(&e->msg, 0, sizeof(e->msg)); 871 } 872 873 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 874 } else { 875 ip_mr_forward(net, mrt, skb, c, 0); 876 } 877 } 878 } 879 880 /* 881 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted 882 * expects the following bizarre scheme. 883 * 884 * Called under mrt_lock. 885 */ 886 887 static int ipmr_cache_report(struct mr_table *mrt, 888 struct sk_buff *pkt, vifi_t vifi, int assert) 889 { 890 struct sk_buff *skb; 891 const int ihl = ip_hdrlen(pkt); 892 struct igmphdr *igmp; 893 struct igmpmsg *msg; 894 struct sock *mroute_sk; 895 int ret; 896 897 #ifdef CONFIG_IP_PIMSM 898 if (assert == IGMPMSG_WHOLEPKT) 899 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr)); 900 else 901 #endif 902 skb = alloc_skb(128, GFP_ATOMIC); 903 904 if (!skb) 905 return -ENOBUFS; 906 907 #ifdef CONFIG_IP_PIMSM 908 if (assert == IGMPMSG_WHOLEPKT) { 909 /* Ugly, but we have no choice with this interface. 910 * Duplicate old header, fix ihl, length etc. 
911 * And all this only to mangle msg->im_msgtype and 912 * to set msg->im_mbz to "mbz" :-) 913 */ 914 skb_push(skb, sizeof(struct iphdr)); 915 skb_reset_network_header(skb); 916 skb_reset_transport_header(skb); 917 msg = (struct igmpmsg *)skb_network_header(skb); 918 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); 919 msg->im_msgtype = IGMPMSG_WHOLEPKT; 920 msg->im_mbz = 0; 921 msg->im_vif = mrt->mroute_reg_vif_num; 922 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; 923 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + 924 sizeof(struct iphdr)); 925 } else 926 #endif 927 { 928 929 /* Copy the IP header */ 930 931 skb->network_header = skb->tail; 932 skb_put(skb, ihl); 933 skb_copy_to_linear_data(skb, pkt->data, ihl); 934 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ 935 msg = (struct igmpmsg *)skb_network_header(skb); 936 msg->im_vif = vifi; 937 skb_dst_set(skb, dst_clone(skb_dst(pkt))); 938 939 /* Add our header */ 940 941 igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); 942 igmp->type = 943 msg->im_msgtype = assert; 944 igmp->code = 0; 945 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ 946 skb->transport_header = skb->network_header; 947 } 948 949 rcu_read_lock(); 950 mroute_sk = rcu_dereference(mrt->mroute_sk); 951 if (mroute_sk == NULL) { 952 rcu_read_unlock(); 953 kfree_skb(skb); 954 return -EINVAL; 955 } 956 957 /* Deliver to mrouted */ 958 959 ret = sock_queue_rcv_skb(mroute_sk, skb); 960 rcu_read_unlock(); 961 if (ret < 0) { 962 net_warn_ratelimited("mroute: pending queue full, dropping entries\n"); 963 kfree_skb(skb); 964 } 965 966 return ret; 967 } 968 969 /* 970 * Queue a packet for resolution. It gets locked cache entry! 971 */ 972 973 static int 974 ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) 975 { 976 bool found = false; 977 int err; 978 struct mfc_cache *c; 979 const struct iphdr *iph = ip_hdr(skb); 980 981 spin_lock_bh(&mfc_unres_lock); 982 list_for_each_entry(c, &mrt->mfc_unres_queue, list) { 983 if (c->mfc_mcastgrp == iph->daddr && 984 c->mfc_origin == iph->saddr) { 985 found = true; 986 break; 987 } 988 } 989 990 if (!found) { 991 /* Create a new entry if allowable */ 992 993 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || 994 (c = ipmr_cache_alloc_unres()) == NULL) { 995 spin_unlock_bh(&mfc_unres_lock); 996 997 kfree_skb(skb); 998 return -ENOBUFS; 999 } 1000 1001 /* Fill in the new cache entry */ 1002 1003 c->mfc_parent = -1; 1004 c->mfc_origin = iph->saddr; 1005 c->mfc_mcastgrp = iph->daddr; 1006 1007 /* Reflect first query at mrouted. 
*/ 1008 1009 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); 1010 if (err < 0) { 1011 /* If the report failed throw the cache entry 1012 out - Brad Parker 1013 */ 1014 spin_unlock_bh(&mfc_unres_lock); 1015 1016 ipmr_cache_free(c); 1017 kfree_skb(skb); 1018 return err; 1019 } 1020 1021 atomic_inc(&mrt->cache_resolve_queue_len); 1022 list_add(&c->list, &mrt->mfc_unres_queue); 1023 1024 if (atomic_read(&mrt->cache_resolve_queue_len) == 1) 1025 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); 1026 } 1027 1028 /* See if we can append the packet */ 1029 1030 if (c->mfc_un.unres.unresolved.qlen > 3) { 1031 kfree_skb(skb); 1032 err = -ENOBUFS; 1033 } else { 1034 skb_queue_tail(&c->mfc_un.unres.unresolved, skb); 1035 err = 0; 1036 } 1037 1038 spin_unlock_bh(&mfc_unres_lock); 1039 return err; 1040 } 1041 1042 /* 1043 * MFC cache manipulation by user space mroute daemon 1044 */ 1045 1046 static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc) 1047 { 1048 int line; 1049 struct mfc_cache *c, *next; 1050 1051 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 1052 1053 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { 1054 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1055 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1056 list_del_rcu(&c->list); 1057 1058 ipmr_cache_free(c); 1059 return 0; 1060 } 1061 } 1062 return -ENOENT; 1063 } 1064 1065 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, 1066 struct mfcctl *mfc, int mrtsock) 1067 { 1068 bool found = false; 1069 int line; 1070 struct mfc_cache *uc, *c; 1071 1072 if (mfc->mfcc_parent >= MAXVIFS) 1073 return -ENFILE; 1074 1075 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 1076 1077 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { 1078 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1079 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1080 found = true; 1081 break; 1082 } 1083 } 1084 1085 if (found) { 1086 write_lock_bh(&mrt_lock); 1087 c->mfc_parent = mfc->mfcc_parent; 1088 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); 1089 if (!mrtsock) 1090 c->mfc_flags |= MFC_STATIC; 1091 write_unlock_bh(&mrt_lock); 1092 return 0; 1093 } 1094 1095 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) 1096 return -EINVAL; 1097 1098 c = ipmr_cache_alloc(); 1099 if (c == NULL) 1100 return -ENOMEM; 1101 1102 c->mfc_origin = mfc->mfcc_origin.s_addr; 1103 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; 1104 c->mfc_parent = mfc->mfcc_parent; 1105 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); 1106 if (!mrtsock) 1107 c->mfc_flags |= MFC_STATIC; 1108 1109 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]); 1110 1111 /* 1112 * Check to see if we resolved a queued list. If so we 1113 * need to send on the frames and tidy up. 
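 *
 * A pending entry is matched on its (origin, mcastgrp) pair; the skbs
 * queued on it are then replayed through ipmr_cache_resolve() once the
 * resolved route has been installed.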
1114 */ 1115 found = false; 1116 spin_lock_bh(&mfc_unres_lock); 1117 list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { 1118 if (uc->mfc_origin == c->mfc_origin && 1119 uc->mfc_mcastgrp == c->mfc_mcastgrp) { 1120 list_del(&uc->list); 1121 atomic_dec(&mrt->cache_resolve_queue_len); 1122 found = true; 1123 break; 1124 } 1125 } 1126 if (list_empty(&mrt->mfc_unres_queue)) 1127 del_timer(&mrt->ipmr_expire_timer); 1128 spin_unlock_bh(&mfc_unres_lock); 1129 1130 if (found) { 1131 ipmr_cache_resolve(net, mrt, uc, c); 1132 ipmr_cache_free(uc); 1133 } 1134 return 0; 1135 } 1136 1137 /* 1138 * Close the multicast socket, and clear the vif tables etc 1139 */ 1140 1141 static void mroute_clean_tables(struct mr_table *mrt) 1142 { 1143 int i; 1144 LIST_HEAD(list); 1145 struct mfc_cache *c, *next; 1146 1147 /* Shut down all active vif entries */ 1148 1149 for (i = 0; i < mrt->maxvif; i++) { 1150 if (!(mrt->vif_table[i].flags & VIFF_STATIC)) 1151 vif_delete(mrt, i, 0, &list); 1152 } 1153 unregister_netdevice_many(&list); 1154 1155 /* Wipe the cache */ 1156 1157 for (i = 0; i < MFC_LINES; i++) { 1158 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { 1159 if (c->mfc_flags & MFC_STATIC) 1160 continue; 1161 list_del_rcu(&c->list); 1162 ipmr_cache_free(c); 1163 } 1164 } 1165 1166 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { 1167 spin_lock_bh(&mfc_unres_lock); 1168 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 1169 list_del(&c->list); 1170 ipmr_destroy_unres(mrt, c); 1171 } 1172 spin_unlock_bh(&mfc_unres_lock); 1173 } 1174 } 1175 1176 /* called from ip_ra_control(), before an RCU grace period, 1177 * we dont need to call synchronize_rcu() here 1178 */ 1179 static void mrtsock_destruct(struct sock *sk) 1180 { 1181 struct net *net = sock_net(sk); 1182 struct mr_table *mrt; 1183 1184 rtnl_lock(); 1185 ipmr_for_each_table(mrt, net) { 1186 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1187 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1188 RCU_INIT_POINTER(mrt->mroute_sk, NULL); 1189 mroute_clean_tables(mrt); 1190 } 1191 } 1192 rtnl_unlock(); 1193 } 1194 1195 /* 1196 * Socket options and virtual interface manipulation. The whole 1197 * virtual interface system is a complete heap, but unfortunately 1198 * that's how BSD mrouted happens to think. Maybe one day with a proper 1199 * MOSPF/PIM router set up we can clean this up. 1200 */ 1201 1202 int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) 1203 { 1204 int ret; 1205 struct vifctl vif; 1206 struct mfcctl mfc; 1207 struct net *net = sock_net(sk); 1208 struct mr_table *mrt; 1209 1210 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1211 if (mrt == NULL) 1212 return -ENOENT; 1213 1214 if (optname != MRT_INIT) { 1215 if (sk != rcu_access_pointer(mrt->mroute_sk) && 1216 !capable(CAP_NET_ADMIN)) 1217 return -EACCES; 1218 } 1219 1220 switch (optname) { 1221 case MRT_INIT: 1222 if (sk->sk_type != SOCK_RAW || 1223 inet_sk(sk)->inet_num != IPPROTO_IGMP) 1224 return -EOPNOTSUPP; 1225 if (optlen != sizeof(int)) 1226 return -ENOPROTOOPT; 1227 1228 rtnl_lock(); 1229 if (rtnl_dereference(mrt->mroute_sk)) { 1230 rtnl_unlock(); 1231 return -EADDRINUSE; 1232 } 1233 1234 ret = ip_ra_control(sk, 1, mrtsock_destruct); 1235 if (ret == 0) { 1236 rcu_assign_pointer(mrt->mroute_sk, sk); 1237 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; 1238 } 1239 rtnl_unlock(); 1240 return ret; 1241 case MRT_DONE: 1242 if (sk != rcu_access_pointer(mrt->mroute_sk)) 1243 return -EACCES; 1244 return ip_ra_control(sk, 0, NULL); 1245 case MRT_ADD_VIF: 1246 case MRT_DEL_VIF: 1247 if (optlen != sizeof(vif)) 1248 return -EINVAL; 1249 if (copy_from_user(&vif, optval, sizeof(vif))) 1250 return -EFAULT; 1251 if (vif.vifc_vifi >= MAXVIFS) 1252 return -ENFILE; 1253 rtnl_lock(); 1254 if (optname == MRT_ADD_VIF) { 1255 ret = vif_add(net, mrt, &vif, 1256 sk == rtnl_dereference(mrt->mroute_sk)); 1257 } else { 1258 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); 1259 } 1260 rtnl_unlock(); 1261 return ret; 1262 1263 /* 1264 * Manipulate the forwarding caches. These live 1265 * in a sort of kernel/user symbiosis. 1266 */ 1267 case MRT_ADD_MFC: 1268 case MRT_DEL_MFC: 1269 if (optlen != sizeof(mfc)) 1270 return -EINVAL; 1271 if (copy_from_user(&mfc, optval, sizeof(mfc))) 1272 return -EFAULT; 1273 rtnl_lock(); 1274 if (optname == MRT_DEL_MFC) 1275 ret = ipmr_mfc_delete(mrt, &mfc); 1276 else 1277 ret = ipmr_mfc_add(net, mrt, &mfc, 1278 sk == rtnl_dereference(mrt->mroute_sk)); 1279 rtnl_unlock(); 1280 return ret; 1281 /* 1282 * Control PIM assert. 1283 */ 1284 case MRT_ASSERT: 1285 { 1286 int v; 1287 if (get_user(v, (int __user *)optval)) 1288 return -EFAULT; 1289 mrt->mroute_do_assert = (v) ? 1 : 0; 1290 return 0; 1291 } 1292 #ifdef CONFIG_IP_PIMSM 1293 case MRT_PIM: 1294 { 1295 int v; 1296 1297 if (get_user(v, (int __user *)optval)) 1298 return -EFAULT; 1299 v = (v) ? 1 : 0; 1300 1301 rtnl_lock(); 1302 ret = 0; 1303 if (v != mrt->mroute_do_pim) { 1304 mrt->mroute_do_pim = v; 1305 mrt->mroute_do_assert = v; 1306 } 1307 rtnl_unlock(); 1308 return ret; 1309 } 1310 #endif 1311 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 1312 case MRT_TABLE: 1313 { 1314 u32 v; 1315 1316 if (optlen != sizeof(u32)) 1317 return -EINVAL; 1318 if (get_user(v, (u32 __user *)optval)) 1319 return -EFAULT; 1320 1321 rtnl_lock(); 1322 ret = 0; 1323 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1324 ret = -EBUSY; 1325 } else { 1326 if (!ipmr_new_table(net, v)) 1327 ret = -ENOMEM; 1328 raw_sk(sk)->ipmr_table = v; 1329 } 1330 rtnl_unlock(); 1331 return ret; 1332 } 1333 #endif 1334 /* 1335 * Spurious command, or MRT_VERSION which you cannot 1336 * set. 1337 */ 1338 default: 1339 return -ENOPROTOOPT; 1340 } 1341 } 1342 1343 /* 1344 * Getsock opt support for the multicast routing system. 1345 */ 1346 1347 int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) 1348 { 1349 int olr; 1350 int val; 1351 struct net *net = sock_net(sk); 1352 struct mr_table *mrt; 1353 1354 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1355 if (mrt == NULL) 1356 return -ENOENT; 1357 1358 if (optname != MRT_VERSION && 1359 #ifdef CONFIG_IP_PIMSM 1360 optname != MRT_PIM && 1361 #endif 1362 optname != MRT_ASSERT) 1363 return -ENOPROTOOPT; 1364 1365 if (get_user(olr, optlen)) 1366 return -EFAULT; 1367 1368 olr = min_t(unsigned int, olr, sizeof(int)); 1369 if (olr < 0) 1370 return -EINVAL; 1371 1372 if (put_user(olr, optlen)) 1373 return -EFAULT; 1374 if (optname == MRT_VERSION) 1375 val = 0x0305; 1376 #ifdef CONFIG_IP_PIMSM 1377 else if (optname == MRT_PIM) 1378 val = mrt->mroute_do_pim; 1379 #endif 1380 else 1381 val = mrt->mroute_do_assert; 1382 if (copy_to_user(optval, &val, olr)) 1383 return -EFAULT; 1384 return 0; 1385 } 1386 1387 /* 1388 * The IP multicast ioctl support routines. 1389 */ 1390 1391 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) 1392 { 1393 struct sioc_sg_req sr; 1394 struct sioc_vif_req vr; 1395 struct vif_device *vif; 1396 struct mfc_cache *c; 1397 struct net *net = sock_net(sk); 1398 struct mr_table *mrt; 1399 1400 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1401 if (mrt == NULL) 1402 return -ENOENT; 1403 1404 switch (cmd) { 1405 case SIOCGETVIFCNT: 1406 if (copy_from_user(&vr, arg, sizeof(vr))) 1407 return -EFAULT; 1408 if (vr.vifi >= mrt->maxvif) 1409 return -EINVAL; 1410 read_lock(&mrt_lock); 1411 vif = &mrt->vif_table[vr.vifi]; 1412 if (VIF_EXISTS(mrt, vr.vifi)) { 1413 vr.icount = vif->pkt_in; 1414 vr.ocount = vif->pkt_out; 1415 vr.ibytes = vif->bytes_in; 1416 vr.obytes = vif->bytes_out; 1417 read_unlock(&mrt_lock); 1418 1419 if (copy_to_user(arg, &vr, sizeof(vr))) 1420 return -EFAULT; 1421 return 0; 1422 } 1423 read_unlock(&mrt_lock); 1424 return -EADDRNOTAVAIL; 1425 case SIOCGETSGCNT: 1426 if (copy_from_user(&sr, arg, sizeof(sr))) 1427 return -EFAULT; 1428 1429 rcu_read_lock(); 1430 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); 1431 if (c) { 1432 sr.pktcnt = c->mfc_un.res.pkt; 1433 sr.bytecnt = c->mfc_un.res.bytes; 1434 sr.wrong_if = c->mfc_un.res.wrong_if; 1435 rcu_read_unlock(); 1436 1437 if (copy_to_user(arg, &sr, sizeof(sr))) 1438 return -EFAULT; 1439 return 0; 1440 } 1441 rcu_read_unlock(); 1442 return -EADDRNOTAVAIL; 1443 default: 1444 return -ENOIOCTLCMD; 1445 } 1446 } 1447 1448 #ifdef CONFIG_COMPAT 1449 struct compat_sioc_sg_req { 1450 struct in_addr src; 1451 struct in_addr grp; 1452 compat_ulong_t pktcnt; 1453 compat_ulong_t bytecnt; 1454 compat_ulong_t wrong_if; 1455 }; 1456 1457 struct compat_sioc_vif_req { 1458 vifi_t vifi; /* Which iface */ 1459 compat_ulong_t icount; 1460 compat_ulong_t ocount; 1461 compat_ulong_t ibytes; 1462 compat_ulong_t obytes; 1463 }; 1464 1465 int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) 1466 { 1467 struct compat_sioc_sg_req sr; 1468 struct compat_sioc_vif_req vr; 1469 struct vif_device *vif; 1470 struct mfc_cache *c; 1471 struct net *net = sock_net(sk); 1472 struct mr_table *mrt; 1473 1474 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1475 if (mrt == NULL) 1476 return -ENOENT; 1477 1478 switch (cmd) { 1479 case SIOCGETVIFCNT: 1480 if (copy_from_user(&vr, arg, sizeof(vr))) 1481 return -EFAULT; 1482 if (vr.vifi >= mrt->maxvif) 1483 return -EINVAL; 1484 read_lock(&mrt_lock); 1485 vif = &mrt->vif_table[vr.vifi]; 1486 if (VIF_EXISTS(mrt, vr.vifi)) { 1487 vr.icount = vif->pkt_in; 1488 vr.ocount = vif->pkt_out; 1489 vr.ibytes = vif->bytes_in; 1490 vr.obytes = vif->bytes_out; 1491 read_unlock(&mrt_lock); 1492 1493 if (copy_to_user(arg, &vr, sizeof(vr))) 1494 return -EFAULT; 1495 return 0; 1496 } 1497 read_unlock(&mrt_lock); 1498 return -EADDRNOTAVAIL; 1499 case SIOCGETSGCNT: 1500 if (copy_from_user(&sr, arg, sizeof(sr))) 1501 return -EFAULT; 1502 1503 rcu_read_lock(); 1504 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); 1505 if (c) { 1506 sr.pktcnt = c->mfc_un.res.pkt; 1507 sr.bytecnt = c->mfc_un.res.bytes; 1508 sr.wrong_if = c->mfc_un.res.wrong_if; 1509 rcu_read_unlock(); 1510 1511 if (copy_to_user(arg, &sr, sizeof(sr))) 1512 return -EFAULT; 1513 return 0; 1514 } 1515 rcu_read_unlock(); 1516 return -EADDRNOTAVAIL; 1517 default: 1518 return -ENOIOCTLCMD; 1519 } 1520 } 1521 #endif 1522 1523 1524 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) 1525 { 1526 struct net_device *dev = ptr; 1527 struct net *net = dev_net(dev); 1528 struct mr_table *mrt; 1529 struct vif_device *v; 1530 int ct; 1531 1532 if (event != NETDEV_UNREGISTER) 1533 return NOTIFY_DONE; 1534 1535 ipmr_for_each_table(mrt, net) { 1536 v = &mrt->vif_table[0]; 1537 for (ct = 0; ct < mrt->maxvif; ct++, v++) { 1538 if (v->dev == dev) 1539 vif_delete(mrt, ct, 1, NULL); 1540 } 1541 } 1542 return NOTIFY_DONE; 1543 } 1544 1545 1546 static struct notifier_block ip_mr_notifier = { 1547 .notifier_call = ipmr_device_event, 1548 }; 1549 1550 /* 1551 * Encapsulate a packet by attaching a valid IPIP header to it. 1552 * This avoids tunnel drivers and other mess and gives us the speed so 1553 * important for multicast video. 
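 *
 * The outer header copies tos and ttl from the inner header, carries
 * protocol IPPROTO_IPIP and gets a fresh checksum, so the receiving end
 * can strip it like any other ipip-encapsulated packet.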
1554 */ 1555 1556 static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) 1557 { 1558 struct iphdr *iph; 1559 const struct iphdr *old_iph = ip_hdr(skb); 1560 1561 skb_push(skb, sizeof(struct iphdr)); 1562 skb->transport_header = skb->network_header; 1563 skb_reset_network_header(skb); 1564 iph = ip_hdr(skb); 1565 1566 iph->version = 4; 1567 iph->tos = old_iph->tos; 1568 iph->ttl = old_iph->ttl; 1569 iph->frag_off = 0; 1570 iph->daddr = daddr; 1571 iph->saddr = saddr; 1572 iph->protocol = IPPROTO_IPIP; 1573 iph->ihl = 5; 1574 iph->tot_len = htons(skb->len); 1575 ip_select_ident(iph, skb_dst(skb), NULL); 1576 ip_send_check(iph); 1577 1578 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1579 nf_reset(skb); 1580 } 1581 1582 static inline int ipmr_forward_finish(struct sk_buff *skb) 1583 { 1584 struct ip_options *opt = &(IPCB(skb)->opt); 1585 1586 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 1587 IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); 1588 1589 if (unlikely(opt->optlen)) 1590 ip_forward_options(skb); 1591 1592 return dst_output(skb); 1593 } 1594 1595 /* 1596 * Processing handlers for ipmr_forward 1597 */ 1598 1599 static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, 1600 struct sk_buff *skb, struct mfc_cache *c, int vifi) 1601 { 1602 const struct iphdr *iph = ip_hdr(skb); 1603 struct vif_device *vif = &mrt->vif_table[vifi]; 1604 struct net_device *dev; 1605 struct rtable *rt; 1606 struct flowi4 fl4; 1607 int encap = 0; 1608 1609 if (vif->dev == NULL) 1610 goto out_free; 1611 1612 #ifdef CONFIG_IP_PIMSM 1613 if (vif->flags & VIFF_REGISTER) { 1614 vif->pkt_out++; 1615 vif->bytes_out += skb->len; 1616 vif->dev->stats.tx_bytes += skb->len; 1617 vif->dev->stats.tx_packets++; 1618 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); 1619 goto out_free; 1620 } 1621 #endif 1622 1623 if (vif->flags & VIFF_TUNNEL) { 1624 rt = ip_route_output_ports(net, &fl4, NULL, 1625 vif->remote, vif->local, 1626 0, 0, 1627 IPPROTO_IPIP, 1628 RT_TOS(iph->tos), vif->link); 1629 if (IS_ERR(rt)) 1630 goto out_free; 1631 encap = sizeof(struct iphdr); 1632 } else { 1633 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0, 1634 0, 0, 1635 IPPROTO_IPIP, 1636 RT_TOS(iph->tos), vif->link); 1637 if (IS_ERR(rt)) 1638 goto out_free; 1639 } 1640 1641 dev = rt->dst.dev; 1642 1643 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { 1644 /* Do not fragment multicasts. Alas, IPv4 does not 1645 * allow to send ICMP, so that packets will disappear 1646 * to blackhole. 1647 */ 1648 1649 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 1650 ip_rt_put(rt); 1651 goto out_free; 1652 } 1653 1654 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; 1655 1656 if (skb_cow(skb, encap)) { 1657 ip_rt_put(rt); 1658 goto out_free; 1659 } 1660 1661 vif->pkt_out++; 1662 vif->bytes_out += skb->len; 1663 1664 skb_dst_drop(skb); 1665 skb_dst_set(skb, &rt->dst); 1666 ip_decrease_ttl(ip_hdr(skb)); 1667 1668 /* FIXME: forward and output firewalls used to be called here. 1669 * What do we do with netfilter? -- RR 1670 */ 1671 if (vif->flags & VIFF_TUNNEL) { 1672 ip_encap(skb, vif->local, vif->remote); 1673 /* FIXME: extra output firewall step used to be here. 
--RR */ 1674 vif->dev->stats.tx_packets++; 1675 vif->dev->stats.tx_bytes += skb->len; 1676 } 1677 1678 IPCB(skb)->flags |= IPSKB_FORWARDED; 1679 1680 /* 1681 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally 1682 * not only before forwarding, but after forwarding on all output 1683 * interfaces. It is clear, if mrouter runs a multicasting 1684 * program, it should receive packets not depending to what interface 1685 * program is joined. 1686 * If we will not make it, the program will have to join on all 1687 * interfaces. On the other hand, multihoming host (or router, but 1688 * not mrouter) cannot join to more than one interface - it will 1689 * result in receiving multiple packets. 1690 */ 1691 NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, 1692 ipmr_forward_finish); 1693 return; 1694 1695 out_free: 1696 kfree_skb(skb); 1697 } 1698 1699 static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) 1700 { 1701 int ct; 1702 1703 for (ct = mrt->maxvif-1; ct >= 0; ct--) { 1704 if (mrt->vif_table[ct].dev == dev) 1705 break; 1706 } 1707 return ct; 1708 } 1709 1710 /* "local" means that we should preserve one skb (for local delivery) */ 1711 1712 static int ip_mr_forward(struct net *net, struct mr_table *mrt, 1713 struct sk_buff *skb, struct mfc_cache *cache, 1714 int local) 1715 { 1716 int psend = -1; 1717 int vif, ct; 1718 1719 vif = cache->mfc_parent; 1720 cache->mfc_un.res.pkt++; 1721 cache->mfc_un.res.bytes += skb->len; 1722 1723 /* 1724 * Wrong interface: drop packet and (maybe) send PIM assert. 1725 */ 1726 if (mrt->vif_table[vif].dev != skb->dev) { 1727 int true_vifi; 1728 1729 if (rt_is_output_route(skb_rtable(skb))) { 1730 /* It is our own packet, looped back. 1731 * Very complicated situation... 1732 * 1733 * The best workaround until routing daemons will be 1734 * fixed is not to redistribute packet, if it was 1735 * send through wrong interface. It means, that 1736 * multicast applications WILL NOT work for 1737 * (S,G), which have default multicast route pointing 1738 * to wrong oif. In any case, it is not a good 1739 * idea to use multicasting applications on router. 1740 */ 1741 goto dont_forward; 1742 } 1743 1744 cache->mfc_un.res.wrong_if++; 1745 true_vifi = ipmr_find_vif(mrt, skb->dev); 1746 1747 if (true_vifi >= 0 && mrt->mroute_do_assert && 1748 /* pimsm uses asserts, when switching from RPT to SPT, 1749 * so that we cannot check that packet arrived on an oif. 1750 * It is bad, but otherwise we would need to move pretty 1751 * large chunk of pimd to kernel. Ough... 
--ANK 1752 */ 1753 (mrt->mroute_do_pim || 1754 cache->mfc_un.res.ttls[true_vifi] < 255) && 1755 time_after(jiffies, 1756 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { 1757 cache->mfc_un.res.last_assert = jiffies; 1758 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); 1759 } 1760 goto dont_forward; 1761 } 1762 1763 mrt->vif_table[vif].pkt_in++; 1764 mrt->vif_table[vif].bytes_in += skb->len; 1765 1766 /* 1767 * Forward the frame 1768 */ 1769 for (ct = cache->mfc_un.res.maxvif - 1; 1770 ct >= cache->mfc_un.res.minvif; ct--) { 1771 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) { 1772 if (psend != -1) { 1773 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1774 1775 if (skb2) 1776 ipmr_queue_xmit(net, mrt, skb2, cache, 1777 psend); 1778 } 1779 psend = ct; 1780 } 1781 } 1782 if (psend != -1) { 1783 if (local) { 1784 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1785 1786 if (skb2) 1787 ipmr_queue_xmit(net, mrt, skb2, cache, psend); 1788 } else { 1789 ipmr_queue_xmit(net, mrt, skb, cache, psend); 1790 return 0; 1791 } 1792 } 1793 1794 dont_forward: 1795 if (!local) 1796 kfree_skb(skb); 1797 return 0; 1798 } 1799 1800 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb) 1801 { 1802 struct rtable *rt = skb_rtable(skb); 1803 struct iphdr *iph = ip_hdr(skb); 1804 struct flowi4 fl4 = { 1805 .daddr = iph->daddr, 1806 .saddr = iph->saddr, 1807 .flowi4_tos = RT_TOS(iph->tos), 1808 .flowi4_oif = (rt_is_output_route(rt) ? 1809 skb->dev->ifindex : 0), 1810 .flowi4_iif = (rt_is_output_route(rt) ? 1811 LOOPBACK_IFINDEX : 1812 skb->dev->ifindex), 1813 .flowi4_mark = skb->mark, 1814 }; 1815 struct mr_table *mrt; 1816 int err; 1817 1818 err = ipmr_fib_lookup(net, &fl4, &mrt); 1819 if (err) 1820 return ERR_PTR(err); 1821 return mrt; 1822 } 1823 1824 /* 1825 * Multicast packets for forwarding arrive here 1826 * Called with rcu_read_lock(); 1827 */ 1828 1829 int ip_mr_input(struct sk_buff *skb) 1830 { 1831 struct mfc_cache *cache; 1832 struct net *net = dev_net(skb->dev); 1833 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; 1834 struct mr_table *mrt; 1835 1836 /* Packet is looped back after forward, it should not be 1837 * forwarded second time, but still can be delivered locally. 1838 */ 1839 if (IPCB(skb)->flags & IPSKB_FORWARDED) 1840 goto dont_forward; 1841 1842 mrt = ipmr_rt_fib_lookup(net, skb); 1843 if (IS_ERR(mrt)) { 1844 kfree_skb(skb); 1845 return PTR_ERR(mrt); 1846 } 1847 if (!local) { 1848 if (IPCB(skb)->opt.router_alert) { 1849 if (ip_call_ra_chain(skb)) 1850 return 0; 1851 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) { 1852 /* IGMPv1 (and broken IGMPv2 implementations sort of 1853 * Cisco IOS <= 11.2(8)) do not put router alert 1854 * option to IGMP packets destined to routable 1855 * groups. It is very bad, because it means 1856 * that we can forward NO IGMP messages. 
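 *
 * Such IGMP packets are therefore handed straight to the routing
 * daemon's raw socket via raw_rcv() below.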
1857 */ 1858 struct sock *mroute_sk; 1859 1860 mroute_sk = rcu_dereference(mrt->mroute_sk); 1861 if (mroute_sk) { 1862 nf_reset(skb); 1863 raw_rcv(mroute_sk, skb); 1864 return 0; 1865 } 1866 } 1867 } 1868 1869 /* already under rcu_read_lock() */ 1870 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 1871 1872 /* 1873 * No usable cache entry 1874 */ 1875 if (cache == NULL) { 1876 int vif; 1877 1878 if (local) { 1879 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1880 ip_local_deliver(skb); 1881 if (skb2 == NULL) 1882 return -ENOBUFS; 1883 skb = skb2; 1884 } 1885 1886 read_lock(&mrt_lock); 1887 vif = ipmr_find_vif(mrt, skb->dev); 1888 if (vif >= 0) { 1889 int err2 = ipmr_cache_unresolved(mrt, vif, skb); 1890 read_unlock(&mrt_lock); 1891 1892 return err2; 1893 } 1894 read_unlock(&mrt_lock); 1895 kfree_skb(skb); 1896 return -ENODEV; 1897 } 1898 1899 read_lock(&mrt_lock); 1900 ip_mr_forward(net, mrt, skb, cache, local); 1901 read_unlock(&mrt_lock); 1902 1903 if (local) 1904 return ip_local_deliver(skb); 1905 1906 return 0; 1907 1908 dont_forward: 1909 if (local) 1910 return ip_local_deliver(skb); 1911 kfree_skb(skb); 1912 return 0; 1913 } 1914 1915 #ifdef CONFIG_IP_PIMSM 1916 /* called with rcu_read_lock() */ 1917 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, 1918 unsigned int pimlen) 1919 { 1920 struct net_device *reg_dev = NULL; 1921 struct iphdr *encap; 1922 1923 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); 1924 /* 1925 * Check that: 1926 * a. packet is really sent to a multicast group 1927 * b. packet is not a NULL-REGISTER 1928 * c. packet is not truncated 1929 */ 1930 if (!ipv4_is_multicast(encap->daddr) || 1931 encap->tot_len == 0 || 1932 ntohs(encap->tot_len) + pimlen > skb->len) 1933 return 1; 1934 1935 read_lock(&mrt_lock); 1936 if (mrt->mroute_reg_vif_num >= 0) 1937 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; 1938 read_unlock(&mrt_lock); 1939 1940 if (reg_dev == NULL) 1941 return 1; 1942 1943 skb->mac_header = skb->network_header; 1944 skb_pull(skb, (u8 *)encap - skb->data); 1945 skb_reset_network_header(skb); 1946 skb->protocol = htons(ETH_P_IP); 1947 skb->ip_summed = CHECKSUM_NONE; 1948 skb->pkt_type = PACKET_HOST; 1949 1950 skb_tunnel_rx(skb, reg_dev); 1951 1952 netif_rx(skb); 1953 1954 return NET_RX_SUCCESS; 1955 } 1956 #endif 1957 1958 #ifdef CONFIG_IP_PIMSM_V1 1959 /* 1960 * Handle IGMP messages of PIMv1 1961 */ 1962 1963 int pim_rcv_v1(struct sk_buff *skb) 1964 { 1965 struct igmphdr *pim; 1966 struct net *net = dev_net(skb->dev); 1967 struct mr_table *mrt; 1968 1969 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 1970 goto drop; 1971 1972 pim = igmp_hdr(skb); 1973 1974 mrt = ipmr_rt_fib_lookup(net, skb); 1975 if (IS_ERR(mrt)) 1976 goto drop; 1977 if (!mrt->mroute_do_pim || 1978 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) 1979 goto drop; 1980 1981 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 1982 drop: 1983 kfree_skb(skb); 1984 } 1985 return 0; 1986 } 1987 #endif 1988 1989 #ifdef CONFIG_IP_PIMSM_V2 1990 static int pim_rcv(struct sk_buff *skb) 1991 { 1992 struct pimreghdr *pim; 1993 struct net *net = dev_net(skb->dev); 1994 struct mr_table *mrt; 1995 1996 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 1997 goto drop; 1998 1999 pim = (struct pimreghdr *)skb_transport_header(skb); 2000 if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) || 2001 (pim->flags & PIM_NULL_REGISTER) || 2002 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && 2003 
csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 2004 goto drop; 2005 2006 mrt = ipmr_rt_fib_lookup(net, skb); 2007 if (IS_ERR(mrt)) 2008 goto drop; 2009 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 2010 drop: 2011 kfree_skb(skb); 2012 } 2013 return 0; 2014 } 2015 #endif 2016 2017 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2018 struct mfc_cache *c, struct rtmsg *rtm) 2019 { 2020 int ct; 2021 struct rtnexthop *nhp; 2022 struct nlattr *mp_attr; 2023 2024 /* If cache is unresolved, don't try to parse IIF and OIF */ 2025 if (c->mfc_parent >= MAXVIFS) 2026 return -ENOENT; 2027 2028 if (VIF_EXISTS(mrt, c->mfc_parent) && 2029 nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0) 2030 return -EMSGSIZE; 2031 2032 if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH))) 2033 return -EMSGSIZE; 2034 2035 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 2036 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { 2037 if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) { 2038 nla_nest_cancel(skb, mp_attr); 2039 return -EMSGSIZE; 2040 } 2041 2042 nhp->rtnh_flags = 0; 2043 nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; 2044 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; 2045 nhp->rtnh_len = sizeof(*nhp); 2046 } 2047 } 2048 2049 nla_nest_end(skb, mp_attr); 2050 2051 rtm->rtm_type = RTN_MULTICAST; 2052 return 1; 2053 } 2054 2055 int ipmr_get_route(struct net *net, struct sk_buff *skb, 2056 __be32 saddr, __be32 daddr, 2057 struct rtmsg *rtm, int nowait) 2058 { 2059 struct mfc_cache *cache; 2060 struct mr_table *mrt; 2061 int err; 2062 2063 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2064 if (mrt == NULL) 2065 return -ENOENT; 2066 2067 rcu_read_lock(); 2068 cache = ipmr_cache_find(mrt, saddr, daddr); 2069 2070 if (cache == NULL) { 2071 struct sk_buff *skb2; 2072 struct iphdr *iph; 2073 struct net_device *dev; 2074 int vif = -1; 2075 2076 if (nowait) { 2077 rcu_read_unlock(); 2078 return -EAGAIN; 2079 } 2080 2081 dev = skb->dev; 2082 read_lock(&mrt_lock); 2083 if (dev) 2084 vif = ipmr_find_vif(mrt, dev); 2085 if (vif < 0) { 2086 read_unlock(&mrt_lock); 2087 rcu_read_unlock(); 2088 return -ENODEV; 2089 } 2090 skb2 = skb_clone(skb, GFP_ATOMIC); 2091 if (!skb2) { 2092 read_unlock(&mrt_lock); 2093 rcu_read_unlock(); 2094 return -ENOMEM; 2095 } 2096 2097 skb_push(skb2, sizeof(struct iphdr)); 2098 skb_reset_network_header(skb2); 2099 iph = ip_hdr(skb2); 2100 iph->ihl = sizeof(struct iphdr) >> 2; 2101 iph->saddr = saddr; 2102 iph->daddr = daddr; 2103 iph->version = 0; 2104 err = ipmr_cache_unresolved(mrt, vif, skb2); 2105 read_unlock(&mrt_lock); 2106 rcu_read_unlock(); 2107 return err; 2108 } 2109 2110 read_lock(&mrt_lock); 2111 if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY)) 2112 cache->mfc_flags |= MFC_NOTIFY; 2113 err = __ipmr_fill_mroute(mrt, skb, cache, rtm); 2114 read_unlock(&mrt_lock); 2115 rcu_read_unlock(); 2116 return err; 2117 } 2118 2119 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2120 u32 portid, u32 seq, struct mfc_cache *c) 2121 { 2122 struct nlmsghdr *nlh; 2123 struct rtmsg *rtm; 2124 2125 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); 2126 if (nlh == NULL) 2127 return -EMSGSIZE; 2128 2129 rtm = nlmsg_data(nlh); 2130 rtm->rtm_family = RTNL_FAMILY_IPMR; 2131 rtm->rtm_dst_len = 32; 2132 rtm->rtm_src_len = 32; 2133 rtm->rtm_tos = 0; 2134 rtm->rtm_table = mrt->id; 2135 if (nla_put_u32(skb, RTA_TABLE, mrt->id)) 2136 goto nla_put_failure; 2137 rtm->rtm_type = RTN_MULTICAST; 2138 
rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2139 rtm->rtm_protocol = RTPROT_UNSPEC; 2140 rtm->rtm_flags = 0; 2141 2142 if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) || 2143 nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp)) 2144 goto nla_put_failure; 2145 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) 2146 goto nla_put_failure; 2147 2148 return nlmsg_end(skb, nlh); 2149 2150 nla_put_failure: 2151 nlmsg_cancel(skb, nlh); 2152 return -EMSGSIZE; 2153 } 2154 2155 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) 2156 { 2157 struct net *net = sock_net(skb->sk); 2158 struct mr_table *mrt; 2159 struct mfc_cache *mfc; 2160 unsigned int t = 0, s_t; 2161 unsigned int h = 0, s_h; 2162 unsigned int e = 0, s_e; 2163 2164 s_t = cb->args[0]; 2165 s_h = cb->args[1]; 2166 s_e = cb->args[2]; 2167 2168 rcu_read_lock(); 2169 ipmr_for_each_table(mrt, net) { 2170 if (t < s_t) 2171 goto next_table; 2172 if (t > s_t) 2173 s_h = 0; 2174 for (h = s_h; h < MFC_LINES; h++) { 2175 list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) { 2176 if (e < s_e) 2177 goto next_entry; 2178 if (ipmr_fill_mroute(mrt, skb, 2179 NETLINK_CB(cb->skb).portid, 2180 cb->nlh->nlmsg_seq, 2181 mfc) < 0) 2182 goto done; 2183 next_entry: 2184 e++; 2185 } 2186 e = s_e = 0; 2187 } 2188 s_h = 0; 2189 next_table: 2190 t++; 2191 } 2192 done: 2193 rcu_read_unlock(); 2194 2195 cb->args[2] = e; 2196 cb->args[1] = h; 2197 cb->args[0] = t; 2198 2199 return skb->len; 2200 } 2201 2202 #ifdef CONFIG_PROC_FS 2203 /* 2204 * The /proc interfaces to multicast routing : 2205 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif 2206 */ 2207 struct ipmr_vif_iter { 2208 struct seq_net_private p; 2209 struct mr_table *mrt; 2210 int ct; 2211 }; 2212 2213 static struct vif_device *ipmr_vif_seq_idx(struct net *net, 2214 struct ipmr_vif_iter *iter, 2215 loff_t pos) 2216 { 2217 struct mr_table *mrt = iter->mrt; 2218 2219 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { 2220 if (!VIF_EXISTS(mrt, iter->ct)) 2221 continue; 2222 if (pos-- == 0) 2223 return &mrt->vif_table[iter->ct]; 2224 } 2225 return NULL; 2226 } 2227 2228 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) 2229 __acquires(mrt_lock) 2230 { 2231 struct ipmr_vif_iter *iter = seq->private; 2232 struct net *net = seq_file_net(seq); 2233 struct mr_table *mrt; 2234 2235 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2236 if (mrt == NULL) 2237 return ERR_PTR(-ENOENT); 2238 2239 iter->mrt = mrt; 2240 2241 read_lock(&mrt_lock); 2242 return *pos ? 
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
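/* The MFC iterator walks two differently protected lists: the resolved
 * entries in mfc_cache_array[] under rcu_read_lock(), then the
 * unresolved queue under mfc_unres_lock.  _next() drops the RCU read
 * lock when it crosses from the hash table to the unresolved queue, and
 * _stop() releases whichever protection is still held, keyed off
 * it->cache.
 */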
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif
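/* With CONFIG_IP_PIMSM_V2, ip_mr_init() below registers this handler for
 * IPPROTO_PIM (protocol 103), so natively received PIM Register messages
 * reach pim_rcv() and can be decapsulated onto the register vif.
 * netns_ok marks the handler as safe to invoke in non-initial network
 * namespaces.
 */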
/*
 *	Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip_mr_vif");
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
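/* Userspace view (rough sketch, not kernel code): besides the /proc files
 * above, the multicast forwarding cache can be dumped over rtnetlink via
 * the handler registered in ip_mr_init().  A dump request would look
 * roughly like
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtmsg	rtm;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct rtmsg)),
 *		.nlh.nlmsg_type	 = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.rtm.rtm_family	 = RTNL_FAMILY_IPMR,
 *	};
 *
 * sent on a NETLINK_ROUTE socket; each reply message is one cache entry
 * as formatted by ipmr_fill_mroute().
 */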