1 /* 2 * IP multicast routing support for mrouted 3.6/3.8 3 * 4 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk> 5 * Linux Consultancy and Custom Driver Development 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; either version 10 * 2 of the License, or (at your option) any later version. 11 * 12 * Fixes: 13 * Michael Chastain : Incorrect size of copying. 14 * Alan Cox : Added the cache manager code 15 * Alan Cox : Fixed the clone/copy bug and device race. 16 * Mike McLagan : Routing by source 17 * Malcolm Beattie : Buffer handling fixes. 18 * Alexey Kuznetsov : Double buffer free and other fixes. 19 * SVR Anand : Fixed several multicast bugs and problems. 20 * Alexey Kuznetsov : Status, optimisations and more. 21 * Brad Parker : Better behaviour on mrouted upcall 22 * overflow. 23 * Carlos Picoto : PIMv1 Support 24 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header 25 * Relax this requirement to work with older peers. 26 * 27 */ 28 29 #include <asm/system.h> 30 #include <asm/uaccess.h> 31 #include <linux/types.h> 32 #include <linux/capability.h> 33 #include <linux/errno.h> 34 #include <linux/timer.h> 35 #include <linux/mm.h> 36 #include <linux/kernel.h> 37 #include <linux/fcntl.h> 38 #include <linux/stat.h> 39 #include <linux/socket.h> 40 #include <linux/in.h> 41 #include <linux/inet.h> 42 #include <linux/netdevice.h> 43 #include <linux/inetdevice.h> 44 #include <linux/igmp.h> 45 #include <linux/proc_fs.h> 46 #include <linux/seq_file.h> 47 #include <linux/mroute.h> 48 #include <linux/init.h> 49 #include <linux/if_ether.h> 50 #include <linux/slab.h> 51 #include <net/net_namespace.h> 52 #include <net/ip.h> 53 #include <net/protocol.h> 54 #include <linux/skbuff.h> 55 #include <net/route.h> 56 #include <net/sock.h> 57 #include <net/icmp.h> 58 #include <net/udp.h> 59 #include <net/raw.h> 60 #include <linux/notifier.h> 61 #include <linux/if_arp.h> 62 #include <linux/netfilter_ipv4.h> 63 #include <linux/compat.h> 64 #include <net/ipip.h> 65 #include <net/checksum.h> 66 #include <net/netlink.h> 67 #include <net/fib_rules.h> 68 69 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) 70 #define CONFIG_IP_PIMSM 1 71 #endif 72 73 struct mr_table { 74 struct list_head list; 75 #ifdef CONFIG_NET_NS 76 struct net *net; 77 #endif 78 u32 id; 79 struct sock __rcu *mroute_sk; 80 struct timer_list ipmr_expire_timer; 81 struct list_head mfc_unres_queue; 82 struct list_head mfc_cache_array[MFC_LINES]; 83 struct vif_device vif_table[MAXVIFS]; 84 int maxvif; 85 atomic_t cache_resolve_queue_len; 86 int mroute_do_assert; 87 int mroute_do_pim; 88 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) 89 int mroute_reg_vif_num; 90 #endif 91 }; 92 93 struct ipmr_rule { 94 struct fib_rule common; 95 }; 96 97 struct ipmr_result { 98 struct mr_table *mrt; 99 }; 100 101 /* Big lock, protecting vif table, mrt cache and mroute socket state. 102 * Note that the changes are semaphored via rtnl_lock. 103 */ 104 105 static DEFINE_RWLOCK(mrt_lock); 106 107 /* 108 * Multicast router control variables 109 */ 110 111 #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) 112 113 /* Special spinlock for queue of unresolved entries */ 114 static DEFINE_SPINLOCK(mfc_unres_lock); 115 116 /* We return to original Alan's scheme. Hash table of resolved 117 * entries is changed only in process context and protected 118 * with weak lock mrt_lock. 
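 * On the read side the resolved cache is traversed under RCU:
 * ipmr_cache_find() runs inside rcu_read_lock() and resolved entries are
 * freed through call_rcu() in ipmr_cache_free().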
Queue of unresolved entries is protected 119 * with strong spinlock mfc_unres_lock. 120 * 121 * In this case data path is free of exclusive locks at all. 122 */ 123 124 static struct kmem_cache *mrt_cachep __read_mostly; 125 126 static struct mr_table *ipmr_new_table(struct net *net, u32 id); 127 static int ip_mr_forward(struct net *net, struct mr_table *mrt, 128 struct sk_buff *skb, struct mfc_cache *cache, 129 int local); 130 static int ipmr_cache_report(struct mr_table *mrt, 131 struct sk_buff *pkt, vifi_t vifi, int assert); 132 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 133 struct mfc_cache *c, struct rtmsg *rtm); 134 static void ipmr_expire_process(unsigned long arg); 135 136 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 137 #define ipmr_for_each_table(mrt, net) \ 138 list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list) 139 140 static struct mr_table *ipmr_get_table(struct net *net, u32 id) 141 { 142 struct mr_table *mrt; 143 144 ipmr_for_each_table(mrt, net) { 145 if (mrt->id == id) 146 return mrt; 147 } 148 return NULL; 149 } 150 151 static int ipmr_fib_lookup(struct net *net, struct flowi *flp, 152 struct mr_table **mrt) 153 { 154 struct ipmr_result res; 155 struct fib_lookup_arg arg = { .result = &res, }; 156 int err; 157 158 err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg); 159 if (err < 0) 160 return err; 161 *mrt = res.mrt; 162 return 0; 163 } 164 165 static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, 166 int flags, struct fib_lookup_arg *arg) 167 { 168 struct ipmr_result *res = arg->result; 169 struct mr_table *mrt; 170 171 switch (rule->action) { 172 case FR_ACT_TO_TBL: 173 break; 174 case FR_ACT_UNREACHABLE: 175 return -ENETUNREACH; 176 case FR_ACT_PROHIBIT: 177 return -EACCES; 178 case FR_ACT_BLACKHOLE: 179 default: 180 return -EINVAL; 181 } 182 183 mrt = ipmr_get_table(rule->fr_net, rule->table); 184 if (mrt == NULL) 185 return -EAGAIN; 186 res->mrt = mrt; 187 return 0; 188 } 189 190 static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) 191 { 192 return 1; 193 } 194 195 static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { 196 FRA_GENERIC_POLICY, 197 }; 198 199 static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 200 struct fib_rule_hdr *frh, struct nlattr **tb) 201 { 202 return 0; 203 } 204 205 static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, 206 struct nlattr **tb) 207 { 208 return 1; 209 } 210 211 static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 212 struct fib_rule_hdr *frh) 213 { 214 frh->dst_len = 0; 215 frh->src_len = 0; 216 frh->tos = 0; 217 return 0; 218 } 219 220 static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = { 221 .family = RTNL_FAMILY_IPMR, 222 .rule_size = sizeof(struct ipmr_rule), 223 .addr_size = sizeof(u32), 224 .action = ipmr_rule_action, 225 .match = ipmr_rule_match, 226 .configure = ipmr_rule_configure, 227 .compare = ipmr_rule_compare, 228 .default_pref = fib_default_rule_pref, 229 .fill = ipmr_rule_fill, 230 .nlgroup = RTNLGRP_IPV4_RULE, 231 .policy = ipmr_rule_policy, 232 .owner = THIS_MODULE, 233 }; 234 235 static int __net_init ipmr_rules_init(struct net *net) 236 { 237 struct fib_rules_ops *ops; 238 struct mr_table *mrt; 239 int err; 240 241 ops = fib_rules_register(&ipmr_rules_ops_template, net); 242 if (IS_ERR(ops)) 243 return PTR_ERR(ops); 244 245 INIT_LIST_HEAD(&net->ipv4.mr_tables); 246 247 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); 248 if (mrt == 
NULL) { 249 err = -ENOMEM; 250 goto err1; 251 } 252 253 err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); 254 if (err < 0) 255 goto err2; 256 257 net->ipv4.mr_rules_ops = ops; 258 return 0; 259 260 err2: 261 kfree(mrt); 262 err1: 263 fib_rules_unregister(ops); 264 return err; 265 } 266 267 static void __net_exit ipmr_rules_exit(struct net *net) 268 { 269 struct mr_table *mrt, *next; 270 271 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { 272 list_del(&mrt->list); 273 kfree(mrt); 274 } 275 fib_rules_unregister(net->ipv4.mr_rules_ops); 276 } 277 #else 278 #define ipmr_for_each_table(mrt, net) \ 279 for (mrt = net->ipv4.mrt; mrt; mrt = NULL) 280 281 static struct mr_table *ipmr_get_table(struct net *net, u32 id) 282 { 283 return net->ipv4.mrt; 284 } 285 286 static int ipmr_fib_lookup(struct net *net, struct flowi *flp, 287 struct mr_table **mrt) 288 { 289 *mrt = net->ipv4.mrt; 290 return 0; 291 } 292 293 static int __net_init ipmr_rules_init(struct net *net) 294 { 295 net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); 296 return net->ipv4.mrt ? 0 : -ENOMEM; 297 } 298 299 static void __net_exit ipmr_rules_exit(struct net *net) 300 { 301 kfree(net->ipv4.mrt); 302 } 303 #endif 304 305 static struct mr_table *ipmr_new_table(struct net *net, u32 id) 306 { 307 struct mr_table *mrt; 308 unsigned int i; 309 310 mrt = ipmr_get_table(net, id); 311 if (mrt != NULL) 312 return mrt; 313 314 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); 315 if (mrt == NULL) 316 return NULL; 317 write_pnet(&mrt->net, net); 318 mrt->id = id; 319 320 /* Forwarding cache */ 321 for (i = 0; i < MFC_LINES; i++) 322 INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); 323 324 INIT_LIST_HEAD(&mrt->mfc_unres_queue); 325 326 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, 327 (unsigned long)mrt); 328 329 #ifdef CONFIG_IP_PIMSM 330 mrt->mroute_reg_vif_num = -1; 331 #endif 332 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 333 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); 334 #endif 335 return mrt; 336 } 337 338 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ 339 340 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) 341 { 342 struct net *net = dev_net(dev); 343 344 dev_close(dev); 345 346 dev = __dev_get_by_name(net, "tunl0"); 347 if (dev) { 348 const struct net_device_ops *ops = dev->netdev_ops; 349 struct ifreq ifr; 350 struct ip_tunnel_parm p; 351 352 memset(&p, 0, sizeof(p)); 353 p.iph.daddr = v->vifc_rmt_addr.s_addr; 354 p.iph.saddr = v->vifc_lcl_addr.s_addr; 355 p.iph.version = 4; 356 p.iph.ihl = 5; 357 p.iph.protocol = IPPROTO_IPIP; 358 sprintf(p.name, "dvmrp%d", v->vifc_vifi); 359 ifr.ifr_ifru.ifru_data = (__force void __user *)&p; 360 361 if (ops->ndo_do_ioctl) { 362 mm_segment_t oldfs = get_fs(); 363 364 set_fs(KERNEL_DS); 365 ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL); 366 set_fs(oldfs); 367 } 368 } 369 } 370 371 static 372 struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) 373 { 374 struct net_device *dev; 375 376 dev = __dev_get_by_name(net, "tunl0"); 377 378 if (dev) { 379 const struct net_device_ops *ops = dev->netdev_ops; 380 int err; 381 struct ifreq ifr; 382 struct ip_tunnel_parm p; 383 struct in_device *in_dev; 384 385 memset(&p, 0, sizeof(p)); 386 p.iph.daddr = v->vifc_rmt_addr.s_addr; 387 p.iph.saddr = v->vifc_lcl_addr.s_addr; 388 p.iph.version = 4; 389 p.iph.ihl = 5; 390 p.iph.protocol = IPPROTO_IPIP; 391 sprintf(p.name, "dvmrp%d", v->vifc_vifi); 392 ifr.ifr_ifru.ifru_data = (__force void __user *)&p; 393 394 if 
(ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi fl = {
		.oif		= dev->ifindex,
		.iif		= skb->skb_iif,
		.mark		= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);

	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering.
*/ 518 rtnl_unlock(); 519 rtnl_lock(); 520 521 unregister_netdevice(dev); 522 return NULL; 523 } 524 #endif 525 526 /* 527 * Delete a VIF entry 528 * @notify: Set to 1, if the caller is a notifier_call 529 */ 530 531 static int vif_delete(struct mr_table *mrt, int vifi, int notify, 532 struct list_head *head) 533 { 534 struct vif_device *v; 535 struct net_device *dev; 536 struct in_device *in_dev; 537 538 if (vifi < 0 || vifi >= mrt->maxvif) 539 return -EADDRNOTAVAIL; 540 541 v = &mrt->vif_table[vifi]; 542 543 write_lock_bh(&mrt_lock); 544 dev = v->dev; 545 v->dev = NULL; 546 547 if (!dev) { 548 write_unlock_bh(&mrt_lock); 549 return -EADDRNOTAVAIL; 550 } 551 552 #ifdef CONFIG_IP_PIMSM 553 if (vifi == mrt->mroute_reg_vif_num) 554 mrt->mroute_reg_vif_num = -1; 555 #endif 556 557 if (vifi + 1 == mrt->maxvif) { 558 int tmp; 559 560 for (tmp = vifi - 1; tmp >= 0; tmp--) { 561 if (VIF_EXISTS(mrt, tmp)) 562 break; 563 } 564 mrt->maxvif = tmp+1; 565 } 566 567 write_unlock_bh(&mrt_lock); 568 569 dev_set_allmulti(dev, -1); 570 571 in_dev = __in_dev_get_rtnl(dev); 572 if (in_dev) { 573 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--; 574 ip_rt_multicast_event(in_dev); 575 } 576 577 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify) 578 unregister_netdevice_queue(dev, head); 579 580 dev_put(dev); 581 return 0; 582 } 583 584 static void ipmr_cache_free_rcu(struct rcu_head *head) 585 { 586 struct mfc_cache *c = container_of(head, struct mfc_cache, rcu); 587 588 kmem_cache_free(mrt_cachep, c); 589 } 590 591 static inline void ipmr_cache_free(struct mfc_cache *c) 592 { 593 call_rcu(&c->rcu, ipmr_cache_free_rcu); 594 } 595 596 /* Destroy an unresolved cache entry, killing queued skbs 597 * and reporting error to netlink readers. 598 */ 599 600 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) 601 { 602 struct net *net = read_pnet(&mrt->net); 603 struct sk_buff *skb; 604 struct nlmsgerr *e; 605 606 atomic_dec(&mrt->cache_resolve_queue_len); 607 608 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { 609 if (ip_hdr(skb)->version == 0) { 610 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 611 nlh->nlmsg_type = NLMSG_ERROR; 612 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 613 skb_trim(skb, nlh->nlmsg_len); 614 e = NLMSG_DATA(nlh); 615 e->error = -ETIMEDOUT; 616 memset(&e->msg, 0, sizeof(e->msg)); 617 618 rtnl_unicast(skb, net, NETLINK_CB(skb).pid); 619 } else { 620 kfree_skb(skb); 621 } 622 } 623 624 ipmr_cache_free(c); 625 } 626 627 628 /* Timer process for the unresolved queue. */ 629 630 static void ipmr_expire_process(unsigned long arg) 631 { 632 struct mr_table *mrt = (struct mr_table *)arg; 633 unsigned long now; 634 unsigned long expires; 635 struct mfc_cache *c, *next; 636 637 if (!spin_trylock(&mfc_unres_lock)) { 638 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); 639 return; 640 } 641 642 if (list_empty(&mrt->mfc_unres_queue)) 643 goto out; 644 645 now = jiffies; 646 expires = 10*HZ; 647 648 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 649 if (time_after(c->mfc_un.unres.expires, now)) { 650 unsigned long interval = c->mfc_un.unres.expires - now; 651 if (interval < expires) 652 expires = interval; 653 continue; 654 } 655 656 list_del(&c->list); 657 ipmr_destroy_unres(mrt, c); 658 } 659 660 if (!list_empty(&mrt->mfc_unres_queue)) 661 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); 662 663 out: 664 spin_unlock(&mfc_unres_lock); 665 } 666 667 /* Fill oifs list. 
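 * A vif becomes an outgoing interface for the entry only if its value in
 * ttls[] is non-zero and below 255; all other slots are reset to 255.
 * minvif/maxvif bound the scan that ip_mr_forward() makes over res.ttls[].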
It is called under write locked mrt_lock. */ 668 669 static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, 670 unsigned char *ttls) 671 { 672 int vifi; 673 674 cache->mfc_un.res.minvif = MAXVIFS; 675 cache->mfc_un.res.maxvif = 0; 676 memset(cache->mfc_un.res.ttls, 255, MAXVIFS); 677 678 for (vifi = 0; vifi < mrt->maxvif; vifi++) { 679 if (VIF_EXISTS(mrt, vifi) && 680 ttls[vifi] && ttls[vifi] < 255) { 681 cache->mfc_un.res.ttls[vifi] = ttls[vifi]; 682 if (cache->mfc_un.res.minvif > vifi) 683 cache->mfc_un.res.minvif = vifi; 684 if (cache->mfc_un.res.maxvif <= vifi) 685 cache->mfc_un.res.maxvif = vifi + 1; 686 } 687 } 688 } 689 690 static int vif_add(struct net *net, struct mr_table *mrt, 691 struct vifctl *vifc, int mrtsock) 692 { 693 int vifi = vifc->vifc_vifi; 694 struct vif_device *v = &mrt->vif_table[vifi]; 695 struct net_device *dev; 696 struct in_device *in_dev; 697 int err; 698 699 /* Is vif busy ? */ 700 if (VIF_EXISTS(mrt, vifi)) 701 return -EADDRINUSE; 702 703 switch (vifc->vifc_flags) { 704 #ifdef CONFIG_IP_PIMSM 705 case VIFF_REGISTER: 706 /* 707 * Special Purpose VIF in PIM 708 * All the packets will be sent to the daemon 709 */ 710 if (mrt->mroute_reg_vif_num >= 0) 711 return -EADDRINUSE; 712 dev = ipmr_reg_vif(net, mrt); 713 if (!dev) 714 return -ENOBUFS; 715 err = dev_set_allmulti(dev, 1); 716 if (err) { 717 unregister_netdevice(dev); 718 dev_put(dev); 719 return err; 720 } 721 break; 722 #endif 723 case VIFF_TUNNEL: 724 dev = ipmr_new_tunnel(net, vifc); 725 if (!dev) 726 return -ENOBUFS; 727 err = dev_set_allmulti(dev, 1); 728 if (err) { 729 ipmr_del_tunnel(dev, vifc); 730 dev_put(dev); 731 return err; 732 } 733 break; 734 735 case VIFF_USE_IFINDEX: 736 case 0: 737 if (vifc->vifc_flags == VIFF_USE_IFINDEX) { 738 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); 739 if (dev && __in_dev_get_rtnl(dev) == NULL) { 740 dev_put(dev); 741 return -EADDRNOTAVAIL; 742 } 743 } else { 744 dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); 745 } 746 if (!dev) 747 return -EADDRNOTAVAIL; 748 err = dev_set_allmulti(dev, 1); 749 if (err) { 750 dev_put(dev); 751 return err; 752 } 753 break; 754 default: 755 return -EINVAL; 756 } 757 758 in_dev = __in_dev_get_rtnl(dev); 759 if (!in_dev) { 760 dev_put(dev); 761 return -EADDRNOTAVAIL; 762 } 763 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; 764 ip_rt_multicast_event(in_dev); 765 766 /* Fill in the VIF structures */ 767 768 v->rate_limit = vifc->vifc_rate_limit; 769 v->local = vifc->vifc_lcl_addr.s_addr; 770 v->remote = vifc->vifc_rmt_addr.s_addr; 771 v->flags = vifc->vifc_flags; 772 if (!mrtsock) 773 v->flags |= VIFF_STATIC; 774 v->threshold = vifc->vifc_threshold; 775 v->bytes_in = 0; 776 v->bytes_out = 0; 777 v->pkt_in = 0; 778 v->pkt_out = 0; 779 v->link = dev->ifindex; 780 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) 781 v->link = dev->iflink; 782 783 /* And finish update writing critical data */ 784 write_lock_bh(&mrt_lock); 785 v->dev = dev; 786 #ifdef CONFIG_IP_PIMSM 787 if (v->flags & VIFF_REGISTER) 788 mrt->mroute_reg_vif_num = vifi; 789 #endif 790 if (vifi+1 > mrt->maxvif) 791 mrt->maxvif = vifi+1; 792 write_unlock_bh(&mrt_lock); 793 return 0; 794 } 795 796 /* called with rcu_read_lock() */ 797 static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, 798 __be32 origin, 799 __be32 mcastgrp) 800 { 801 int line = MFC_HASH(mcastgrp, origin); 802 struct mfc_cache *c; 803 804 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) { 805 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) 806 
return c; 807 } 808 return NULL; 809 } 810 811 /* 812 * Allocate a multicast cache entry 813 */ 814 static struct mfc_cache *ipmr_cache_alloc(void) 815 { 816 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 817 818 if (c) 819 c->mfc_un.res.minvif = MAXVIFS; 820 return c; 821 } 822 823 static struct mfc_cache *ipmr_cache_alloc_unres(void) 824 { 825 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); 826 827 if (c) { 828 skb_queue_head_init(&c->mfc_un.unres.unresolved); 829 c->mfc_un.unres.expires = jiffies + 10*HZ; 830 } 831 return c; 832 } 833 834 /* 835 * A cache entry has gone into a resolved state from queued 836 */ 837 838 static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, 839 struct mfc_cache *uc, struct mfc_cache *c) 840 { 841 struct sk_buff *skb; 842 struct nlmsgerr *e; 843 844 /* Play the pending entries through our router */ 845 846 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { 847 if (ip_hdr(skb)->version == 0) { 848 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 849 850 if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { 851 nlh->nlmsg_len = skb_tail_pointer(skb) - 852 (u8 *)nlh; 853 } else { 854 nlh->nlmsg_type = NLMSG_ERROR; 855 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 856 skb_trim(skb, nlh->nlmsg_len); 857 e = NLMSG_DATA(nlh); 858 e->error = -EMSGSIZE; 859 memset(&e->msg, 0, sizeof(e->msg)); 860 } 861 862 rtnl_unicast(skb, net, NETLINK_CB(skb).pid); 863 } else { 864 ip_mr_forward(net, mrt, skb, c, 0); 865 } 866 } 867 } 868 869 /* 870 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted 871 * expects the following bizarre scheme. 872 * 873 * Called under mrt_lock. 874 */ 875 876 static int ipmr_cache_report(struct mr_table *mrt, 877 struct sk_buff *pkt, vifi_t vifi, int assert) 878 { 879 struct sk_buff *skb; 880 const int ihl = ip_hdrlen(pkt); 881 struct igmphdr *igmp; 882 struct igmpmsg *msg; 883 struct sock *mroute_sk; 884 int ret; 885 886 #ifdef CONFIG_IP_PIMSM 887 if (assert == IGMPMSG_WHOLEPKT) 888 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr)); 889 else 890 #endif 891 skb = alloc_skb(128, GFP_ATOMIC); 892 893 if (!skb) 894 return -ENOBUFS; 895 896 #ifdef CONFIG_IP_PIMSM 897 if (assert == IGMPMSG_WHOLEPKT) { 898 /* Ugly, but we have no choice with this interface. 899 * Duplicate old header, fix ihl, length etc. 
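 * (struct igmpmsg is laid out to overlay the IP header: im_src/im_dst
 * alias iph->saddr/daddr, while im_msgtype and im_mbz reuse the ttl and
 * protocol bytes.)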
900 * And all this only to mangle msg->im_msgtype and 901 * to set msg->im_mbz to "mbz" :-) 902 */ 903 skb_push(skb, sizeof(struct iphdr)); 904 skb_reset_network_header(skb); 905 skb_reset_transport_header(skb); 906 msg = (struct igmpmsg *)skb_network_header(skb); 907 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); 908 msg->im_msgtype = IGMPMSG_WHOLEPKT; 909 msg->im_mbz = 0; 910 msg->im_vif = mrt->mroute_reg_vif_num; 911 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; 912 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + 913 sizeof(struct iphdr)); 914 } else 915 #endif 916 { 917 918 /* Copy the IP header */ 919 920 skb->network_header = skb->tail; 921 skb_put(skb, ihl); 922 skb_copy_to_linear_data(skb, pkt->data, ihl); 923 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ 924 msg = (struct igmpmsg *)skb_network_header(skb); 925 msg->im_vif = vifi; 926 skb_dst_set(skb, dst_clone(skb_dst(pkt))); 927 928 /* Add our header */ 929 930 igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); 931 igmp->type = 932 msg->im_msgtype = assert; 933 igmp->code = 0; 934 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ 935 skb->transport_header = skb->network_header; 936 } 937 938 rcu_read_lock(); 939 mroute_sk = rcu_dereference(mrt->mroute_sk); 940 if (mroute_sk == NULL) { 941 rcu_read_unlock(); 942 kfree_skb(skb); 943 return -EINVAL; 944 } 945 946 /* Deliver to mrouted */ 947 948 ret = sock_queue_rcv_skb(mroute_sk, skb); 949 rcu_read_unlock(); 950 if (ret < 0) { 951 if (net_ratelimit()) 952 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); 953 kfree_skb(skb); 954 } 955 956 return ret; 957 } 958 959 /* 960 * Queue a packet for resolution. It gets locked cache entry! 961 */ 962 963 static int 964 ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) 965 { 966 bool found = false; 967 int err; 968 struct mfc_cache *c; 969 const struct iphdr *iph = ip_hdr(skb); 970 971 spin_lock_bh(&mfc_unres_lock); 972 list_for_each_entry(c, &mrt->mfc_unres_queue, list) { 973 if (c->mfc_mcastgrp == iph->daddr && 974 c->mfc_origin == iph->saddr) { 975 found = true; 976 break; 977 } 978 } 979 980 if (!found) { 981 /* Create a new entry if allowable */ 982 983 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || 984 (c = ipmr_cache_alloc_unres()) == NULL) { 985 spin_unlock_bh(&mfc_unres_lock); 986 987 kfree_skb(skb); 988 return -ENOBUFS; 989 } 990 991 /* Fill in the new cache entry */ 992 993 c->mfc_parent = -1; 994 c->mfc_origin = iph->saddr; 995 c->mfc_mcastgrp = iph->daddr; 996 997 /* Reflect first query at mrouted. 
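 * The IGMPMSG_NOCACHE upcall carries this packet's source and group;
 * mrouted is expected to answer with MRT_ADD_MFC, at which point
 * ipmr_mfc_add() drains the queued skbs via ipmr_cache_resolve().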
*/ 998 999 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); 1000 if (err < 0) { 1001 /* If the report failed throw the cache entry 1002 out - Brad Parker 1003 */ 1004 spin_unlock_bh(&mfc_unres_lock); 1005 1006 ipmr_cache_free(c); 1007 kfree_skb(skb); 1008 return err; 1009 } 1010 1011 atomic_inc(&mrt->cache_resolve_queue_len); 1012 list_add(&c->list, &mrt->mfc_unres_queue); 1013 1014 if (atomic_read(&mrt->cache_resolve_queue_len) == 1) 1015 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); 1016 } 1017 1018 /* See if we can append the packet */ 1019 1020 if (c->mfc_un.unres.unresolved.qlen > 3) { 1021 kfree_skb(skb); 1022 err = -ENOBUFS; 1023 } else { 1024 skb_queue_tail(&c->mfc_un.unres.unresolved, skb); 1025 err = 0; 1026 } 1027 1028 spin_unlock_bh(&mfc_unres_lock); 1029 return err; 1030 } 1031 1032 /* 1033 * MFC cache manipulation by user space mroute daemon 1034 */ 1035 1036 static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc) 1037 { 1038 int line; 1039 struct mfc_cache *c, *next; 1040 1041 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 1042 1043 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { 1044 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1045 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1046 list_del_rcu(&c->list); 1047 1048 ipmr_cache_free(c); 1049 return 0; 1050 } 1051 } 1052 return -ENOENT; 1053 } 1054 1055 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, 1056 struct mfcctl *mfc, int mrtsock) 1057 { 1058 bool found = false; 1059 int line; 1060 struct mfc_cache *uc, *c; 1061 1062 if (mfc->mfcc_parent >= MAXVIFS) 1063 return -ENFILE; 1064 1065 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 1066 1067 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { 1068 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1069 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1070 found = true; 1071 break; 1072 } 1073 } 1074 1075 if (found) { 1076 write_lock_bh(&mrt_lock); 1077 c->mfc_parent = mfc->mfcc_parent; 1078 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); 1079 if (!mrtsock) 1080 c->mfc_flags |= MFC_STATIC; 1081 write_unlock_bh(&mrt_lock); 1082 return 0; 1083 } 1084 1085 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) 1086 return -EINVAL; 1087 1088 c = ipmr_cache_alloc(); 1089 if (c == NULL) 1090 return -ENOMEM; 1091 1092 c->mfc_origin = mfc->mfcc_origin.s_addr; 1093 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; 1094 c->mfc_parent = mfc->mfcc_parent; 1095 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); 1096 if (!mrtsock) 1097 c->mfc_flags |= MFC_STATIC; 1098 1099 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]); 1100 1101 /* 1102 * Check to see if we resolved a queued list. If so we 1103 * need to send on the frames and tidy up. 
1104 */ 1105 found = false; 1106 spin_lock_bh(&mfc_unres_lock); 1107 list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { 1108 if (uc->mfc_origin == c->mfc_origin && 1109 uc->mfc_mcastgrp == c->mfc_mcastgrp) { 1110 list_del(&uc->list); 1111 atomic_dec(&mrt->cache_resolve_queue_len); 1112 found = true; 1113 break; 1114 } 1115 } 1116 if (list_empty(&mrt->mfc_unres_queue)) 1117 del_timer(&mrt->ipmr_expire_timer); 1118 spin_unlock_bh(&mfc_unres_lock); 1119 1120 if (found) { 1121 ipmr_cache_resolve(net, mrt, uc, c); 1122 ipmr_cache_free(uc); 1123 } 1124 return 0; 1125 } 1126 1127 /* 1128 * Close the multicast socket, and clear the vif tables etc 1129 */ 1130 1131 static void mroute_clean_tables(struct mr_table *mrt) 1132 { 1133 int i; 1134 LIST_HEAD(list); 1135 struct mfc_cache *c, *next; 1136 1137 /* Shut down all active vif entries */ 1138 1139 for (i = 0; i < mrt->maxvif; i++) { 1140 if (!(mrt->vif_table[i].flags & VIFF_STATIC)) 1141 vif_delete(mrt, i, 0, &list); 1142 } 1143 unregister_netdevice_many(&list); 1144 1145 /* Wipe the cache */ 1146 1147 for (i = 0; i < MFC_LINES; i++) { 1148 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { 1149 if (c->mfc_flags & MFC_STATIC) 1150 continue; 1151 list_del_rcu(&c->list); 1152 ipmr_cache_free(c); 1153 } 1154 } 1155 1156 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { 1157 spin_lock_bh(&mfc_unres_lock); 1158 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 1159 list_del(&c->list); 1160 ipmr_destroy_unres(mrt, c); 1161 } 1162 spin_unlock_bh(&mfc_unres_lock); 1163 } 1164 } 1165 1166 /* called from ip_ra_control(), before an RCU grace period, 1167 * we dont need to call synchronize_rcu() here 1168 */ 1169 static void mrtsock_destruct(struct sock *sk) 1170 { 1171 struct net *net = sock_net(sk); 1172 struct mr_table *mrt; 1173 1174 rtnl_lock(); 1175 ipmr_for_each_table(mrt, net) { 1176 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1177 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1178 rcu_assign_pointer(mrt->mroute_sk, NULL); 1179 mroute_clean_tables(mrt); 1180 } 1181 } 1182 rtnl_unlock(); 1183 } 1184 1185 /* 1186 * Socket options and virtual interface manipulation. The whole 1187 * virtual interface system is a complete heap, but unfortunately 1188 * that's how BSD mrouted happens to think. Maybe one day with a proper 1189 * MOSPF/PIM router set up we can clean this up. 1190 */ 1191 1192 int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) 1193 { 1194 int ret; 1195 struct vifctl vif; 1196 struct mfcctl mfc; 1197 struct net *net = sock_net(sk); 1198 struct mr_table *mrt; 1199 1200 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1201 if (mrt == NULL) 1202 return -ENOENT; 1203 1204 if (optname != MRT_INIT) { 1205 if (sk != rcu_dereference_raw(mrt->mroute_sk) && 1206 !capable(CAP_NET_ADMIN)) 1207 return -EACCES; 1208 } 1209 1210 switch (optname) { 1211 case MRT_INIT: 1212 if (sk->sk_type != SOCK_RAW || 1213 inet_sk(sk)->inet_num != IPPROTO_IGMP) 1214 return -EOPNOTSUPP; 1215 if (optlen != sizeof(int)) 1216 return -ENOPROTOOPT; 1217 1218 rtnl_lock(); 1219 if (rtnl_dereference(mrt->mroute_sk)) { 1220 rtnl_unlock(); 1221 return -EADDRINUSE; 1222 } 1223 1224 ret = ip_ra_control(sk, 1, mrtsock_destruct); 1225 if (ret == 0) { 1226 rcu_assign_pointer(mrt->mroute_sk, sk); 1227 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; 1228 } 1229 rtnl_unlock(); 1230 return ret; 1231 case MRT_DONE: 1232 if (sk != rcu_dereference_raw(mrt->mroute_sk)) 1233 return -EACCES; 1234 return ip_ra_control(sk, 0, NULL); 1235 case MRT_ADD_VIF: 1236 case MRT_DEL_VIF: 1237 if (optlen != sizeof(vif)) 1238 return -EINVAL; 1239 if (copy_from_user(&vif, optval, sizeof(vif))) 1240 return -EFAULT; 1241 if (vif.vifc_vifi >= MAXVIFS) 1242 return -ENFILE; 1243 rtnl_lock(); 1244 if (optname == MRT_ADD_VIF) { 1245 ret = vif_add(net, mrt, &vif, 1246 sk == rtnl_dereference(mrt->mroute_sk)); 1247 } else { 1248 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); 1249 } 1250 rtnl_unlock(); 1251 return ret; 1252 1253 /* 1254 * Manipulate the forwarding caches. These live 1255 * in a sort of kernel/user symbiosis. 1256 */ 1257 case MRT_ADD_MFC: 1258 case MRT_DEL_MFC: 1259 if (optlen != sizeof(mfc)) 1260 return -EINVAL; 1261 if (copy_from_user(&mfc, optval, sizeof(mfc))) 1262 return -EFAULT; 1263 rtnl_lock(); 1264 if (optname == MRT_DEL_MFC) 1265 ret = ipmr_mfc_delete(mrt, &mfc); 1266 else 1267 ret = ipmr_mfc_add(net, mrt, &mfc, 1268 sk == rtnl_dereference(mrt->mroute_sk)); 1269 rtnl_unlock(); 1270 return ret; 1271 /* 1272 * Control PIM assert. 1273 */ 1274 case MRT_ASSERT: 1275 { 1276 int v; 1277 if (get_user(v, (int __user *)optval)) 1278 return -EFAULT; 1279 mrt->mroute_do_assert = (v) ? 1 : 0; 1280 return 0; 1281 } 1282 #ifdef CONFIG_IP_PIMSM 1283 case MRT_PIM: 1284 { 1285 int v; 1286 1287 if (get_user(v, (int __user *)optval)) 1288 return -EFAULT; 1289 v = (v) ? 1 : 0; 1290 1291 rtnl_lock(); 1292 ret = 0; 1293 if (v != mrt->mroute_do_pim) { 1294 mrt->mroute_do_pim = v; 1295 mrt->mroute_do_assert = v; 1296 } 1297 rtnl_unlock(); 1298 return ret; 1299 } 1300 #endif 1301 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 1302 case MRT_TABLE: 1303 { 1304 u32 v; 1305 1306 if (optlen != sizeof(u32)) 1307 return -EINVAL; 1308 if (get_user(v, (u32 __user *)optval)) 1309 return -EFAULT; 1310 1311 rtnl_lock(); 1312 ret = 0; 1313 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1314 ret = -EBUSY; 1315 } else { 1316 if (!ipmr_new_table(net, v)) 1317 ret = -ENOMEM; 1318 raw_sk(sk)->ipmr_table = v; 1319 } 1320 rtnl_unlock(); 1321 return ret; 1322 } 1323 #endif 1324 /* 1325 * Spurious command, or MRT_VERSION which you cannot 1326 * set. 1327 */ 1328 default: 1329 return -ENOPROTOOPT; 1330 } 1331 } 1332 1333 /* 1334 * Getsock opt support for the multicast routing system. 1335 */ 1336 1337 int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) 1338 { 1339 int olr; 1340 int val; 1341 struct net *net = sock_net(sk); 1342 struct mr_table *mrt; 1343 1344 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1345 if (mrt == NULL) 1346 return -ENOENT; 1347 1348 if (optname != MRT_VERSION && 1349 #ifdef CONFIG_IP_PIMSM 1350 optname != MRT_PIM && 1351 #endif 1352 optname != MRT_ASSERT) 1353 return -ENOPROTOOPT; 1354 1355 if (get_user(olr, optlen)) 1356 return -EFAULT; 1357 1358 olr = min_t(unsigned int, olr, sizeof(int)); 1359 if (olr < 0) 1360 return -EINVAL; 1361 1362 if (put_user(olr, optlen)) 1363 return -EFAULT; 1364 if (optname == MRT_VERSION) 1365 val = 0x0305; 1366 #ifdef CONFIG_IP_PIMSM 1367 else if (optname == MRT_PIM) 1368 val = mrt->mroute_do_pim; 1369 #endif 1370 else 1371 val = mrt->mroute_do_assert; 1372 if (copy_to_user(optval, &val, olr)) 1373 return -EFAULT; 1374 return 0; 1375 } 1376 1377 /* 1378 * The IP multicast ioctl support routines. 1379 */ 1380 1381 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) 1382 { 1383 struct sioc_sg_req sr; 1384 struct sioc_vif_req vr; 1385 struct vif_device *vif; 1386 struct mfc_cache *c; 1387 struct net *net = sock_net(sk); 1388 struct mr_table *mrt; 1389 1390 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1391 if (mrt == NULL) 1392 return -ENOENT; 1393 1394 switch (cmd) { 1395 case SIOCGETVIFCNT: 1396 if (copy_from_user(&vr, arg, sizeof(vr))) 1397 return -EFAULT; 1398 if (vr.vifi >= mrt->maxvif) 1399 return -EINVAL; 1400 read_lock(&mrt_lock); 1401 vif = &mrt->vif_table[vr.vifi]; 1402 if (VIF_EXISTS(mrt, vr.vifi)) { 1403 vr.icount = vif->pkt_in; 1404 vr.ocount = vif->pkt_out; 1405 vr.ibytes = vif->bytes_in; 1406 vr.obytes = vif->bytes_out; 1407 read_unlock(&mrt_lock); 1408 1409 if (copy_to_user(arg, &vr, sizeof(vr))) 1410 return -EFAULT; 1411 return 0; 1412 } 1413 read_unlock(&mrt_lock); 1414 return -EADDRNOTAVAIL; 1415 case SIOCGETSGCNT: 1416 if (copy_from_user(&sr, arg, sizeof(sr))) 1417 return -EFAULT; 1418 1419 rcu_read_lock(); 1420 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); 1421 if (c) { 1422 sr.pktcnt = c->mfc_un.res.pkt; 1423 sr.bytecnt = c->mfc_un.res.bytes; 1424 sr.wrong_if = c->mfc_un.res.wrong_if; 1425 rcu_read_unlock(); 1426 1427 if (copy_to_user(arg, &sr, sizeof(sr))) 1428 return -EFAULT; 1429 return 0; 1430 } 1431 rcu_read_unlock(); 1432 return -EADDRNOTAVAIL; 1433 default: 1434 return -ENOIOCTLCMD; 1435 } 1436 } 1437 1438 #ifdef CONFIG_COMPAT 1439 struct compat_sioc_sg_req { 1440 struct in_addr src; 1441 struct in_addr grp; 1442 compat_ulong_t pktcnt; 1443 compat_ulong_t bytecnt; 1444 compat_ulong_t wrong_if; 1445 }; 1446 1447 struct compat_sioc_vif_req { 1448 vifi_t vifi; /* Which iface */ 1449 compat_ulong_t icount; 1450 compat_ulong_t ocount; 1451 compat_ulong_t ibytes; 1452 compat_ulong_t obytes; 1453 }; 1454 1455 int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) 1456 { 1457 struct compat_sioc_sg_req sr; 1458 struct compat_sioc_vif_req vr; 1459 struct vif_device *vif; 1460 struct mfc_cache *c; 1461 struct net *net = sock_net(sk); 1462 struct mr_table *mrt; 1463 1464 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); 1465 if (mrt == NULL) 1466 return -ENOENT; 1467 1468 switch (cmd) { 1469 case SIOCGETVIFCNT: 1470 if (copy_from_user(&vr, arg, sizeof(vr))) 1471 return -EFAULT; 1472 if (vr.vifi >= mrt->maxvif) 1473 return -EINVAL; 1474 read_lock(&mrt_lock); 1475 vif = &mrt->vif_table[vr.vifi]; 1476 if (VIF_EXISTS(mrt, vr.vifi)) { 1477 vr.icount = vif->pkt_in; 1478 vr.ocount = vif->pkt_out; 1479 vr.ibytes = vif->bytes_in; 1480 vr.obytes = vif->bytes_out; 1481 read_unlock(&mrt_lock); 1482 1483 if (copy_to_user(arg, &vr, sizeof(vr))) 1484 return -EFAULT; 1485 return 0; 1486 } 1487 read_unlock(&mrt_lock); 1488 return -EADDRNOTAVAIL; 1489 case SIOCGETSGCNT: 1490 if (copy_from_user(&sr, arg, sizeof(sr))) 1491 return -EFAULT; 1492 1493 rcu_read_lock(); 1494 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); 1495 if (c) { 1496 sr.pktcnt = c->mfc_un.res.pkt; 1497 sr.bytecnt = c->mfc_un.res.bytes; 1498 sr.wrong_if = c->mfc_un.res.wrong_if; 1499 rcu_read_unlock(); 1500 1501 if (copy_to_user(arg, &sr, sizeof(sr))) 1502 return -EFAULT; 1503 return 0; 1504 } 1505 rcu_read_unlock(); 1506 return -EADDRNOTAVAIL; 1507 default: 1508 return -ENOIOCTLCMD; 1509 } 1510 } 1511 #endif 1512 1513 1514 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) 1515 { 1516 struct net_device *dev = ptr; 1517 struct net *net = dev_net(dev); 1518 struct mr_table *mrt; 1519 struct vif_device *v; 1520 int ct; 1521 LIST_HEAD(list); 1522 1523 if (event != NETDEV_UNREGISTER) 1524 return NOTIFY_DONE; 1525 1526 ipmr_for_each_table(mrt, net) { 1527 v = &mrt->vif_table[0]; 1528 for (ct = 0; ct < mrt->maxvif; ct++, v++) { 1529 if (v->dev == dev) 1530 vif_delete(mrt, ct, 1, &list); 1531 } 1532 } 1533 unregister_netdevice_many(&list); 1534 return NOTIFY_DONE; 1535 } 1536 1537 1538 static struct notifier_block ip_mr_notifier = { 1539 .notifier_call = ipmr_device_event, 1540 }; 1541 1542 /* 1543 * Encapsulate a packet by attaching a valid IPIP header to it. 1544 * This avoids tunnel drivers and other mess and gives us the speed so 1545 * important for multicast video. 
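 * The outer IPIP header built in ip_encap() copies TTL and TOS from the
 * inner header; ipmr_queue_xmit() then routes the result to the vif's
 * tunnel endpoint.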
1546 */ 1547 1548 static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) 1549 { 1550 struct iphdr *iph; 1551 struct iphdr *old_iph = ip_hdr(skb); 1552 1553 skb_push(skb, sizeof(struct iphdr)); 1554 skb->transport_header = skb->network_header; 1555 skb_reset_network_header(skb); 1556 iph = ip_hdr(skb); 1557 1558 iph->version = 4; 1559 iph->tos = old_iph->tos; 1560 iph->ttl = old_iph->ttl; 1561 iph->frag_off = 0; 1562 iph->daddr = daddr; 1563 iph->saddr = saddr; 1564 iph->protocol = IPPROTO_IPIP; 1565 iph->ihl = 5; 1566 iph->tot_len = htons(skb->len); 1567 ip_select_ident(iph, skb_dst(skb), NULL); 1568 ip_send_check(iph); 1569 1570 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1571 nf_reset(skb); 1572 } 1573 1574 static inline int ipmr_forward_finish(struct sk_buff *skb) 1575 { 1576 struct ip_options *opt = &(IPCB(skb)->opt); 1577 1578 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 1579 1580 if (unlikely(opt->optlen)) 1581 ip_forward_options(skb); 1582 1583 return dst_output(skb); 1584 } 1585 1586 /* 1587 * Processing handlers for ipmr_forward 1588 */ 1589 1590 static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, 1591 struct sk_buff *skb, struct mfc_cache *c, int vifi) 1592 { 1593 const struct iphdr *iph = ip_hdr(skb); 1594 struct vif_device *vif = &mrt->vif_table[vifi]; 1595 struct net_device *dev; 1596 struct rtable *rt; 1597 int encap = 0; 1598 1599 if (vif->dev == NULL) 1600 goto out_free; 1601 1602 #ifdef CONFIG_IP_PIMSM 1603 if (vif->flags & VIFF_REGISTER) { 1604 vif->pkt_out++; 1605 vif->bytes_out += skb->len; 1606 vif->dev->stats.tx_bytes += skb->len; 1607 vif->dev->stats.tx_packets++; 1608 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); 1609 goto out_free; 1610 } 1611 #endif 1612 1613 if (vif->flags & VIFF_TUNNEL) { 1614 struct flowi fl = { 1615 .oif = vif->link, 1616 .fl4_dst = vif->remote, 1617 .fl4_src = vif->local, 1618 .fl4_tos = RT_TOS(iph->tos), 1619 .proto = IPPROTO_IPIP 1620 }; 1621 1622 if (ip_route_output_key(net, &rt, &fl)) 1623 goto out_free; 1624 encap = sizeof(struct iphdr); 1625 } else { 1626 struct flowi fl = { 1627 .oif = vif->link, 1628 .fl4_dst = iph->daddr, 1629 .fl4_tos = RT_TOS(iph->tos), 1630 .proto = IPPROTO_IPIP 1631 }; 1632 1633 if (ip_route_output_key(net, &rt, &fl)) 1634 goto out_free; 1635 } 1636 1637 dev = rt->dst.dev; 1638 1639 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { 1640 /* Do not fragment multicasts. Alas, IPv4 does not 1641 * allow to send ICMP, so that packets will disappear 1642 * to blackhole. 1643 */ 1644 1645 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 1646 ip_rt_put(rt); 1647 goto out_free; 1648 } 1649 1650 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; 1651 1652 if (skb_cow(skb, encap)) { 1653 ip_rt_put(rt); 1654 goto out_free; 1655 } 1656 1657 vif->pkt_out++; 1658 vif->bytes_out += skb->len; 1659 1660 skb_dst_drop(skb); 1661 skb_dst_set(skb, &rt->dst); 1662 ip_decrease_ttl(ip_hdr(skb)); 1663 1664 /* FIXME: forward and output firewalls used to be called here. 1665 * What do we do with netfilter? -- RR 1666 */ 1667 if (vif->flags & VIFF_TUNNEL) { 1668 ip_encap(skb, vif->local, vif->remote); 1669 /* FIXME: extra output firewall step used to be here. 
--RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicast
	 * application, that application should receive the packets no matter
	 * which interface it joined on.
	 * If we don't do this, the application would have to join on all
	 * interfaces. On the other hand, a multihomed host (or a router, but
	 * not an mrouter) cannot join on more than one interface - it would
	 * end up receiving duplicate packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround, until the routing daemons are
			 * fixed, is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) entries whose default multicast route points
			 * to the wrong oif. In any case, it is not a good
			 * idea to run multicast applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from the RPT to the
		     * SPT, so we cannot check that the packet arrived on an
		     * oif. It is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough...
--ANK 1748 */ 1749 (mrt->mroute_do_pim || 1750 cache->mfc_un.res.ttls[true_vifi] < 255) && 1751 time_after(jiffies, 1752 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { 1753 cache->mfc_un.res.last_assert = jiffies; 1754 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); 1755 } 1756 goto dont_forward; 1757 } 1758 1759 mrt->vif_table[vif].pkt_in++; 1760 mrt->vif_table[vif].bytes_in += skb->len; 1761 1762 /* 1763 * Forward the frame 1764 */ 1765 for (ct = cache->mfc_un.res.maxvif - 1; 1766 ct >= cache->mfc_un.res.minvif; ct--) { 1767 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) { 1768 if (psend != -1) { 1769 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1770 1771 if (skb2) 1772 ipmr_queue_xmit(net, mrt, skb2, cache, 1773 psend); 1774 } 1775 psend = ct; 1776 } 1777 } 1778 if (psend != -1) { 1779 if (local) { 1780 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1781 1782 if (skb2) 1783 ipmr_queue_xmit(net, mrt, skb2, cache, psend); 1784 } else { 1785 ipmr_queue_xmit(net, mrt, skb, cache, psend); 1786 return 0; 1787 } 1788 } 1789 1790 dont_forward: 1791 if (!local) 1792 kfree_skb(skb); 1793 return 0; 1794 } 1795 1796 1797 /* 1798 * Multicast packets for forwarding arrive here 1799 * Called with rcu_read_lock(); 1800 */ 1801 1802 int ip_mr_input(struct sk_buff *skb) 1803 { 1804 struct mfc_cache *cache; 1805 struct net *net = dev_net(skb->dev); 1806 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; 1807 struct mr_table *mrt; 1808 int err; 1809 1810 /* Packet is looped back after forward, it should not be 1811 * forwarded second time, but still can be delivered locally. 1812 */ 1813 if (IPCB(skb)->flags & IPSKB_FORWARDED) 1814 goto dont_forward; 1815 1816 err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt); 1817 if (err < 0) { 1818 kfree_skb(skb); 1819 return err; 1820 } 1821 1822 if (!local) { 1823 if (IPCB(skb)->opt.router_alert) { 1824 if (ip_call_ra_chain(skb)) 1825 return 0; 1826 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) { 1827 /* IGMPv1 (and broken IGMPv2 implementations sort of 1828 * Cisco IOS <= 11.2(8)) do not put router alert 1829 * option to IGMP packets destined to routable 1830 * groups. It is very bad, because it means 1831 * that we can forward NO IGMP messages. 
1832 */ 1833 struct sock *mroute_sk; 1834 1835 mroute_sk = rcu_dereference(mrt->mroute_sk); 1836 if (mroute_sk) { 1837 nf_reset(skb); 1838 raw_rcv(mroute_sk, skb); 1839 return 0; 1840 } 1841 } 1842 } 1843 1844 /* already under rcu_read_lock() */ 1845 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 1846 1847 /* 1848 * No usable cache entry 1849 */ 1850 if (cache == NULL) { 1851 int vif; 1852 1853 if (local) { 1854 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1855 ip_local_deliver(skb); 1856 if (skb2 == NULL) 1857 return -ENOBUFS; 1858 skb = skb2; 1859 } 1860 1861 read_lock(&mrt_lock); 1862 vif = ipmr_find_vif(mrt, skb->dev); 1863 if (vif >= 0) { 1864 int err2 = ipmr_cache_unresolved(mrt, vif, skb); 1865 read_unlock(&mrt_lock); 1866 1867 return err2; 1868 } 1869 read_unlock(&mrt_lock); 1870 kfree_skb(skb); 1871 return -ENODEV; 1872 } 1873 1874 read_lock(&mrt_lock); 1875 ip_mr_forward(net, mrt, skb, cache, local); 1876 read_unlock(&mrt_lock); 1877 1878 if (local) 1879 return ip_local_deliver(skb); 1880 1881 return 0; 1882 1883 dont_forward: 1884 if (local) 1885 return ip_local_deliver(skb); 1886 kfree_skb(skb); 1887 return 0; 1888 } 1889 1890 #ifdef CONFIG_IP_PIMSM 1891 /* called with rcu_read_lock() */ 1892 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, 1893 unsigned int pimlen) 1894 { 1895 struct net_device *reg_dev = NULL; 1896 struct iphdr *encap; 1897 1898 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); 1899 /* 1900 * Check that: 1901 * a. packet is really sent to a multicast group 1902 * b. packet is not a NULL-REGISTER 1903 * c. packet is not truncated 1904 */ 1905 if (!ipv4_is_multicast(encap->daddr) || 1906 encap->tot_len == 0 || 1907 ntohs(encap->tot_len) + pimlen > skb->len) 1908 return 1; 1909 1910 read_lock(&mrt_lock); 1911 if (mrt->mroute_reg_vif_num >= 0) 1912 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; 1913 read_unlock(&mrt_lock); 1914 1915 if (reg_dev == NULL) 1916 return 1; 1917 1918 skb->mac_header = skb->network_header; 1919 skb_pull(skb, (u8 *)encap - skb->data); 1920 skb_reset_network_header(skb); 1921 skb->protocol = htons(ETH_P_IP); 1922 skb->ip_summed = CHECKSUM_NONE; 1923 skb->pkt_type = PACKET_HOST; 1924 1925 skb_tunnel_rx(skb, reg_dev); 1926 1927 netif_rx(skb); 1928 1929 return NET_RX_SUCCESS; 1930 } 1931 #endif 1932 1933 #ifdef CONFIG_IP_PIMSM_V1 1934 /* 1935 * Handle IGMP messages of PIMv1 1936 */ 1937 1938 int pim_rcv_v1(struct sk_buff *skb) 1939 { 1940 struct igmphdr *pim; 1941 struct net *net = dev_net(skb->dev); 1942 struct mr_table *mrt; 1943 1944 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 1945 goto drop; 1946 1947 pim = igmp_hdr(skb); 1948 1949 if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) 1950 goto drop; 1951 1952 if (!mrt->mroute_do_pim || 1953 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) 1954 goto drop; 1955 1956 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 1957 drop: 1958 kfree_skb(skb); 1959 } 1960 return 0; 1961 } 1962 #endif 1963 1964 #ifdef CONFIG_IP_PIMSM_V2 1965 static int pim_rcv(struct sk_buff *skb) 1966 { 1967 struct pimreghdr *pim; 1968 struct net *net = dev_net(skb->dev); 1969 struct mr_table *mrt; 1970 1971 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 1972 goto drop; 1973 1974 pim = (struct pimreghdr *)skb_transport_header(skb); 1975 if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) || 1976 (pim->flags & PIM_NULL_REGISTER) || 1977 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && 1978 
csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 1979 goto drop; 1980 1981 if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) 1982 goto drop; 1983 1984 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 1985 drop: 1986 kfree_skb(skb); 1987 } 1988 return 0; 1989 } 1990 #endif 1991 1992 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 1993 struct mfc_cache *c, struct rtmsg *rtm) 1994 { 1995 int ct; 1996 struct rtnexthop *nhp; 1997 u8 *b = skb_tail_pointer(skb); 1998 struct rtattr *mp_head; 1999 2000 /* If cache is unresolved, don't try to parse IIF and OIF */ 2001 if (c->mfc_parent >= MAXVIFS) 2002 return -ENOENT; 2003 2004 if (VIF_EXISTS(mrt, c->mfc_parent)) 2005 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex); 2006 2007 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 2008 2009 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 2010 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { 2011 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 2012 goto rtattr_failure; 2013 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 2014 nhp->rtnh_flags = 0; 2015 nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; 2016 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; 2017 nhp->rtnh_len = sizeof(*nhp); 2018 } 2019 } 2020 mp_head->rta_type = RTA_MULTIPATH; 2021 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; 2022 rtm->rtm_type = RTN_MULTICAST; 2023 return 1; 2024 2025 rtattr_failure: 2026 nlmsg_trim(skb, b); 2027 return -EMSGSIZE; 2028 } 2029 2030 int ipmr_get_route(struct net *net, 2031 struct sk_buff *skb, struct rtmsg *rtm, int nowait) 2032 { 2033 int err; 2034 struct mr_table *mrt; 2035 struct mfc_cache *cache; 2036 struct rtable *rt = skb_rtable(skb); 2037 2038 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2039 if (mrt == NULL) 2040 return -ENOENT; 2041 2042 rcu_read_lock(); 2043 cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst); 2044 2045 if (cache == NULL) { 2046 struct sk_buff *skb2; 2047 struct iphdr *iph; 2048 struct net_device *dev; 2049 int vif = -1; 2050 2051 if (nowait) { 2052 rcu_read_unlock(); 2053 return -EAGAIN; 2054 } 2055 2056 dev = skb->dev; 2057 read_lock(&mrt_lock); 2058 if (dev) 2059 vif = ipmr_find_vif(mrt, dev); 2060 if (vif < 0) { 2061 read_unlock(&mrt_lock); 2062 rcu_read_unlock(); 2063 return -ENODEV; 2064 } 2065 skb2 = skb_clone(skb, GFP_ATOMIC); 2066 if (!skb2) { 2067 read_unlock(&mrt_lock); 2068 rcu_read_unlock(); 2069 return -ENOMEM; 2070 } 2071 2072 skb_push(skb2, sizeof(struct iphdr)); 2073 skb_reset_network_header(skb2); 2074 iph = ip_hdr(skb2); 2075 iph->ihl = sizeof(struct iphdr) >> 2; 2076 iph->saddr = rt->rt_src; 2077 iph->daddr = rt->rt_dst; 2078 iph->version = 0; 2079 err = ipmr_cache_unresolved(mrt, vif, skb2); 2080 read_unlock(&mrt_lock); 2081 rcu_read_unlock(); 2082 return err; 2083 } 2084 2085 read_lock(&mrt_lock); 2086 if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY)) 2087 cache->mfc_flags |= MFC_NOTIFY; 2088 err = __ipmr_fill_mroute(mrt, skb, cache, rtm); 2089 read_unlock(&mrt_lock); 2090 rcu_read_unlock(); 2091 return err; 2092 } 2093 2094 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2095 u32 pid, u32 seq, struct mfc_cache *c) 2096 { 2097 struct nlmsghdr *nlh; 2098 struct rtmsg *rtm; 2099 2100 nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); 2101 if (nlh == NULL) 2102 return -EMSGSIZE; 2103 2104 rtm = nlmsg_data(nlh); 2105 rtm->rtm_family = RTNL_FAMILY_IPMR; 2106 rtm->rtm_dst_len = 32; 2107 rtm->rtm_src_len = 
32; 2108 rtm->rtm_tos = 0; 2109 rtm->rtm_table = mrt->id; 2110 NLA_PUT_U32(skb, RTA_TABLE, mrt->id); 2111 rtm->rtm_type = RTN_MULTICAST; 2112 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2113 rtm->rtm_protocol = RTPROT_UNSPEC; 2114 rtm->rtm_flags = 0; 2115 2116 NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); 2117 NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); 2118 2119 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) 2120 goto nla_put_failure; 2121 2122 return nlmsg_end(skb, nlh); 2123 2124 nla_put_failure: 2125 nlmsg_cancel(skb, nlh); 2126 return -EMSGSIZE; 2127 } 2128 2129 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) 2130 { 2131 struct net *net = sock_net(skb->sk); 2132 struct mr_table *mrt; 2133 struct mfc_cache *mfc; 2134 unsigned int t = 0, s_t; 2135 unsigned int h = 0, s_h; 2136 unsigned int e = 0, s_e; 2137 2138 s_t = cb->args[0]; 2139 s_h = cb->args[1]; 2140 s_e = cb->args[2]; 2141 2142 rcu_read_lock(); 2143 ipmr_for_each_table(mrt, net) { 2144 if (t < s_t) 2145 goto next_table; 2146 if (t > s_t) 2147 s_h = 0; 2148 for (h = s_h; h < MFC_LINES; h++) { 2149 list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) { 2150 if (e < s_e) 2151 goto next_entry; 2152 if (ipmr_fill_mroute(mrt, skb, 2153 NETLINK_CB(cb->skb).pid, 2154 cb->nlh->nlmsg_seq, 2155 mfc) < 0) 2156 goto done; 2157 next_entry: 2158 e++; 2159 } 2160 e = s_e = 0; 2161 } 2162 s_h = 0; 2163 next_table: 2164 t++; 2165 } 2166 done: 2167 rcu_read_unlock(); 2168 2169 cb->args[2] = e; 2170 cb->args[1] = h; 2171 cb->args[0] = t; 2172 2173 return skb->len; 2174 } 2175 2176 #ifdef CONFIG_PROC_FS 2177 /* 2178 * The /proc interfaces to multicast routing : 2179 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif 2180 */ 2181 struct ipmr_vif_iter { 2182 struct seq_net_private p; 2183 struct mr_table *mrt; 2184 int ct; 2185 }; 2186 2187 static struct vif_device *ipmr_vif_seq_idx(struct net *net, 2188 struct ipmr_vif_iter *iter, 2189 loff_t pos) 2190 { 2191 struct mr_table *mrt = iter->mrt; 2192 2193 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { 2194 if (!VIF_EXISTS(mrt, iter->ct)) 2195 continue; 2196 if (pos-- == 0) 2197 return &mrt->vif_table[iter->ct]; 2198 } 2199 return NULL; 2200 } 2201 2202 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) 2203 __acquires(mrt_lock) 2204 { 2205 struct ipmr_vif_iter *iter = seq->private; 2206 struct net *net = seq_file_net(seq); 2207 struct mr_table *mrt; 2208 2209 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2210 if (mrt == NULL) 2211 return ERR_PTR(-ENOENT); 2212 2213 iter->mrt = mrt; 2214 2215 read_lock(&mrt_lock); 2216 return *pos ? 
#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing :
 *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next = ipmr_vif_seq_next,
	.stop = ipmr_vif_seq_stop,
	.show = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner = THIS_MODULE,
	.open = ipmr_vif_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
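/* /proc/net/ip_mr_vif above prints one line per configured vif, with the whole
 * read taken under mrt_lock.  BytesIn/PktsIn and BytesOut/PktsOut are the vif
 * traffic counters, Flags is the VIFF_* mask in hex and Local/Remote are the
 * vif addresses as raw hex.  A hypothetical data line, values made up purely
 * for illustration, roughly matching the format string above:
 *
 *	 1 eth0           1824      12        0       0 00000 C0A80001 00000000
 */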
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next = ipmr_mfc_seq_next,
	.stop = ipmr_mfc_seq_stop,
	.show = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner = THIS_MODULE,
	.open = ipmr_mfc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif
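/* pim_protocol is only attached to the inet protocol table when PIM-SM v2
 * support is configured: ip_mr_init() below registers it for IPPROTO_PIM via
 * inet_add_protocol(), so PIM Register messages reach pim_rcv().  netns_ok
 * marks the handler as safe for packets arriving in any network namespace.
 *
 * Note also that the /proc mfc iterator above walks two collections with
 * different protection: the resolved hash lines under RCU and the unresolved
 * queue under mfc_unres_lock; ipmr_mfc_seq_stop() drops whichever is held.
 */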
/*
 *	Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip_mr_vif");
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
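/* The error labels in ip_mr_init() unwind in reverse registration order.
 * ip_mr_init() is called once during IPv4 stack initialisation (from
 * inet_init()); IP multicast routing is built in rather than modular, so
 * there is no corresponding exit/unregister path at runtime.
 */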