/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = ops->fro_net;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

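/* A minimal sketch of how an address family is expected to use the
 * registration API above. The example_* names are illustrative only;
 * see net/ipv4/fib_rules.c for a real template:
 *
 *	static const struct fib_rules_ops example_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct fib_rule),
 *		.addr_size	= sizeof(u32),
 *		.action		= example_rule_action,
 *		.match		= example_rule_match,
 *		.configure	= example_rule_configure,
 *		.compare	= example_rule_compare,
 *		.fill		= example_rule_fill,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ops = fib_rules_register(&example_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
 */

/* Release every rule on @ops->rules_list, giving the family a chance
 * to tear down per-rule state through its optional ->delete() hook.
 * Called from fib_rules_unregister() after the ops entry has been
 * unlinked from the per-netns list.
 */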
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

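/* Walk the rule list, which is kept sorted by preference, and run the
 * action of the first matching rule. FR_ACT_GOTO jumps forward to its
 * target rule, FR_ACT_NOP is skipped, and an action returning -EAGAIN
 * continues the walk. The family's optional ->suppress() hook may veto
 * an otherwise successful result. On success, @arg->rule is set to the
 * deciding rule (with a reference taken unless FIB_LOOKUP_NOREF) and
 * the action's verdict is returned; -ESRCH means no rule matched.
 */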
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return 1;
	}
	return 0;
}

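/* Handle an RTM_NEWRULE request: parse the netlink message into a
 * freshly allocated rule, resolve interface names and goto targets,
 * reject duplicates when NLM_F_EXCL is set, and insert the rule into
 * the list in order of ascending preference before notifying
 * listeners.
 */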
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = net;

	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
				      : fib_default_rule_pref(ops);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
		rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
		if (rule->l3mdev != 1)
#endif
			goto errout_free;
	}

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	if (rule->l3mdev && rule->table)
		goto errout_free;

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);

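/* Handle an RTM_DELRULE request: delete the first rule matching every
 * attribute present in the request. Goto rules that pointed at the
 * removed rule become unresolved rather than being deleted themselves.
 */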
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (tb[FRA_L3MDEV] &&
		    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if the deleted rule is the target of any goto
		 * rule. If so, mark those rules as unresolved. As this
		 * operation is potentially very expensive, it is only
		 * performed if goto rules have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);

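/* Upper bound on the notification payload: the fixed header plus every
 * generic attribute this core may emit, plus whatever the family adds
 * through its optional ->nlmsg_payload() hook.
 */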
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8); /* FRA_TUN_ID */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}

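/* Handle an RTM_GETRULE dump request. An AF_UNSPEC request walks the
 * rules of every registered family, using cb->args[0] as the family
 * index and cb->args[1] as the rule index within a family so that
 * interrupted dumps can be resumed.
 */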
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

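/* Keep the cached interface indices of all rules in sync with device
 * registration, renaming and unregistration.
 */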
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);