1 /* 2 * net/core/fib_rules.c Generic Routing Rules 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License as 6 * published by the Free Software Foundation, version 2. 7 * 8 * Authors: Thomas Graf <tgraf@suug.ch> 9 */ 10 11 #include <linux/types.h> 12 #include <linux/kernel.h> 13 #include <linux/slab.h> 14 #include <linux/list.h> 15 #include <linux/module.h> 16 #include <net/net_namespace.h> 17 #include <net/sock.h> 18 #include <net/fib_rules.h> 19 20 int fib_default_rule_add(struct fib_rules_ops *ops, 21 u32 pref, u32 table, u32 flags) 22 { 23 struct fib_rule *r; 24 25 r = kzalloc(ops->rule_size, GFP_KERNEL); 26 if (r == NULL) 27 return -ENOMEM; 28 29 atomic_set(&r->refcnt, 1); 30 r->action = FR_ACT_TO_TBL; 31 r->pref = pref; 32 r->table = table; 33 r->flags = flags; 34 r->fr_net = ops->fro_net; 35 36 r->suppress_prefixlen = -1; 37 r->suppress_ifgroup = -1; 38 39 /* The lock is not required here, the list in unreacheable 40 * at the moment this function is called */ 41 list_add_tail(&r->list, &ops->rules_list); 42 return 0; 43 } 44 EXPORT_SYMBOL(fib_default_rule_add); 45 46 u32 fib_default_rule_pref(struct fib_rules_ops *ops) 47 { 48 struct list_head *pos; 49 struct fib_rule *rule; 50 51 if (!list_empty(&ops->rules_list)) { 52 pos = ops->rules_list.next; 53 if (pos->next != &ops->rules_list) { 54 rule = list_entry(pos->next, struct fib_rule, list); 55 if (rule->pref) 56 return rule->pref - 1; 57 } 58 } 59 60 return 0; 61 } 62 EXPORT_SYMBOL(fib_default_rule_pref); 63 64 static void notify_rule_change(int event, struct fib_rule *rule, 65 struct fib_rules_ops *ops, struct nlmsghdr *nlh, 66 u32 pid); 67 68 static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family) 69 { 70 struct fib_rules_ops *ops; 71 72 rcu_read_lock(); 73 list_for_each_entry_rcu(ops, &net->rules_ops, list) { 74 if (ops->family == family) { 75 if (!try_module_get(ops->owner)) 76 ops = NULL; 77 rcu_read_unlock(); 78 return ops; 79 } 80 } 81 rcu_read_unlock(); 82 83 return NULL; 84 } 85 86 static void rules_ops_put(struct fib_rules_ops *ops) 87 { 88 if (ops) 89 module_put(ops->owner); 90 } 91 92 static void flush_route_cache(struct fib_rules_ops *ops) 93 { 94 if (ops->flush_cache) 95 ops->flush_cache(ops); 96 } 97 98 static int __fib_rules_register(struct fib_rules_ops *ops) 99 { 100 int err = -EEXIST; 101 struct fib_rules_ops *o; 102 struct net *net; 103 104 net = ops->fro_net; 105 106 if (ops->rule_size < sizeof(struct fib_rule)) 107 return -EINVAL; 108 109 if (ops->match == NULL || ops->configure == NULL || 110 ops->compare == NULL || ops->fill == NULL || 111 ops->action == NULL) 112 return -EINVAL; 113 114 spin_lock(&net->rules_mod_lock); 115 list_for_each_entry(o, &net->rules_ops, list) 116 if (ops->family == o->family) 117 goto errout; 118 119 list_add_tail_rcu(&ops->list, &net->rules_ops); 120 err = 0; 121 errout: 122 spin_unlock(&net->rules_mod_lock); 123 124 return err; 125 } 126 127 struct fib_rules_ops * 128 fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net) 129 { 130 struct fib_rules_ops *ops; 131 int err; 132 133 ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); 134 if (ops == NULL) 135 return ERR_PTR(-ENOMEM); 136 137 INIT_LIST_HEAD(&ops->rules_list); 138 ops->fro_net = net; 139 140 err = __fib_rules_register(ops); 141 if (err) { 142 kfree(ops); 143 ops = ERR_PTR(err); 144 } 145 146 return ops; 147 } 148 EXPORT_SYMBOL_GPL(fib_rules_register); 149 150 static void fib_rules_cleanup_ops(struct fib_rules_ops *ops) 151 { 152 struct fib_rule *rule, *tmp; 153 154 list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) { 155 list_del_rcu(&rule->list); 156 if (ops->delete) 157 ops->delete(rule); 158 fib_rule_put(rule); 159 } 160 } 161 162 void fib_rules_unregister(struct fib_rules_ops *ops) 163 { 164 struct net *net = ops->fro_net; 165 166 spin_lock(&net->rules_mod_lock); 167 list_del_rcu(&ops->list); 168 spin_unlock(&net->rules_mod_lock); 169 170 fib_rules_cleanup_ops(ops); 171 kfree_rcu(ops, rcu); 172 } 173 EXPORT_SYMBOL_GPL(fib_rules_unregister); 174 175 static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, 176 struct flowi *fl, int flags) 177 { 178 int ret = 0; 179 180 if (rule->iifindex && (rule->iifindex != fl->flowi_iif)) 181 goto out; 182 183 if (rule->oifindex && (rule->oifindex != fl->flowi_oif)) 184 goto out; 185 186 if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask) 187 goto out; 188 189 ret = ops->match(rule, fl, flags); 190 out: 191 return (rule->flags & FIB_RULE_INVERT) ? !ret : ret; 192 } 193 194 int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, 195 int flags, struct fib_lookup_arg *arg) 196 { 197 struct fib_rule *rule; 198 int err; 199 200 rcu_read_lock(); 201 202 list_for_each_entry_rcu(rule, &ops->rules_list, list) { 203 jumped: 204 if (!fib_rule_match(rule, ops, fl, flags)) 205 continue; 206 207 if (rule->action == FR_ACT_GOTO) { 208 struct fib_rule *target; 209 210 target = rcu_dereference(rule->ctarget); 211 if (target == NULL) { 212 continue; 213 } else { 214 rule = target; 215 goto jumped; 216 } 217 } else if (rule->action == FR_ACT_NOP) 218 continue; 219 else 220 err = ops->action(rule, fl, flags, arg); 221 222 if (!err && ops->suppress && ops->suppress(rule, arg)) 223 continue; 224 225 if (err != -EAGAIN) { 226 if ((arg->flags & FIB_LOOKUP_NOREF) || 227 likely(atomic_inc_not_zero(&rule->refcnt))) { 228 arg->rule = rule; 229 goto out; 230 } 231 break; 232 } 233 } 234 235 err = -ESRCH; 236 out: 237 rcu_read_unlock(); 238 239 return err; 240 } 241 EXPORT_SYMBOL_GPL(fib_rules_lookup); 242 243 static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, 244 struct fib_rules_ops *ops) 245 { 246 int err = -EINVAL; 247 248 if (frh->src_len) 249 if (tb[FRA_SRC] == NULL || 250 frh->src_len > (ops->addr_size * 8) || 251 nla_len(tb[FRA_SRC]) != ops->addr_size) 252 goto errout; 253 254 if (frh->dst_len) 255 if (tb[FRA_DST] == NULL || 256 frh->dst_len > (ops->addr_size * 8) || 257 nla_len(tb[FRA_DST]) != ops->addr_size) 258 goto errout; 259 260 err = 0; 261 errout: 262 return err; 263 } 264 265 static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh) 266 { 267 struct net *net = sock_net(skb->sk); 268 struct fib_rule_hdr *frh = nlmsg_data(nlh); 269 struct fib_rules_ops *ops = NULL; 270 struct fib_rule *rule, *r, *last = NULL; 271 struct nlattr *tb[FRA_MAX+1]; 272 int err = -EINVAL, unresolved = 0; 273 274 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) 275 goto errout; 276 277 ops = lookup_rules_ops(net, frh->family); 278 if (ops == NULL) { 279 err = -EAFNOSUPPORT; 280 goto errout; 281 } 282 283 err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy); 284 if (err < 0) 285 goto errout; 286 287 err = validate_rulemsg(frh, tb, ops); 288 if (err < 0) 289 goto errout; 290 291 rule = kzalloc(ops->rule_size, GFP_KERNEL); 292 if (rule == NULL) { 293 err = -ENOMEM; 294 goto errout; 295 } 296 rule->fr_net = net; 297 298 if (tb[FRA_PRIORITY]) 299 rule->pref = nla_get_u32(tb[FRA_PRIORITY]); 300 301 if (tb[FRA_IIFNAME]) { 302 struct net_device *dev; 303 304 rule->iifindex = -1; 305 nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ); 306 dev = __dev_get_by_name(net, rule->iifname); 307 if (dev) 308 rule->iifindex = dev->ifindex; 309 } 310 311 if (tb[FRA_OIFNAME]) { 312 struct net_device *dev; 313 314 rule->oifindex = -1; 315 nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ); 316 dev = __dev_get_by_name(net, rule->oifname); 317 if (dev) 318 rule->oifindex = dev->ifindex; 319 } 320 321 if (tb[FRA_FWMARK]) { 322 rule->mark = nla_get_u32(tb[FRA_FWMARK]); 323 if (rule->mark) 324 /* compatibility: if the mark value is non-zero all bits 325 * are compared unless a mask is explicitly specified. 326 */ 327 rule->mark_mask = 0xFFFFFFFF; 328 } 329 330 if (tb[FRA_FWMASK]) 331 rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]); 332 333 rule->action = frh->action; 334 rule->flags = frh->flags; 335 rule->table = frh_get_table(frh, tb); 336 if (tb[FRA_SUPPRESS_PREFIXLEN]) 337 rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]); 338 else 339 rule->suppress_prefixlen = -1; 340 341 if (tb[FRA_SUPPRESS_IFGROUP]) 342 rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]); 343 else 344 rule->suppress_ifgroup = -1; 345 346 if (!tb[FRA_PRIORITY] && ops->default_pref) 347 rule->pref = ops->default_pref(ops); 348 349 err = -EINVAL; 350 if (tb[FRA_GOTO]) { 351 if (rule->action != FR_ACT_GOTO) 352 goto errout_free; 353 354 rule->target = nla_get_u32(tb[FRA_GOTO]); 355 /* Backward jumps are prohibited to avoid endless loops */ 356 if (rule->target <= rule->pref) 357 goto errout_free; 358 359 list_for_each_entry(r, &ops->rules_list, list) { 360 if (r->pref == rule->target) { 361 RCU_INIT_POINTER(rule->ctarget, r); 362 break; 363 } 364 } 365 366 if (rcu_dereference_protected(rule->ctarget, 1) == NULL) 367 unresolved = 1; 368 } else if (rule->action == FR_ACT_GOTO) 369 goto errout_free; 370 371 err = ops->configure(rule, skb, frh, tb); 372 if (err < 0) 373 goto errout_free; 374 375 list_for_each_entry(r, &ops->rules_list, list) { 376 if (r->pref > rule->pref) 377 break; 378 last = r; 379 } 380 381 fib_rule_get(rule); 382 383 if (last) 384 list_add_rcu(&rule->list, &last->list); 385 else 386 list_add_rcu(&rule->list, &ops->rules_list); 387 388 if (ops->unresolved_rules) { 389 /* 390 * There are unresolved goto rules in the list, check if 391 * any of them are pointing to this new rule. 392 */ 393 list_for_each_entry(r, &ops->rules_list, list) { 394 if (r->action == FR_ACT_GOTO && 395 r->target == rule->pref && 396 rtnl_dereference(r->ctarget) == NULL) { 397 rcu_assign_pointer(r->ctarget, rule); 398 if (--ops->unresolved_rules == 0) 399 break; 400 } 401 } 402 } 403 404 if (rule->action == FR_ACT_GOTO) 405 ops->nr_goto_rules++; 406 407 if (unresolved) 408 ops->unresolved_rules++; 409 410 notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid); 411 flush_route_cache(ops); 412 rules_ops_put(ops); 413 return 0; 414 415 errout_free: 416 kfree(rule); 417 errout: 418 rules_ops_put(ops); 419 return err; 420 } 421 422 static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh) 423 { 424 struct net *net = sock_net(skb->sk); 425 struct fib_rule_hdr *frh = nlmsg_data(nlh); 426 struct fib_rules_ops *ops = NULL; 427 struct fib_rule *rule, *tmp; 428 struct nlattr *tb[FRA_MAX+1]; 429 int err = -EINVAL; 430 431 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) 432 goto errout; 433 434 ops = lookup_rules_ops(net, frh->family); 435 if (ops == NULL) { 436 err = -EAFNOSUPPORT; 437 goto errout; 438 } 439 440 err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy); 441 if (err < 0) 442 goto errout; 443 444 err = validate_rulemsg(frh, tb, ops); 445 if (err < 0) 446 goto errout; 447 448 list_for_each_entry(rule, &ops->rules_list, list) { 449 if (frh->action && (frh->action != rule->action)) 450 continue; 451 452 if (frh_get_table(frh, tb) && 453 (frh_get_table(frh, tb) != rule->table)) 454 continue; 455 456 if (tb[FRA_PRIORITY] && 457 (rule->pref != nla_get_u32(tb[FRA_PRIORITY]))) 458 continue; 459 460 if (tb[FRA_IIFNAME] && 461 nla_strcmp(tb[FRA_IIFNAME], rule->iifname)) 462 continue; 463 464 if (tb[FRA_OIFNAME] && 465 nla_strcmp(tb[FRA_OIFNAME], rule->oifname)) 466 continue; 467 468 if (tb[FRA_FWMARK] && 469 (rule->mark != nla_get_u32(tb[FRA_FWMARK]))) 470 continue; 471 472 if (tb[FRA_FWMASK] && 473 (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK]))) 474 continue; 475 476 if (!ops->compare(rule, frh, tb)) 477 continue; 478 479 if (rule->flags & FIB_RULE_PERMANENT) { 480 err = -EPERM; 481 goto errout; 482 } 483 484 if (ops->delete) { 485 err = ops->delete(rule); 486 if (err) 487 goto errout; 488 } 489 490 list_del_rcu(&rule->list); 491 492 if (rule->action == FR_ACT_GOTO) { 493 ops->nr_goto_rules--; 494 if (rtnl_dereference(rule->ctarget) == NULL) 495 ops->unresolved_rules--; 496 } 497 498 /* 499 * Check if this rule is a target to any of them. If so, 500 * disable them. As this operation is eventually very 501 * expensive, it is only performed if goto rules have 502 * actually been added. 503 */ 504 if (ops->nr_goto_rules > 0) { 505 list_for_each_entry(tmp, &ops->rules_list, list) { 506 if (rtnl_dereference(tmp->ctarget) == rule) { 507 RCU_INIT_POINTER(tmp->ctarget, NULL); 508 ops->unresolved_rules++; 509 } 510 } 511 } 512 513 notify_rule_change(RTM_DELRULE, rule, ops, nlh, 514 NETLINK_CB(skb).portid); 515 fib_rule_put(rule); 516 flush_route_cache(ops); 517 rules_ops_put(ops); 518 return 0; 519 } 520 521 err = -ENOENT; 522 errout: 523 rules_ops_put(ops); 524 return err; 525 } 526 527 static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, 528 struct fib_rule *rule) 529 { 530 size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)) 531 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */ 532 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */ 533 + nla_total_size(4) /* FRA_PRIORITY */ 534 + nla_total_size(4) /* FRA_TABLE */ 535 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */ 536 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */ 537 + nla_total_size(4) /* FRA_FWMARK */ 538 + nla_total_size(4); /* FRA_FWMASK */ 539 540 if (ops->nlmsg_payload) 541 payload += ops->nlmsg_payload(rule); 542 543 return payload; 544 } 545 546 static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, 547 u32 pid, u32 seq, int type, int flags, 548 struct fib_rules_ops *ops) 549 { 550 struct nlmsghdr *nlh; 551 struct fib_rule_hdr *frh; 552 553 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags); 554 if (nlh == NULL) 555 return -EMSGSIZE; 556 557 frh = nlmsg_data(nlh); 558 frh->family = ops->family; 559 frh->table = rule->table; 560 if (nla_put_u32(skb, FRA_TABLE, rule->table)) 561 goto nla_put_failure; 562 if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen)) 563 goto nla_put_failure; 564 frh->res1 = 0; 565 frh->res2 = 0; 566 frh->action = rule->action; 567 frh->flags = rule->flags; 568 569 if (rule->action == FR_ACT_GOTO && 570 rcu_access_pointer(rule->ctarget) == NULL) 571 frh->flags |= FIB_RULE_UNRESOLVED; 572 573 if (rule->iifname[0]) { 574 if (nla_put_string(skb, FRA_IIFNAME, rule->iifname)) 575 goto nla_put_failure; 576 if (rule->iifindex == -1) 577 frh->flags |= FIB_RULE_IIF_DETACHED; 578 } 579 580 if (rule->oifname[0]) { 581 if (nla_put_string(skb, FRA_OIFNAME, rule->oifname)) 582 goto nla_put_failure; 583 if (rule->oifindex == -1) 584 frh->flags |= FIB_RULE_OIF_DETACHED; 585 } 586 587 if ((rule->pref && 588 nla_put_u32(skb, FRA_PRIORITY, rule->pref)) || 589 (rule->mark && 590 nla_put_u32(skb, FRA_FWMARK, rule->mark)) || 591 ((rule->mark_mask || rule->mark) && 592 nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) || 593 (rule->target && 594 nla_put_u32(skb, FRA_GOTO, rule->target))) 595 goto nla_put_failure; 596 597 if (rule->suppress_ifgroup != -1) { 598 if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup)) 599 goto nla_put_failure; 600 } 601 602 if (ops->fill(rule, skb, frh) < 0) 603 goto nla_put_failure; 604 605 nlmsg_end(skb, nlh); 606 return 0; 607 608 nla_put_failure: 609 nlmsg_cancel(skb, nlh); 610 return -EMSGSIZE; 611 } 612 613 static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, 614 struct fib_rules_ops *ops) 615 { 616 int idx = 0; 617 struct fib_rule *rule; 618 619 rcu_read_lock(); 620 list_for_each_entry_rcu(rule, &ops->rules_list, list) { 621 if (idx < cb->args[1]) 622 goto skip; 623 624 if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, 625 cb->nlh->nlmsg_seq, RTM_NEWRULE, 626 NLM_F_MULTI, ops) < 0) 627 break; 628 skip: 629 idx++; 630 } 631 rcu_read_unlock(); 632 cb->args[1] = idx; 633 rules_ops_put(ops); 634 635 return skb->len; 636 } 637 638 static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) 639 { 640 struct net *net = sock_net(skb->sk); 641 struct fib_rules_ops *ops; 642 int idx = 0, family; 643 644 family = rtnl_msg_family(cb->nlh); 645 if (family != AF_UNSPEC) { 646 /* Protocol specific dump request */ 647 ops = lookup_rules_ops(net, family); 648 if (ops == NULL) 649 return -EAFNOSUPPORT; 650 651 return dump_rules(skb, cb, ops); 652 } 653 654 rcu_read_lock(); 655 list_for_each_entry_rcu(ops, &net->rules_ops, list) { 656 if (idx < cb->args[0] || !try_module_get(ops->owner)) 657 goto skip; 658 659 if (dump_rules(skb, cb, ops) < 0) 660 break; 661 662 cb->args[1] = 0; 663 skip: 664 idx++; 665 } 666 rcu_read_unlock(); 667 cb->args[0] = idx; 668 669 return skb->len; 670 } 671 672 static void notify_rule_change(int event, struct fib_rule *rule, 673 struct fib_rules_ops *ops, struct nlmsghdr *nlh, 674 u32 pid) 675 { 676 struct net *net; 677 struct sk_buff *skb; 678 int err = -ENOBUFS; 679 680 net = ops->fro_net; 681 skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL); 682 if (skb == NULL) 683 goto errout; 684 685 err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops); 686 if (err < 0) { 687 /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */ 688 WARN_ON(err == -EMSGSIZE); 689 kfree_skb(skb); 690 goto errout; 691 } 692 693 rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL); 694 return; 695 errout: 696 if (err < 0) 697 rtnl_set_sk_err(net, ops->nlgroup, err); 698 } 699 700 static void attach_rules(struct list_head *rules, struct net_device *dev) 701 { 702 struct fib_rule *rule; 703 704 list_for_each_entry(rule, rules, list) { 705 if (rule->iifindex == -1 && 706 strcmp(dev->name, rule->iifname) == 0) 707 rule->iifindex = dev->ifindex; 708 if (rule->oifindex == -1 && 709 strcmp(dev->name, rule->oifname) == 0) 710 rule->oifindex = dev->ifindex; 711 } 712 } 713 714 static void detach_rules(struct list_head *rules, struct net_device *dev) 715 { 716 struct fib_rule *rule; 717 718 list_for_each_entry(rule, rules, list) { 719 if (rule->iifindex == dev->ifindex) 720 rule->iifindex = -1; 721 if (rule->oifindex == dev->ifindex) 722 rule->oifindex = -1; 723 } 724 } 725 726 727 static int fib_rules_event(struct notifier_block *this, unsigned long event, 728 void *ptr) 729 { 730 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 731 struct net *net = dev_net(dev); 732 struct fib_rules_ops *ops; 733 734 ASSERT_RTNL(); 735 736 switch (event) { 737 case NETDEV_REGISTER: 738 list_for_each_entry(ops, &net->rules_ops, list) 739 attach_rules(&ops->rules_list, dev); 740 break; 741 742 case NETDEV_CHANGENAME: 743 list_for_each_entry(ops, &net->rules_ops, list) { 744 detach_rules(&ops->rules_list, dev); 745 attach_rules(&ops->rules_list, dev); 746 } 747 break; 748 749 case NETDEV_UNREGISTER: 750 list_for_each_entry(ops, &net->rules_ops, list) 751 detach_rules(&ops->rules_list, dev); 752 break; 753 } 754 755 return NOTIFY_DONE; 756 } 757 758 static struct notifier_block fib_rules_notifier = { 759 .notifier_call = fib_rules_event, 760 }; 761 762 static int __net_init fib_rules_net_init(struct net *net) 763 { 764 INIT_LIST_HEAD(&net->rules_ops); 765 spin_lock_init(&net->rules_mod_lock); 766 return 0; 767 } 768 769 static struct pernet_operations fib_rules_net_ops = { 770 .init = fib_rules_net_init, 771 }; 772 773 static int __init fib_rules_init(void) 774 { 775 int err; 776 rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL); 777 rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL); 778 rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL); 779 780 err = register_pernet_subsys(&fib_rules_net_ops); 781 if (err < 0) 782 goto fail; 783 784 err = register_netdevice_notifier(&fib_rules_notifier); 785 if (err < 0) 786 goto fail_unregister; 787 788 return 0; 789 790 fail_unregister: 791 unregister_pernet_subsys(&fib_rules_net_ops); 792 fail: 793 rtnl_unregister(PF_UNSPEC, RTM_NEWRULE); 794 rtnl_unregister(PF_UNSPEC, RTM_DELRULE); 795 rtnl_unregister(PF_UNSPEC, RTM_GETRULE); 796 return err; 797 } 798 799 subsys_initcall(fib_rules_init); 800