/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = ops->fro_net;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

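/*
 * Usage sketch (illustrative only, not part of this file): a protocol
 * typically declares a const ops template and registers a per-namespace
 * copy from its pernet init hook.  The "example_*" names below are
 * hypothetical; a real user is fib4_rules_ops_template in
 * net/ipv4/fib_rules.c.
 *
 *	static const struct fib_rules_ops example_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct fib_rule),
 *		.addr_size	= sizeof(u32),
 *		.match		= example_rule_match,
 *		.configure	= example_rule_configure,
 *		.compare	= example_rule_compare,
 *		.fill		= example_rule_fill,
 *		.action		= example_rule_action,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ops = fib_rules_register(&example_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
 */
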
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

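/* Handle RTM_NEWRULE requests: parse and validate the netlink message,
 * allocate and configure the rule, splice it into the list (kept sorted
 * by ascending preference) under RTNL, resolve any goto rules that were
 * waiting for this preference, and notify listeners.  When FRA_PRIORITY
 * is absent, fib_default_rule_pref() above picks a preference one below
 * that of the second rule in the list, slotting the new rule right
 * behind the first (typically preference 0) rule.
 */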
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = net;

	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
				      : fib_default_rule_pref(ops);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

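/* For reference, these handlers back the iproute2 "ip rule" front end:
 * e.g. "ip rule add fwmark 1 table 100 pref 1000" arrives here as an
 * RTM_NEWRULE message carrying FRA_FWMARK and FRA_PRIORITY attributes
 * with the table in the header or in FRA_TABLE (see frh_get_table()),
 * and "ip rule del pref 1000" as the matching RTM_DELRULE below.
 */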
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target of any goto rules. If so,
		 * mark them unresolved. As this operation is potentially
		 * very expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

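/* fib_rule_nlmsg_size() computes a worst-case payload for one rule:
 * every generic attribute is accounted for whether or not the rule sets
 * it, and ops->nlmsg_payload() lets the protocol add room for its own
 * attributes.  Overestimating is fine; running out of space while
 * filling (-EMSGSIZE) is treated as a bug in notify_rule_change().
 */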
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8); /* FRA_TUN_ID */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

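/* Dump state is kept in the netlink callback: cb->args[0] indexes the
 * fib_rules_ops within the namespace (for AF_UNSPEC dumps spanning all
 * families) and cb->args[1] indexes the rule within one ops, so a dump
 * that fills its skb can resume where it left off on the next call.
 */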
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

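/* Per-namespace state is just the (initially empty) list of registered
 * ops and its lock.  No .exit hook is provided here; each protocol is
 * expected to unregister its ops from its own pernet exit path before
 * the namespace goes away.
 */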
static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);
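
/*
 * Lookup usage sketch (illustrative, simplified): a protocol resolves a
 * flow against its rule list roughly as follows.  "example_rules_ops"
 * is hypothetical and error handling is omitted; on success, arg.rule
 * points at the matching rule and ops->action() has already filled in
 * the protocol-specific result behind arg.
 *
 *	struct fib_lookup_arg arg = { .flags = FIB_LOOKUP_NOREF };
 *	int err;
 *
 *	err = fib_rules_lookup(net->example_rules_ops,
 *			       flowi4_to_flowi(&fl4), 0, &arg);
 */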