/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = ops->fro_net;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
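
/* Usage sketch (illustrative, not part of this file): a protocol builds a
 * template supplying at least the callbacks that __fib_rules_register()
 * checks for (match, configure, compare, fill, action) and registers a
 * per-namespace copy of it.  The example_* names below are hypothetical.
 *
 *	static const struct fib_rules_ops example_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct fib_rule),
 *		.addr_size	= sizeof(u32),
 *		.match		= example_rule_match,
 *		.configure	= example_rule_configure,
 *		.compare	= example_rule_compare,
 *		.fill		= example_rule_fill,
 *		.action		= example_rule_action,
 *		.default_pref	= fib_default_rule_pref,
 *		.nlgroup	= RTNLGRP_IPV4_RULE,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ops = fib_rules_register(&example_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 */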

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
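
/* Usage sketch (illustrative): callers resolve a flow against the rule list
 * roughly the way the IPv4 front end does; FIB_LOOKUP_NOREF skips the
 * refcount so no fib_rule_put() is needed afterwards.  Assumes an
 * IPv4-style caller with a struct flowi4 *flp and a result pointer res.
 *
 *	struct fib_lookup_arg arg = {
 *		.result	= res,
 *		.flags	= FIB_LOOKUP_NOREF,
 *	};
 *
 *	err = fib_rules_lookup(net->ipv4.rules_ops,
 *			       flowi4_to_flowi(flp), 0, &arg);
 */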

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = net;

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
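
/* Example (illustrative) of the goto semantics enforced above, for the
 * hypothetical rule list
 *
 *	pref 10: goto 30	resolved, ctarget points at the pref 30 rule
 *	pref 20: goto 15	rejected at insert time: 15 <= 20
 *	pref 25: goto 40	unresolved until a pref 40 rule is added
 *	pref 30: lookup main
 *
 * Backward jumps fail the target <= pref check, and a forward jump to a
 * preference that does not exist yet is counted in unresolved_rules and
 * skipped by fib_rules_lookup() until its target appears.
 */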
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if the deleted rule is the target of any goto rule.
		 * If so, mark those rules unresolved again. As this scan is
		 * potentially expensive, it is only performed if goto rules
		 * have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
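
/* Protocols that emit additional attributes extend the size estimate below
 * through the optional ->nlmsg_payload callback.  A minimal sketch, assuming
 * an IPv4-style protocol that adds FRA_SRC and FRA_DST (hypothetical name):
 *
 *	static size_t example_nlmsg_payload(struct fib_rule *rule)
 *	{
 *		return nla_total_size(4)	// FRA_SRC
 *		     + nla_total_size(4);	// FRA_DST
 *	}
 */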

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size(8); /* FRA_TUN_ID */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}
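
/* Dump state: cb->args[0] holds the index of the fib_rules_ops being walked
 * in the AF_UNSPEC case and cb->args[1] the rule index within it, so an
 * interrupted NLM_F_MULTI dump resumes where it left off.
 */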

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);