/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};

bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
	    rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);

int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
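/* fib_rules_register - register a fib_rules_ops template for one family
 *
 * The template is duplicated so that every network namespace gets its
 * own ops instance with an independent rules_list.  An address-family
 * module would use it roughly like this (an illustrative sketch only,
 * not code from this file):
 *
 *	static const struct fib_rules_ops fib4_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct fib4_rule),
 *		.addr_size	= sizeof(u32),
 *		...
 *	};
 *
 *	ops = fib_rules_register(&fib4_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 * Returns the new ops on success or an ERR_PTR() on failure.
 */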
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
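/* fib_rules_lookup - find the first matching rule and run its action
 *
 * The rules_list is kept sorted by preference.  Each rule is matched
 * against the flow under RCU; FR_ACT_GOTO jumps to its resolved target,
 * FR_ACT_NOP is skipped, and any other action is handed to ops->action().
 * The walk stops at the first rule whose action does not return -EAGAIN
 * and that ops->suppress() (if present) does not veto.  Unless the caller
 * passed FIB_LOOKUP_NOREF, a reference is taken on the returned rule.
 */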
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return 1;
	}
	return 0;
}
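/* fib_nl_newrule - RTM_NEWRULE handler
 *
 * Parses the netlink attributes into a new rule, resolves a goto target
 * if one is requested, rejects duplicates when NLM_F_EXCL is set, lets
 * the family-specific ops->configure() finish the setup and finally
 * links the rule into the list ordered by preference.
 */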
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = net;

	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
				      : fib_default_rule_pref(ops);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
		rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
		if (rule->l3mdev != 1)
#endif
			goto errout_free;
	}

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	if (rule->l3mdev && rule->table)
		goto errout_free;

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			goto errout_free;
		}

		rule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&rule->uid_range) ||
		    !uid_lte(rule->uid_range.start, rule->uid_range.end))
			goto errout_free;
	} else {
		rule->uid_range = fib_kuid_range_unset;
	}

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);
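/* fib_nl_delrule - RTM_DELRULE handler
 *
 * Every attribute present in the request must match; the first matching
 * rule is unlinked, any goto rules pointing at it are marked unresolved
 * again and an RTM_DELRULE notification is sent.
 */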
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	struct fib_kuid_range range;
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	if (tb[FRA_UID_RANGE]) {
		range = nla_get_kuid_range(tb);
		if (!uid_range_set(&range))
			goto errout;
	} else {
		range = fib_kuid_range_unset;
	}

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (tb[FRA_L3MDEV] &&
		    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
			continue;

		if (uid_range_set(&range) &&
		    (!uid_eq(rule->uid_range.start, range.start) ||
		     !uid_eq(rule->uid_range.end, range.end)))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * If any goto rules point at the rule being deleted, mark
		 * them unresolved again. As this operation is potentially
		 * very expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);
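/* Worst-case netlink message size for a single rule, used when
 * allocating notification skbs in notify_rule_change().
 */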
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range));

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}
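/* RTM_GETRULE dump handler: dump a single family if one was requested,
 * otherwise iterate over all registered families, using cb->args[0] and
 * cb->args[1] to resume the dump across multiple skbs.
 */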
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
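/* Per-namespace state: each struct net carries its own rules_ops list,
 * protected by rules_mod_lock for writers and RCU for readers.
 */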
static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);