/*
 *	Linux INET6 implementation
 *	Forwarding Information Database
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	Yuji SEKIYA @USAGI:	Support default route on router node;
 *				remove ip6_null_entry from the top of
 *				routing table.
 *	Ville Nuorvala:		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/lwtunnel.h>

#include <net/ip6_fib.h>
#include <net/ip6_route.h>

#define RT6_DEBUG 2

#if RT6_DEBUG >= 3
#define RT6_TRACE(x...) pr_debug(x)
#else
#define RT6_TRACE(x...) do { ; } while (0)
#endif

static struct kmem_cache *fib6_node_kmem __read_mostly;

struct fib6_cleaner {
	struct fib6_walker w;
	struct net *net;
	int (*func)(struct rt6_info *, void *arg);
	int sernum;
	void *arg;
};

#ifdef CONFIG_IPV6_SUBTREES
#define FWS_INIT FWS_S
#else
#define FWS_INIT FWS_L
#endif

static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
static int fib6_walk(struct net *net, struct fib6_walker *w);
static int fib6_walk_continue(struct fib6_walker *w);

/*
 *	A routing update causes an increase of the serial number on the
 *	affected subtree. This allows for cached routes to be asynchronously
 *	tested when modifications are made to the destination cache as a
 *	result of redirects, path MTU changes, etc.
 */

static void fib6_gc_timer_cb(unsigned long arg);

#define FOR_WALKERS(net, w) \
	list_for_each_entry(w, &(net)->ipv6.fib6_walkers, lh)

static void fib6_walker_link(struct net *net, struct fib6_walker *w)
{
	write_lock_bh(&net->ipv6.fib6_walker_lock);
	list_add(&w->lh, &net->ipv6.fib6_walkers);
	write_unlock_bh(&net->ipv6.fib6_walker_lock);
}

static void fib6_walker_unlink(struct net *net, struct fib6_walker *w)
{
	write_lock_bh(&net->ipv6.fib6_walker_lock);
	list_del(&w->lh);
	write_unlock_bh(&net->ipv6.fib6_walker_lock);
}

static int fib6_new_sernum(struct net *net)
{
	int new, old;

	do {
		old = atomic_read(&net->ipv6.fib6_sernum);
		new = old < INT_MAX ? old + 1 : 1;
	} while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
				old, new) != old);
	return new;
}

enum {
	FIB6_NO_SERNUM_CHANGE = 0,
};
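/*
 * For illustration only: a minimal sketch of how the serial number is
 * expected to behave, assuming no concurrent updaters (the cmpxchg loop
 * above is what handles the concurrent case).
 *
 *	int s1 = fib6_new_sernum(net);	// e.g. returns N
 *	int s2 = fib6_new_sernum(net);	// returns N + 1
 *
 * At INT_MAX the counter wraps back to 1, never to 0, so a valid serial
 * number never equals FIB6_NO_SERNUM_CHANGE.
 */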
/*
 *	Auxiliary address test functions for the radix tree.
 *
 *	These assume a 32-bit processor (although they will also work on
 *	64-bit processors)
 */

/*
 *	test bit
 */
#if defined(__LITTLE_ENDIAN)
# define BITOP_BE32_SWIZZLE	(0x1F & ~7)
#else
# define BITOP_BE32_SWIZZLE	0
#endif

static __be32 addr_bit_set(const void *token, int fn_bit)
{
	const __be32 *addr = token;
	/*
	 * Here,
	 *	1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
	 * is optimized version of
	 *	htonl(1 << ((~fn_bit)&0x1F))
	 * See include/asm-generic/bitops/le.h.
	 */
	return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
	       addr[fn_bit >> 5];
}
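/*
 * Worked example (illustrative only): for the destination 2001:db8::/32,
 * the first 32-bit word of the address is 0x20010db8 in network byte
 * order.  addr_bit_set(addr, 0) tests the most significant bit of that
 * word (0x80000000, clear here), addr_bit_set(addr, 2) tests 0x20000000
 * (set here), and fn_bit >= 32 moves on to addr[1].  The returned value
 * is only ever used as a boolean "go right?" decision when descending
 * the trie.
 */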
static struct fib6_node *node_alloc(void)
{
	struct fib6_node *fn;

	fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);

	return fn;
}

static void node_free(struct fib6_node *fn)
{
	kmem_cache_free(fib6_node_kmem, fn);
}

static void rt6_rcu_free(struct rt6_info *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
{
	int cpu;

	if (!non_pcpu_rt->rt6i_pcpu)
		return;

	for_each_possible_cpu(cpu) {
		struct rt6_info **ppcpu_rt;
		struct rt6_info *pcpu_rt;

		ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
		pcpu_rt = *ppcpu_rt;
		if (pcpu_rt) {
			rt6_rcu_free(pcpu_rt);
			*ppcpu_rt = NULL;
		}
	}

	free_percpu(non_pcpu_rt->rt6i_pcpu);
	non_pcpu_rt->rt6i_pcpu = NULL;
}

static void rt6_release(struct rt6_info *rt)
{
	if (atomic_dec_and_test(&rt->rt6i_ref)) {
		rt6_free_pcpu(rt);
		rt6_rcu_free(rt);
	}
}

static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
	unsigned int h;

	/*
	 * Initialize table lock at a single place to give lockdep a key,
	 * tables aren't visible prior to being linked to the list.
	 */
	rwlock_init(&tb->tb6_lock);

	h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);

	/*
	 * No protection necessary, this is the only list mutation
	 * operation, tables never disappear once they exist.
	 */
	hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
}

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
{
	struct fib6_table *table;

	table = kzalloc(sizeof(*table), GFP_ATOMIC);
	if (table) {
		table->tb6_id = id;
		table->tb6_root.leaf = net->ipv6.ip6_null_entry;
		table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
		inet_peer_base_init(&table->tb6_peers);
	}

	return table;
}

struct fib6_table *fib6_new_table(struct net *net, u32 id)
{
	struct fib6_table *tb;

	if (id == 0)
		id = RT6_TABLE_MAIN;
	tb = fib6_get_table(net, id);
	if (tb)
		return tb;

	tb = fib6_alloc_table(net, id);
	if (tb)
		fib6_link_table(net, tb);

	return tb;
}
EXPORT_SYMBOL_GPL(fib6_new_table);

struct fib6_table *fib6_get_table(struct net *net, u32 id)
{
	struct fib6_table *tb;
	struct hlist_head *head;
	unsigned int h;

	if (id == 0)
		id = RT6_TABLE_MAIN;
	h = id & (FIB6_TABLE_HASHSZ - 1);
	rcu_read_lock();
	head = &net->ipv6.fib_table_hash[h];
	hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
		if (tb->tb6_id == id) {
			rcu_read_unlock();
			return tb;
		}
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(fib6_get_table);
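/*
 * Usage sketch (illustrative, assuming CONFIG_IPV6_MULTIPLE_TABLES):
 * callers that may race with table creation use fib6_new_table(), which
 * is idempotent, while pure readers use fib6_get_table() and must cope
 * with a NULL return.
 *
 *	struct fib6_table *tb;
 *
 *	tb = fib6_new_table(net, 100);		  // create-or-lookup table 100
 *	tb = fib6_get_table(net, RT6_TABLE_MAIN); // lookup only, never allocates
 *	if (!tb)
 *		return -ENOENT;			  // hypothetical error handling
 *
 * An id of 0 is treated as RT6_TABLE_MAIN by both helpers.
 */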
static void __net_init fib6_tables_init(struct net *net)
{
	fib6_link_table(net, net->ipv6.fib6_main_tbl);
	fib6_link_table(net, net->ipv6.fib6_local_tbl);
}
#else

struct fib6_table *fib6_new_table(struct net *net, u32 id)
{
	return fib6_get_table(net, id);
}

struct fib6_table *fib6_get_table(struct net *net, u32 id)
{
	return net->ipv6.fib6_main_tbl;
}

struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
				   int flags, pol_lookup_t lookup)
{
	struct rt6_info *rt;

	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
	if (rt->rt6i_flags & RTF_REJECT &&
	    rt->dst.error == -EAGAIN) {
		ip6_rt_put(rt);
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	}

	return &rt->dst;
}

static void __net_init fib6_tables_init(struct net *net)
{
	fib6_link_table(net, net->ipv6.fib6_main_tbl);
}

#endif

static int fib6_dump_node(struct fib6_walker *w)
{
	int res;
	struct rt6_info *rt;

	for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
		res = rt6_dump_route(rt, w->args);
		if (res < 0) {
			/* Frame is full, suspend walking */
			w->leaf = rt;
			return 1;
		}
	}
	w->leaf = NULL;
	return 0;
}

static void fib6_dump_end(struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct fib6_walker *w = (void *)cb->args[2];

	if (w) {
		if (cb->args[4]) {
			cb->args[4] = 0;
			fib6_walker_unlink(net, w);
		}
		cb->args[2] = 0;
		kfree(w);
	}
	cb->done = (void *)cb->args[3];
	cb->args[1] = 3;
}

static int fib6_dump_done(struct netlink_callback *cb)
{
	fib6_dump_end(cb);
	return cb->done ? cb->done(cb) : 0;
}

static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib6_walker *w;
	int res;

	w = (void *)cb->args[2];
	w->root = &table->tb6_root;

	if (cb->args[4] == 0) {
		w->count = 0;
		w->skip = 0;

		read_lock_bh(&table->tb6_lock);
		res = fib6_walk(net, w);
		read_unlock_bh(&table->tb6_lock);
		if (res > 0) {
			cb->args[4] = 1;
			cb->args[5] = w->root->fn_sernum;
		}
	} else {
		if (cb->args[5] != w->root->fn_sernum) {
			/* Begin at the root if the tree changed */
			cb->args[5] = w->root->fn_sernum;
			w->state = FWS_INIT;
			w->node = w->root;
			w->skip = w->count;
		} else
			w->skip = 0;

		read_lock_bh(&table->tb6_lock);
		res = fib6_walk_continue(w);
		read_unlock_bh(&table->tb6_lock);
		if (res <= 0) {
			fib6_walker_unlink(net, w);
			cb->args[4] = 0;
		}
	}

	return res;
}

static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct rt6_rtnl_dump_arg arg;
	struct fib6_walker *w;
	struct fib6_table *tb;
	struct hlist_head *head;
	int res = 0;

	s_h = cb->args[0];
	s_e = cb->args[1];

	w = (void *)cb->args[2];
	if (!w) {
		/* New dump:
		 *
		 * 1. hook callback destructor.
		 */
		cb->args[3] = (long)cb->done;
		cb->done = fib6_dump_done;

		/*
		 * 2. allocate and initialize walker.
		 */
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w)
			return -ENOMEM;
		w->func = fib6_dump_node;
		cb->args[2] = (long)w;
	}

	arg.skb = skb;
	arg.cb = cb;
	arg.net = net;
	w->args = &arg;

	rcu_read_lock();
	for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
			if (e < s_e)
				goto next;
			res = fib6_dump_table(tb, skb, cb);
			if (res != 0)
				goto out;
next:
			e++;
		}
	}
out:
	rcu_read_unlock();
	cb->args[1] = e;
	cb->args[0] = h;

	res = res < 0 ? res : skb->len;
	if (res <= 0)
		fib6_dump_end(cb);
	return res;
}
/*
 *	Routing Table
 *
 *	return the appropriate node for a routing tree "add" operation
 *	by either creating and inserting or by returning an existing
 *	node.
 */

static struct fib6_node *fib6_add_1(struct fib6_node *root,
				    struct in6_addr *addr, int plen,
				    int offset, int allow_create,
				    int replace_required, int sernum)
{
	struct fib6_node *fn, *in, *ln;
	struct fib6_node *pn = NULL;
	struct rt6key *key;
	int bit;
	__be32 dir = 0;

	RT6_TRACE("fib6_add_1\n");

	/* insert node in tree */

	fn = root;

	do {
		key = (struct rt6key *)((u8 *)fn->leaf + offset);

		/*
		 *	Prefix match
		 */
		if (plen < fn->fn_bit ||
		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
			if (!allow_create) {
				if (replace_required) {
					pr_warn("Can't replace route, no match found\n");
					return ERR_PTR(-ENOENT);
				}
				pr_warn("NLM_F_CREATE should be set when creating new route\n");
			}
			goto insert_above;
		}

		/*
		 *	Exact match ?
		 */

		if (plen == fn->fn_bit) {
			/* clean up an intermediate node */
			if (!(fn->fn_flags & RTN_RTINFO)) {
				rt6_release(fn->leaf);
				fn->leaf = NULL;
			}

			fn->fn_sernum = sernum;

			return fn;
		}

		/*
		 *	We have more bits to go
		 */

		/* Try to walk down on tree. */
		fn->fn_sernum = sernum;
		dir = addr_bit_set(addr, fn->fn_bit);
		pn = fn;
		fn = dir ? fn->right : fn->left;
	} while (fn);

	if (!allow_create) {
		/* We should not create new node because
		 * NLM_F_REPLACE was specified without NLM_F_CREATE
		 * I assume it is safe to require NLM_F_CREATE when
		 * REPLACE flag is used! Later we may want to remove the
		 * check for replace_required, because according
		 * to netlink specification, NLM_F_CREATE
		 * MUST be specified if new route is created.
		 * That would keep IPv6 consistent with IPv4
		 */
		if (replace_required) {
			pr_warn("Can't replace route, no match found\n");
			return ERR_PTR(-ENOENT);
		}
		pr_warn("NLM_F_CREATE should be set when creating new route\n");
	}
	/*
	 *	We walked to the bottom of tree.
	 *	Create new leaf node without children.
	 */

	ln = node_alloc();

	if (!ln)
		return ERR_PTR(-ENOMEM);
	ln->fn_bit = plen;

	ln->parent = pn;
	ln->fn_sernum = sernum;

	if (dir)
		pn->right = ln;
	else
		pn->left = ln;

	return ln;


insert_above:
	/*
	 * split since we don't have a common prefix anymore or
	 * we have a less significant route.
	 * we've to insert an intermediate node on the list
	 * this new node will point to the one we need to create
	 * and the current
	 */

	pn = fn->parent;

	/* find 1st bit in difference between the 2 addrs.

	   See comment in __ipv6_addr_diff: bit may be an invalid value,
	   but if it is >= plen, the value is ignored in any case.
	 */

	bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));

	/*
	 *		(intermediate)[in]
	 *		  /	      \
	 *	(new leaf node)[ln]  (old node)[fn]
	 */
	if (plen > bit) {
		in = node_alloc();
		ln = node_alloc();

		if (!in || !ln) {
			if (in)
				node_free(in);
			if (ln)
				node_free(ln);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * new intermediate node.
		 * RTN_RTINFO will
		 * be off since an address that chooses one of
		 * the branches would not match less specific routes
		 * in the other branch
		 */

		in->fn_bit = bit;

		in->parent = pn;
		in->leaf = fn->leaf;
		atomic_inc(&in->leaf->rt6i_ref);

		in->fn_sernum = sernum;

		/* update parent pointer */
		if (dir)
			pn->right = in;
		else
			pn->left = in;

		ln->fn_bit = plen;

		ln->parent = in;
		fn->parent = in;

		ln->fn_sernum = sernum;

		if (addr_bit_set(addr, bit)) {
			in->right = ln;
			in->left = fn;
		} else {
			in->left = ln;
			in->right = fn;
		}
	} else { /* plen <= bit */

		/*
		 *		(new leaf node)[ln]
		 *		  /	      \
		 *	     (old node)[fn]  NULL
		 */

		ln = node_alloc();

		if (!ln)
			return ERR_PTR(-ENOMEM);

		ln->fn_bit = plen;

		ln->parent = pn;

		ln->fn_sernum = sernum;

		if (dir)
			pn->right = ln;
		else
			pn->left = ln;

		if (addr_bit_set(&key->addr, plen))
			ln->right = fn;
		else
			ln->left = fn;

		fn->parent = ln;
	}
	return ln;
}
static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
{
	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
	       RTF_GATEWAY;
}

static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc)
{
	int i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (test_bit(i, mxc->mx_valid))
			mp[i] = mxc->mx[i];
	}
}

static int fib6_commit_metrics(struct dst_entry *dst, struct mx6_config *mxc)
{
	if (!mxc->mx)
		return 0;

	if (dst->flags & DST_HOST) {
		u32 *mp = dst_metrics_write_ptr(dst);

		if (unlikely(!mp))
			return -ENOMEM;

		fib6_copy_metrics(mp, mxc);
	} else {
		dst_init_metrics(dst, mxc->mx, false);

		/* We've stolen mx now. */
		mxc->mx = NULL;
	}

	return 0;
}

static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
			  struct net *net)
{
	if (atomic_read(&rt->rt6i_ref) != 1) {
		/* This route is used as dummy address holder in some split
		 * nodes. It is not leaked, but it still holds other resources,
		 * which must be released in time. So, scan ascendant nodes
		 * and replace dummy references to this route with references
		 * to still alive ones.
		 */
		while (fn) {
			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
				fn->leaf = fib6_find_prefix(net, fn);
				atomic_inc(&fn->leaf->rt6i_ref);
				rt6_release(rt);
			}
			fn = fn->parent;
		}
		/* No more references are possible at this point. */
		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
	}
}
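/*
 * Illustrative only: which routes the ECMP test above accepts.
 *
 *	rt6i_flags = RTF_GATEWAY | RTF_UP;	 -> qualifies
 *	rt6i_flags = RTF_GATEWAY | RTF_ADDRCONF; -> does not (e.g. learned
 *						    via router advertisement)
 *	rt6i_flags = RTF_UP;			 -> does not (no gateway)
 *
 * Only manually configured gateway routes can therefore become ECMP
 * siblings of each other in fib6_add_rt2node() below.
 */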
/*
 *	Insert routing information in a node.
 */

static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
			    struct nl_info *info, struct mx6_config *mxc)
{
	struct rt6_info *iter = NULL;
	struct rt6_info **ins;
	struct rt6_info **fallback_ins = NULL;
	int replace = (info->nlh &&
		       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
	int add = (!info->nlh ||
		   (info->nlh->nlmsg_flags & NLM_F_CREATE));
	int found = 0;
	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
	int err;

	ins = &fn->leaf;

	for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
		/*
		 *	Search for duplicates
		 */

		if (iter->rt6i_metric == rt->rt6i_metric) {
			/*
			 *	Same priority level
			 */
			if (info->nlh &&
			    (info->nlh->nlmsg_flags & NLM_F_EXCL))
				return -EEXIST;
			if (replace) {
				if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
					found++;
					break;
				}
				if (rt_can_ecmp)
					fallback_ins = fallback_ins ?: ins;
				goto next_iter;
			}

			if (iter->dst.dev == rt->dst.dev &&
			    iter->rt6i_idev == rt->rt6i_idev &&
			    ipv6_addr_equal(&iter->rt6i_gateway,
					    &rt->rt6i_gateway)) {
				if (rt->rt6i_nsiblings)
					rt->rt6i_nsiblings = 0;
				if (!(iter->rt6i_flags & RTF_EXPIRES))
					return -EEXIST;
				if (!(rt->rt6i_flags & RTF_EXPIRES))
					rt6_clean_expires(iter);
				else
					rt6_set_expires(iter, rt->dst.expires);
				iter->rt6i_pmtu = rt->rt6i_pmtu;
				return -EEXIST;
			}
			/* If we have the same destination and the same metric,
			 * but not the same gateway, then the route we try to
			 * add is a sibling of this route: increment our counter
			 * of siblings, and later we will add our route to the
			 * list.
			 * Only static routes (which don't have flag
			 * RTF_EXPIRES) are used for ECMPv6.
			 *
			 * To avoid a long list, we only add siblings if the
			 * route has a gateway.
			 */
			if (rt_can_ecmp &&
			    rt6_qualify_for_ecmp(iter))
				rt->rt6i_nsiblings++;
		}

		if (iter->rt6i_metric > rt->rt6i_metric)
			break;

next_iter:
		ins = &iter->dst.rt6_next;
	}

	if (fallback_ins && !found) {
		/* No ECMP-able route found, replace first non-ECMP one */
		ins = fallback_ins;
		iter = *ins;
		found++;
	}

	/* Reset round-robin state, if necessary */
	if (ins == &fn->leaf)
		fn->rr_ptr = NULL;

	/* Link this route to other routes with the same destination. */
	if (rt->rt6i_nsiblings) {
		unsigned int rt6i_nsiblings;
		struct rt6_info *sibling, *temp_sibling;

		/* Find the first route that has the same metric */
		sibling = fn->leaf;
		while (sibling) {
			if (sibling->rt6i_metric == rt->rt6i_metric &&
			    rt6_qualify_for_ecmp(sibling)) {
				list_add_tail(&rt->rt6i_siblings,
					      &sibling->rt6i_siblings);
				break;
			}
			sibling = sibling->dst.rt6_next;
		}
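		/* Worked example (illustrative): three gateway routes to the
		 * same /64 with equal metric.  The first one inserted has
		 * rt6i_nsiblings == 0; the second arrives with
		 * rt6i_nsiblings == 1 and joins the first one's
		 * rt6i_siblings list; the third arrives with
		 * rt6i_nsiblings == 2.  The loop below then walks the other
		 * members and bumps each of their counters, so all three
		 * routes end up agreeing on rt6i_nsiblings == 2, i.e. two
		 * siblings besides themselves.
		 */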
		/* For each sibling in the list, increment the counter of
		 * siblings. BUG() if the counters do not match; the list
		 * of siblings is broken!
		 */
		rt6i_nsiblings = 0;
		list_for_each_entry_safe(sibling, temp_sibling,
					 &rt->rt6i_siblings, rt6i_siblings) {
			sibling->rt6i_nsiblings++;
			BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings);
			rt6i_nsiblings++;
		}
		BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
	}

	/*
	 *	insert node
	 */
	if (!replace) {
		if (!add)
			pr_warn("NLM_F_CREATE should be set when creating new route\n");

add:
		err = fib6_commit_metrics(&rt->dst, mxc);
		if (err)
			return err;

		rt->dst.rt6_next = iter;
		*ins = rt;
		rt->rt6i_node = fn;
		atomic_inc(&rt->rt6i_ref);
		inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
		info->nl_net->ipv6.rt6_stats->fib_rt_entries++;

		if (!(fn->fn_flags & RTN_RTINFO)) {
			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
			fn->fn_flags |= RTN_RTINFO;
		}

	} else {
		int nsiblings;

		if (!found) {
			if (add)
				goto add;
			pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
			return -ENOENT;
		}

		err = fib6_commit_metrics(&rt->dst, mxc);
		if (err)
			return err;

		*ins = rt;
		rt->rt6i_node = fn;
		rt->dst.rt6_next = iter->dst.rt6_next;
		atomic_inc(&rt->rt6i_ref);
		inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
		if (!(fn->fn_flags & RTN_RTINFO)) {
			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
			fn->fn_flags |= RTN_RTINFO;
		}
		nsiblings = iter->rt6i_nsiblings;
		fib6_purge_rt(iter, fn, info->nl_net);
		rt6_release(iter);

		if (nsiblings) {
			/* Replacing an ECMP route, remove all siblings */
			ins = &rt->dst.rt6_next;
			iter = *ins;
			while (iter) {
				if (rt6_qualify_for_ecmp(iter)) {
					*ins = iter->dst.rt6_next;
					fib6_purge_rt(iter, fn, info->nl_net);
					rt6_release(iter);
					nsiblings--;
				} else {
					ins = &iter->dst.rt6_next;
				}
				iter = *ins;
			}
			WARN_ON(nsiblings != 0);
		}
	}

	return 0;
}

static void fib6_start_gc(struct net *net, struct rt6_info *rt)
{
	if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
	    (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
		mod_timer(&net->ipv6.ip6_fib_timer,
			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}

void fib6_force_start_gc(struct net *net)
{
	if (!timer_pending(&net->ipv6.ip6_fib_timer))
		mod_timer(&net->ipv6.ip6_fib_timer,
			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}
/*
 *	Add routing information to the routing tree.
 *	<destination addr>/<source addr>
 *	with source addr info in sub-trees
 */

int fib6_add(struct fib6_node *root, struct rt6_info *rt,
	     struct nl_info *info, struct mx6_config *mxc)
{
	struct fib6_node *fn, *pn = NULL;
	int err = -ENOMEM;
	int allow_create = 1;
	int replace_required = 0;
	int sernum = fib6_new_sernum(info->nl_net);

	if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) &&
			 !atomic_read(&rt->dst.__refcnt)))
		return -EINVAL;

	if (info->nlh) {
		if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
			allow_create = 0;
		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
			replace_required = 1;
	}
	if (!allow_create && !replace_required)
		pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");

	fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
			offsetof(struct rt6_info, rt6i_dst), allow_create,
			replace_required, sernum);
	if (IS_ERR(fn)) {
		err = PTR_ERR(fn);
		fn = NULL;
		goto out;
	}

	pn = fn;

#ifdef CONFIG_IPV6_SUBTREES
	if (rt->rt6i_src.plen) {
		struct fib6_node *sn;

		if (!fn->subtree) {
			struct fib6_node *sfn;

			/*
			 * Create subtree.
			 *
			 *		fn[main tree]
			 *		|
			 *		sfn[subtree root]
			 *		   \
			 *		    sn[new leaf node]
			 */

			/* Create subtree root node */
			sfn = node_alloc();
			if (!sfn)
				goto st_failure;

			sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
			atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
			sfn->fn_flags = RTN_ROOT;
			sfn->fn_sernum = sernum;

			/* Now add the first leaf node to new subtree */

			sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
					rt->rt6i_src.plen,
					offsetof(struct rt6_info, rt6i_src),
					allow_create, replace_required, sernum);

			if (IS_ERR(sn)) {
				/* If it fails, discard the just-allocated
				   root, and then (in st_failure) the stale
				   node in the main tree.
				 */
				node_free(sfn);
				err = PTR_ERR(sn);
				goto st_failure;
			}

			/* Now link new subtree to main tree */
			sfn->parent = fn;
			fn->subtree = sfn;
		} else {
			sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
					rt->rt6i_src.plen,
					offsetof(struct rt6_info, rt6i_src),
					allow_create, replace_required, sernum);

			if (IS_ERR(sn)) {
				err = PTR_ERR(sn);
				goto st_failure;
			}
		}

		if (!fn->leaf) {
			fn->leaf = rt;
			atomic_inc(&rt->rt6i_ref);
		}
		fn = sn;
	}
#endif

	err = fib6_add_rt2node(fn, rt, info, mxc);
	if (!err) {
		fib6_start_gc(info->nl_net, rt);
		if (!(rt->rt6i_flags & RTF_CACHE))
			fib6_prune_clones(info->nl_net, pn);
		rt->dst.flags &= ~DST_NOCACHE;
	}

out:
	if (err) {
#ifdef CONFIG_IPV6_SUBTREES
		/*
		 * If fib6_add_1 has cleared the old leaf pointer in the
		 * super-tree leaf node we have to find a new one for it.
		 */
		if (pn != fn && pn->leaf == rt) {
			pn->leaf = NULL;
			atomic_dec(&rt->rt6i_ref);
		}
		if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
			pn->leaf = fib6_find_prefix(info->nl_net, pn);
#if RT6_DEBUG >= 2
			if (!pn->leaf) {
				WARN_ON(pn->leaf == NULL);
				pn->leaf = info->nl_net->ipv6.ip6_null_entry;
			}
#endif
			atomic_inc(&pn->leaf->rt6i_ref);
		}
#endif
		if (!(rt->dst.flags & DST_NOCACHE))
			dst_free(&rt->dst);
	}
	return err;

#ifdef CONFIG_IPV6_SUBTREES
	/* Subtree creation failed, probably the main tree node
	   is an orphan. If it is, shoot it.
	 */
st_failure:
	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
		fib6_repair_tree(info->nl_net, fn);
	if (!(rt->dst.flags & DST_NOCACHE))
		dst_free(&rt->dst);
	return err;
#endif
}
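/*
 * Caller sketch (illustrative, in the spirit of __ip6_ins_rt() in
 * net/ipv6/route.c): insertion is done on a table's root node while
 * holding that table's write lock, so fib6_add() never has to take it
 * itself.
 *
 *	write_lock_bh(&table->tb6_lock);
 *	err = fib6_add(&table->tb6_root, rt, info, mxc);
 *	write_unlock_bh(&table->tb6_lock);
 */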
/*
 *	Routing tree lookup
 *
 */

struct lookup_args {
	int			offset;		/* key offset on rt6_info */
	const struct in6_addr	*addr;		/* search key */
};

static struct fib6_node *fib6_lookup_1(struct fib6_node *root,
				       struct lookup_args *args)
{
	struct fib6_node *fn;
	__be32 dir;

	if (unlikely(args->offset == 0))
		return NULL;

	/*
	 *	Descend on a tree
	 */

	fn = root;

	for (;;) {
		struct fib6_node *next;

		dir = addr_bit_set(args->addr, fn->fn_bit);

		next = dir ? fn->right : fn->left;

		if (next) {
			fn = next;
			continue;
		}
		break;
	}

	while (fn) {
		if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
			struct rt6key *key;

			key = (struct rt6key *) ((u8 *) fn->leaf +
						 args->offset);

			if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
				if (fn->subtree) {
					struct fib6_node *sfn;
					sfn = fib6_lookup_1(fn->subtree,
							    args + 1);
					if (!sfn)
						goto backtrack;
					fn = sfn;
				}
#endif
				if (fn->fn_flags & RTN_RTINFO)
					return fn;
			}
		}
#ifdef CONFIG_IPV6_SUBTREES
backtrack:
#endif
		if (fn->fn_flags & RTN_ROOT)
			break;

		fn = fn->parent;
	}

	return NULL;
}

struct fib6_node *fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct fib6_node *fn;
	struct lookup_args args[] = {
		{
			.offset = offsetof(struct rt6_info, rt6i_dst),
			.addr = daddr,
		},
#ifdef CONFIG_IPV6_SUBTREES
		{
			.offset = offsetof(struct rt6_info, rt6i_src),
			.addr = saddr,
		},
#endif
		{
			.offset = 0,	/* sentinel */
		}
	};

	fn = fib6_lookup_1(root, daddr ? args : args + 1);
	if (!fn || fn->fn_flags & RTN_TL_ROOT)
		fn = root;

	return fn;
}
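/*
 * Longest-prefix-match behaviour, by example (illustrative): with
 * 2001:db8::/32 and 2001:db8:1::/48 both installed, a lookup of
 * 2001:db8:1::1 descends past both nodes and returns the /48 node,
 * while a lookup of 2001:db8:2::1 backtracks from the bottom of the
 * trie and stops at the /32 node.  Unlike fib6_lookup_1(),
 * fib6_lookup() never returns NULL; it falls back to the tree root
 * when nothing more specific matches.
 */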
/*
 *	Get node with specified destination prefix (and source prefix,
 *	if subtrees are used)
 */


static struct fib6_node *fib6_locate_1(struct fib6_node *root,
				       const struct in6_addr *addr,
				       int plen, int offset)
{
	struct fib6_node *fn;

	for (fn = root; fn ; ) {
		struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset);

		/*
		 *	Prefix match
		 */
		if (plen < fn->fn_bit ||
		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
			return NULL;

		if (plen == fn->fn_bit)
			return fn;

		/*
		 *	We have more bits to go
		 */
		if (addr_bit_set(addr, fn->fn_bit))
			fn = fn->right;
		else
			fn = fn->left;
	}
	return NULL;
}

struct fib6_node *fib6_locate(struct fib6_node *root,
			      const struct in6_addr *daddr, int dst_len,
			      const struct in6_addr *saddr, int src_len)
{
	struct fib6_node *fn;

	fn = fib6_locate_1(root, daddr, dst_len,
			   offsetof(struct rt6_info, rt6i_dst));

#ifdef CONFIG_IPV6_SUBTREES
	if (src_len) {
		WARN_ON(saddr == NULL);
		if (fn && fn->subtree)
			fn = fib6_locate_1(fn->subtree, saddr, src_len,
					   offsetof(struct rt6_info, rt6i_src));
	}
#endif

	if (fn && fn->fn_flags & RTN_RTINFO)
		return fn;

	return NULL;
}


/*
 *	Deletion
 *
 */

static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
{
	if (fn->fn_flags & RTN_ROOT)
		return net->ipv6.ip6_null_entry;

	while (fn) {
		if (fn->left)
			return fn->left->leaf;
		if (fn->right)
			return fn->right->leaf;

		fn = FIB6_SUBTREE(fn);
	}
	return NULL;
}
/*
 *	Called to trim the tree of intermediate nodes when possible. "fn"
 *	is the node we want to try and remove.
 */

static struct fib6_node *fib6_repair_tree(struct net *net,
					  struct fib6_node *fn)
{
	int children;
	int nstate;
	struct fib6_node *child, *pn;
	struct fib6_walker *w;
	int iter = 0;

	for (;;) {
		RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
		iter++;

		WARN_ON(fn->fn_flags & RTN_RTINFO);
		WARN_ON(fn->fn_flags & RTN_TL_ROOT);
		WARN_ON(fn->leaf);

		children = 0;
		child = NULL;
		if (fn->right)
			child = fn->right, children |= 1;
		if (fn->left)
			child = fn->left, children |= 2;

		if (children == 3 || FIB6_SUBTREE(fn)
#ifdef CONFIG_IPV6_SUBTREES
		    /* Subtree root (i.e. fn) may have one child */
		    || (children && fn->fn_flags & RTN_ROOT)
#endif
		    ) {
			fn->leaf = fib6_find_prefix(net, fn);
#if RT6_DEBUG >= 2
			if (!fn->leaf) {
				WARN_ON(!fn->leaf);
				fn->leaf = net->ipv6.ip6_null_entry;
			}
#endif
			atomic_inc(&fn->leaf->rt6i_ref);
			return fn->parent;
		}

		pn = fn->parent;
#ifdef CONFIG_IPV6_SUBTREES
		if (FIB6_SUBTREE(pn) == fn) {
			WARN_ON(!(fn->fn_flags & RTN_ROOT));
			FIB6_SUBTREE(pn) = NULL;
			nstate = FWS_L;
		} else {
			WARN_ON(fn->fn_flags & RTN_ROOT);
#endif
			if (pn->right == fn)
				pn->right = child;
			else if (pn->left == fn)
				pn->left = child;
#if RT6_DEBUG >= 2
			else
				WARN_ON(1);
#endif
			if (child)
				child->parent = pn;
			nstate = FWS_R;
#ifdef CONFIG_IPV6_SUBTREES
		}
#endif

		read_lock(&net->ipv6.fib6_walker_lock);
		FOR_WALKERS(net, w) {
			if (!child) {
				if (w->root == fn) {
					w->root = w->node = NULL;
					RT6_TRACE("W %p adjusted by delroot 1\n", w);
				} else if (w->node == fn) {
					RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
					w->node = pn;
					w->state = nstate;
				}
			} else {
				if (w->root == fn) {
					w->root = child;
					RT6_TRACE("W %p adjusted by delroot 2\n", w);
				}
				if (w->node == fn) {
					w->node = child;
					if (children&2) {
						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
						w->state = w->state >= FWS_R ? FWS_U : FWS_INIT;
					} else {
						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
						w->state = w->state >= FWS_C ? FWS_U : FWS_INIT;
					}
				}
			}
		}
		read_unlock(&net->ipv6.fib6_walker_lock);

		node_free(fn);
		if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn))
			return pn;

		rt6_release(pn->leaf);
		pn->leaf = NULL;
		fn = pn;
	}
}

static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
			   struct nl_info *info)
{
	struct fib6_walker *w;
	struct rt6_info *rt = *rtp;
	struct net *net = info->nl_net;

	RT6_TRACE("fib6_del_route\n");

	/* Unlink it */
	*rtp = rt->dst.rt6_next;
	rt->rt6i_node = NULL;
	net->ipv6.rt6_stats->fib_rt_entries--;
	net->ipv6.rt6_stats->fib_discarded_routes++;

	/* Reset round-robin state, if necessary */
	if (fn->rr_ptr == rt)
		fn->rr_ptr = NULL;

	/* Remove this entry from other siblings */
	if (rt->rt6i_nsiblings) {
		struct rt6_info *sibling, *next_sibling;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings, rt6i_siblings)
			sibling->rt6i_nsiblings--;
		rt->rt6i_nsiblings = 0;
		list_del_init(&rt->rt6i_siblings);
	}

	/* Adjust walkers */
	read_lock(&net->ipv6.fib6_walker_lock);
	FOR_WALKERS(net, w) {
		if (w->state == FWS_C && w->leaf == rt) {
			RT6_TRACE("walker %p adjusted by delroute\n", w);
			w->leaf = rt->dst.rt6_next;
			if (!w->leaf)
				w->state = FWS_U;
		}
	}
	read_unlock(&net->ipv6.fib6_walker_lock);

	rt->dst.rt6_next = NULL;

	/* If it was last route, expunge its radix tree node */
	if (!fn->leaf) {
		fn->fn_flags &= ~RTN_RTINFO;
		net->ipv6.rt6_stats->fib_route_nodes--;
		fn = fib6_repair_tree(net, fn);
	}

	fib6_purge_rt(rt, fn, net);

	inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
	rt6_release(rt);
}
int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
	struct net *net = info->nl_net;
	struct fib6_node *fn = rt->rt6i_node;
	struct rt6_info **rtp;

#if RT6_DEBUG >= 2
	if (rt->dst.obsolete > 0) {
		WARN_ON(fn);
		return -ENOENT;
	}
#endif
	if (!fn || rt == net->ipv6.ip6_null_entry)
		return -ENOENT;

	WARN_ON(!(fn->fn_flags & RTN_RTINFO));

	if (!(rt->rt6i_flags & RTF_CACHE)) {
		struct fib6_node *pn = fn;
#ifdef CONFIG_IPV6_SUBTREES
		/* clones of this route might be in another subtree */
		if (rt->rt6i_src.plen) {
			while (!(pn->fn_flags & RTN_ROOT))
				pn = pn->parent;
			pn = pn->parent;
		}
#endif
		fib6_prune_clones(info->nl_net, pn);
	}

	/*
	 *	Walk the leaf entries looking for ourself
	 */

	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) {
		if (*rtp == rt) {
			fib6_del_route(fn, rtp, info);
			return 0;
		}
	}
	return -ENOENT;
}

/*
 *	Tree traversal function.
 *
 *	Certainly, it is not interrupt safe.
 *	However, it is internally reentrant wrt itself and fib6_add/fib6_del.
 *	It means that we can modify the tree during walking
 *	and use this function for garbage collection, clone pruning,
 *	cleaning the tree when a device goes down, etc.
 *
 *	It guarantees that every node will be traversed,
 *	and that it will be traversed only once.
 *
 *	Callback function w->func may return:
 *	0 -> continue walking.
 *	positive value -> walking is suspended (used by tree dumps,
 *	and probably by gc, if it will be split to several slices)
 *	negative value -> terminate walking.
 *
 *	The function itself returns:
 *	0   -> walk is complete.
 *	>0  -> walk is incomplete (i.e. suspended)
 *	<0  -> walk is terminated by an error.
 */
static int fib6_walk_continue(struct fib6_walker *w)
{
	struct fib6_node *fn, *pn;

	for (;;) {
		fn = w->node;
		if (!fn)
			return 0;

		if (w->prune && fn != w->root &&
		    fn->fn_flags & RTN_RTINFO && w->state < FWS_C) {
			w->state = FWS_C;
			w->leaf = fn->leaf;
		}
		switch (w->state) {
#ifdef CONFIG_IPV6_SUBTREES
		case FWS_S:
			if (FIB6_SUBTREE(fn)) {
				w->node = FIB6_SUBTREE(fn);
				continue;
			}
			w->state = FWS_L;
#endif
		case FWS_L:
			if (fn->left) {
				w->node = fn->left;
				w->state = FWS_INIT;
				continue;
			}
			w->state = FWS_R;
		case FWS_R:
			if (fn->right) {
				w->node = fn->right;
				w->state = FWS_INIT;
				continue;
			}
			w->state = FWS_C;
			w->leaf = fn->leaf;
		case FWS_C:
			if (w->leaf && fn->fn_flags & RTN_RTINFO) {
				int err;

				if (w->skip) {
					w->skip--;
					goto skip;
				}

				err = w->func(w);
				if (err)
					return err;

				w->count++;
				continue;
			}
skip:
			w->state = FWS_U;
		case FWS_U:
			if (fn == w->root)
				return 0;
			pn = fn->parent;
			w->node = pn;
#ifdef CONFIG_IPV6_SUBTREES
			if (FIB6_SUBTREE(pn) == fn) {
				WARN_ON(!(fn->fn_flags & RTN_ROOT));
				w->state = FWS_L;
				continue;
			}
#endif
			if (pn->left == fn) {
				w->state = FWS_R;
				continue;
			}
			if (pn->right == fn) {
				w->state = FWS_C;
				w->leaf = w->node->leaf;
				continue;
			}
#if RT6_DEBUG >= 2
			WARN_ON(1);
#endif
		}
	}
}

static int fib6_walk(struct net *net, struct fib6_walker *w)
{
	int res;

	w->state = FWS_INIT;
	w->node = w->root;

	fib6_walker_link(net, w);
	res = fib6_walk_continue(w);
	if (res <= 0)
		fib6_walker_unlink(net, w);
	return res;
}

static int fib6_clean_node(struct fib6_walker *w)
{
	int res;
	struct rt6_info *rt;
	struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w);
	struct nl_info info = {
		.nl_net = c->net,
	};

	if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
	    w->node->fn_sernum != c->sernum)
		w->node->fn_sernum = c->sernum;

	if (!c->func) {
		WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
		w->leaf = NULL;
		return 0;
	}

	for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
		res = c->func(rt, c->arg);
		if (res < 0) {
			w->leaf = rt;
			res = fib6_del(rt, &info);
			if (res) {
#if RT6_DEBUG >= 2
				pr_debug("%s: del failed: rt=%p@%p err=%d\n",
					 __func__, rt, rt->rt6i_node, res);
#endif
				continue;
			}
			return 0;
		}
		WARN_ON(res != 0);
	}
	w->leaf = rt;
	return 0;
}

/*
 *	Convenient frontend to tree walker.
 *
 *	func is called on each route.
 *		It may return -1 -> delete this route.
 *		              0  -> continue walking
 *
 *	prune==1 -> only immediate children of node (certainly,
 *	ignoring pure split nodes) will be scanned.
 */

static void fib6_clean_tree(struct net *net, struct fib6_node *root,
			    int (*func)(struct rt6_info *, void *arg),
			    bool prune, int sernum, void *arg)
{
	struct fib6_cleaner c;

	c.w.root = root;
	c.w.func = fib6_clean_node;
	c.w.prune = prune;
	c.w.count = 0;
	c.w.skip = 0;
	c.func = func;
	c.sernum = sernum;
	c.arg = arg;
	c.net = net;

	fib6_walk(net, &c.w);
}

static void __fib6_clean_all(struct net *net,
			     int (*func)(struct rt6_info *, void *),
			     int sernum, void *arg)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();
	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			write_lock_bh(&table->tb6_lock);
			fib6_clean_tree(net, &table->tb6_root,
					func, false, sernum, arg);
			write_unlock_bh(&table->tb6_lock);
		}
	}
	rcu_read_unlock();
}

void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *),
		    void *arg)
{
	__fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg);
}
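/*
 * Cleaner callback sketch (illustrative; names are hypothetical, the
 * pattern follows callers in net/ipv6/route.c such as rt6_ifdown()):
 * a callback handed to fib6_clean_all() returns -1 to have the route
 * deleted and 0 to keep it.
 *
 *	static int my_drop_on_dev(struct rt6_info *rt, void *arg)
 *	{
 *		struct net_device *dev = arg;
 *
 *		return rt->dst.dev == dev ? -1 : 0;
 *	}
 *
 *	fib6_clean_all(net, my_drop_on_dev, dev);
 */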
static int fib6_prune_clone(struct rt6_info *rt, void *arg)
{
	if (rt->rt6i_flags & RTF_CACHE) {
		RT6_TRACE("pruning clone %p\n", rt);
		return -1;
	}

	return 0;
}

static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
{
	fib6_clean_tree(net, fn, fib6_prune_clone, true,
			FIB6_NO_SERNUM_CHANGE, NULL);
}

static void fib6_flush_trees(struct net *net)
{
	int new_sernum = fib6_new_sernum(net);

	__fib6_clean_all(net, NULL, new_sernum, NULL);
}

/*
 *	Garbage collection
 */

struct fib6_gc_args
{
	int			timeout;
	int			more;
};

static int fib6_age(struct rt6_info *rt, void *arg)
{
	struct fib6_gc_args *gc_args = arg;
	unsigned long now = jiffies;

	/*
	 *	check addrconf expiration here.
	 *	Routes are expired even if they are in use.
	 *
	 *	Also age clones. Note that clones are aged out
	 *	only if they are not in use now.
	 */

	if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) {
		if (time_after(now, rt->dst.expires)) {
			RT6_TRACE("expiring %p\n", rt);
			return -1;
		}
		gc_args->more++;
	} else if (rt->rt6i_flags & RTF_CACHE) {
		if (atomic_read(&rt->dst.__refcnt) == 0 &&
		    time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			return -1;
		} else if (rt->rt6i_flags & RTF_GATEWAY) {
			struct neighbour *neigh;
			__u8 neigh_flags = 0;

			neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
			if (neigh) {
				neigh_flags = neigh->flags;
				neigh_release(neigh);
			}
			if (!(neigh_flags & NTF_ROUTER)) {
				RT6_TRACE("purging route %p via non-router but gateway\n",
					  rt);
				return -1;
			}
		}
		gc_args->more++;
	}

	return 0;
}
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
	struct fib6_gc_args gc_args;
	unsigned long now;

	if (force) {
		spin_lock_bh(&net->ipv6.fib6_gc_lock);
	} else if (!spin_trylock_bh(&net->ipv6.fib6_gc_lock)) {
		mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
		return;
	}
	gc_args.timeout = expires ? (int)expires :
			  net->ipv6.sysctl.ip6_rt_gc_interval;

	gc_args.more = icmp6_dst_gc();

	fib6_clean_all(net, fib6_age, &gc_args);
	now = jiffies;
	net->ipv6.ip6_rt_last_gc = now;

	if (gc_args.more)
		mod_timer(&net->ipv6.ip6_fib_timer,
			  round_jiffies(now
					+ net->ipv6.sysctl.ip6_rt_gc_interval));
	else
		del_timer(&net->ipv6.ip6_fib_timer);
	spin_unlock_bh(&net->ipv6.fib6_gc_lock);
}

static void fib6_gc_timer_cb(unsigned long arg)
{
	fib6_run_gc(0, (struct net *)arg, true);
}

static int __net_init fib6_net_init(struct net *net)
{
	size_t size = sizeof(struct hlist_head) * FIB6_TABLE_HASHSZ;

	spin_lock_init(&net->ipv6.fib6_gc_lock);
	rwlock_init(&net->ipv6.fib6_walker_lock);
	INIT_LIST_HEAD(&net->ipv6.fib6_walkers);
	setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);

	net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
	if (!net->ipv6.rt6_stats)
		goto out_timer;

	/* Avoid false sharing : Use at least a full cache line */
	size = max_t(size_t, size, L1_CACHE_BYTES);

	net->ipv6.fib_table_hash = kzalloc(size, GFP_KERNEL);
	if (!net->ipv6.fib_table_hash)
		goto out_rt6_stats;

	net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl),
					  GFP_KERNEL);
	if (!net->ipv6.fib6_main_tbl)
		goto out_fib_table_hash;

	net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN;
	net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
	net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
	inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
					   GFP_KERNEL);
	if (!net->ipv6.fib6_local_tbl)
		goto out_fib6_main_tbl;
	net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
	net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
	net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
	inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
#endif
	fib6_tables_init(net);

	return 0;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_fib6_main_tbl:
	kfree(net->ipv6.fib6_main_tbl);
#endif
out_fib_table_hash:
	kfree(net->ipv6.fib_table_hash);
out_rt6_stats:
	kfree(net->ipv6.rt6_stats);
out_timer:
	return -ENOMEM;
}

static void fib6_net_exit(struct net *net)
{
	rt6_ifdown(net, NULL);
	del_timer_sync(&net->ipv6.ip6_fib_timer);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
	kfree(net->ipv6.fib6_local_tbl);
#endif
	inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
	kfree(net->ipv6.fib6_main_tbl);
	kfree(net->ipv6.fib_table_hash);
	kfree(net->ipv6.rt6_stats);
}

static struct pernet_operations fib6_net_ops = {
	.init = fib6_net_init,
	.exit = fib6_net_exit,
};
int __init fib6_init(void)
{
	int ret = -ENOMEM;

	fib6_node_kmem = kmem_cache_create("fib6_nodes",
					   sizeof(struct fib6_node),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!fib6_node_kmem)
		goto out;

	ret = register_pernet_subsys(&fib6_net_ops);
	if (ret)
		goto out_kmem_cache_create;

	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
			      NULL);
	if (ret)
		goto out_unregister_subsys;

	__fib6_flush_trees = fib6_flush_trees;
out:
	return ret;

out_unregister_subsys:
	unregister_pernet_subsys(&fib6_net_ops);
out_kmem_cache_create:
	kmem_cache_destroy(fib6_node_kmem);
	goto out;
}

void fib6_gc_cleanup(void)
{
	unregister_pernet_subsys(&fib6_net_ops);
	kmem_cache_destroy(fib6_node_kmem);
}

#ifdef CONFIG_PROC_FS

struct ipv6_route_iter {
	struct seq_net_private p;
	struct fib6_walker w;
	loff_t skip;
	struct fib6_table *tbl;
	int sernum;
};

static int ipv6_route_seq_show(struct seq_file *seq, void *v)
{
	struct rt6_info *rt = v;
	struct ipv6_route_iter *iter = seq->private;

	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);

#ifdef CONFIG_IPV6_SUBTREES
	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
#else
	seq_puts(seq, "00000000000000000000000000000000 00 ");
#endif
	if (rt->rt6i_flags & RTF_GATEWAY)
		seq_printf(seq, "%pi6", &rt->rt6i_gateway);
	else
		seq_puts(seq, "00000000000000000000000000000000");

	seq_printf(seq, " %08x %08x %08x %08x %8s\n",
		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
		   rt->dst.__use, rt->rt6i_flags,
		   rt->dst.dev ? rt->dst.dev->name : "");
	iter->w.leaf = NULL;
	return 0;
}

static int ipv6_route_yield(struct fib6_walker *w)
{
	struct ipv6_route_iter *iter = w->args;

	if (!iter->skip)
		return 1;

	do {
		iter->w.leaf = iter->w.leaf->dst.rt6_next;
		iter->skip--;
		if (!iter->skip && iter->w.leaf)
			return 1;
	} while (iter->w.leaf);

	return 0;
}

static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
				      struct net *net)
{
	memset(&iter->w, 0, sizeof(iter->w));
	iter->w.func = ipv6_route_yield;
	iter->w.root = &iter->tbl->tb6_root;
	iter->w.state = FWS_INIT;
	iter->w.node = iter->w.root;
	iter->w.args = iter;
	iter->sernum = iter->w.root->fn_sernum;
	INIT_LIST_HEAD(&iter->w.lh);
	fib6_walker_link(net, &iter->w);
}

static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
						    struct net *net)
{
	unsigned int h;
	struct hlist_node *node;

	if (tbl) {
		h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
		node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
	} else {
		h = 0;
		node = NULL;
	}

	while (!node && h < FIB6_TABLE_HASHSZ) {
		node = rcu_dereference_bh(
			hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
	}
	return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
}

static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
{
	if (iter->sernum != iter->w.root->fn_sernum) {
		iter->sernum = iter->w.root->fn_sernum;
		iter->w.state = FWS_INIT;
		iter->w.node = iter->w.root;
		WARN_ON(iter->w.skip);
		iter->w.skip = iter->w.count;
	}
}
static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int r;
	struct rt6_info *n;
	struct net *net = seq_file_net(seq);
	struct ipv6_route_iter *iter = seq->private;

	if (!v)
		goto iter_table;

	n = ((struct rt6_info *)v)->dst.rt6_next;
	if (n) {
		++*pos;
		return n;
	}

iter_table:
	ipv6_route_check_sernum(iter);
	read_lock(&iter->tbl->tb6_lock);
	r = fib6_walk_continue(&iter->w);
	read_unlock(&iter->tbl->tb6_lock);
	if (r > 0) {
		if (v)
			++*pos;
		return iter->w.leaf;
	} else if (r < 0) {
		fib6_walker_unlink(net, &iter->w);
		return NULL;
	}
	fib6_walker_unlink(net, &iter->w);

	iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
	if (!iter->tbl)
		return NULL;

	ipv6_route_seq_setup_walk(iter, net);
	goto iter_table;
}

static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU_BH)
{
	struct net *net = seq_file_net(seq);
	struct ipv6_route_iter *iter = seq->private;

	rcu_read_lock_bh();
	iter->tbl = ipv6_route_seq_next_table(NULL, net);
	iter->skip = *pos;

	if (iter->tbl) {
		ipv6_route_seq_setup_walk(iter, net);
		return ipv6_route_seq_next(seq, NULL, pos);
	} else {
		return NULL;
	}
}

static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
{
	struct fib6_walker *w = &iter->w;
	return w->node && !(w->state == FWS_U && w->node == w->root);
}

static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU_BH)
{
	struct net *net = seq_file_net(seq);
	struct ipv6_route_iter *iter = seq->private;

	if (ipv6_route_iter_active(iter))
		fib6_walker_unlink(net, &iter->w);

	rcu_read_unlock_bh();
}

static const struct seq_operations ipv6_route_seq_ops = {
	.start	= ipv6_route_seq_start,
	.next	= ipv6_route_seq_next,
	.stop	= ipv6_route_seq_stop,
	.show	= ipv6_route_seq_show
};

int ipv6_route_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipv6_route_seq_ops,
			    sizeof(struct ipv6_route_iter));
}

#endif /* CONFIG_PROC_FS */