/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize away the dummy loop. */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
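
/* Example (illustrative only): the iterator macros above expand to an
 * unbalanced opening brace, so every walk must be closed with
 * endfor_nexthops():
 *
 *	int alive = 0;
 *	for_nexthops(fi) {
 *		if (!(nh->nh_flags & RTNH_F_DEAD))
 *			alive++;
 *	} endfor_nexthops(fi);
 */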

const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};

/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
	kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warning("Freeing alive fib_info %p\n", fi);
		return;
	}
	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		nexthop_nh->nh_dev = NULL;
	} endfor_nexthops(fi);
	fib_info_cnt--;
	release_net(fi->fib_net);
	call_rcu(&fi->rcu, free_fib_info_rcu);
}

void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw  != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}
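
/* Note (illustrative): fib_find_info() is what lets many prefixes share
 * one fib_info.  Two routes that differ only in destination, e.g.
 * 10.1.0.0/16 and 10.2.0.0/16 via the same gateway, produce identical
 * records; fib_create_info() then reuses the first one and just bumps
 * its fib_treeref.
 */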

/* Check that the gateway is already configured.
 * Used only by the redirect acceptance routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}
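
/* Rough shape of the message being sized (single-nexthop case), for
 * illustration:
 *
 *	struct rtmsg
 *	RTA_TABLE, RTA_DST, RTA_PRIORITY, RTA_PREFSRC	(4 bytes each)
 *	RTA_METRICS { RTAX_* ... }			(nested)
 *	RTA_MULTIPATH { rtnexthop + RTA_GATEWAY/RTA_FLOW }
 *
 * The estimate only needs to be an upper bound: nlmsg_new() sizes the
 * skb from it, and fib_dump_info() warns if it ever proves too small.
 */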

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

/* Return the first fib alias matching TOS whose priority value is
 * greater than or equal to PRIO (i.e. no more preferred than PRIO).
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	if (fah) {
		struct fib_alias *fa;
		list_for_each_entry(fa, fah, fa_list) {
			if (fa->fa_tos > tos)
				continue;
			if (fa->fa_info->fib_priority >= prio ||
			    fa->fa_tos < tos)
				return fa;
		}
	}
	return NULL;
}

int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}

#endif

int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		/* attrlen > 0 means per-nexthop attributes are present
		 * (the original "attrlen < 0" test could never be true).
		 */
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}
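
/* Example (illustrative): "ip route del 10.0.0.0/8 via 192.168.1.1
 * dev eth0" reaches fib_nh_match() with fc_gw and fc_oif set, so only
 * a fib_info carrying exactly that gateway and interface matches
 * (returns 0).  A bare "ip route del 10.0.0.0/8" specifies neither
 * and matches any nexthop configuration.
 */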

/*
 * Picture
 * -------
 *
 * The semantics of nexthops are very messy, for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr, but by a direct route.
 * c) if both gateway and interface are specified, they must not
 *    contradict each other.
 * d) with tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the amount of code stays
 * practically the same, but it becomes much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where the gw must have a narrower scope. This recursion stops
 * when the gw has LOCAL scope or when the "nexthop" is declared
 * ONLINK, which forces the gw to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to
 * coexist in peace.
 *
 * Normally it looks like this:
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *		  |
 *		  |-> {link prefix} -> (gw, oif) [scope local]
 *					|
 *					|-> {local prefix} (terminal node)
 */
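
/* For instance (illustrative; assume 192.168.1.0/24 is configured as a
 * link-scope prefix on eth0):
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1
 *		- 192.168.1.1 resolves through the link-scope route,
 *		  so the nexthop is accepted.
 *	ip route add 10.0.0.0/8 via 172.16.0.1 dev eth0
 *		- 172.16.0.1 is not on-link and is rejected, unless the
 *		  route is flagged "onlink".
 */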
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl4, &res);
			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (in_dev == NULL)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}

__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}
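
/* Note (illustrative): nh_saddr is a cached preferred source address.
 * It is stamped with dev_addr_genid above, so readers can compare the
 * stamp against the current generation and recompute only after an
 * address change on the device has bumped it.
 */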

struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the weirdest cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 1;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi) + nhs * sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;
	fib_info_cnt++;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				if (type > RTAX_MAX)
					goto err_inval;
				fi->fib_metrics[type - 1] = nla_get_u32(nla);
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}
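
	/* Example (illustrative): "ip route add blackhole 10.0.0.0/8"
	 * has a nonzero fib_props error (-EINVAL), so it may not carry
	 * a gateway, device or nexthop list, and it jumps straight to
	 * link_it, skipping nexthop validation entirely.
	 */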

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}
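
/* Reference-counting sketch (for orientation): fib_treeref counts the
 * fib table entries sharing this fib_info and is dropped through
 * fib_release_info(); fib_clntref is the refcount taken by lookup
 * results and is dropped through fib_info_put(), which ultimately
 * frees the record via call_rcu() in free_fib_info().
 */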

int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len)
		NLA_PUT_BE32(skb, RTA_DST, dst);

	if (fi->fib_priority)
		NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);

	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc)
		NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);

	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw)
			NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);

		if (fi->fib_nh->nh_oif)
			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid)
			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw)
				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid)
				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Update the FIB if:
 * - a local address disappeared -> we must delete all the entries
 *   referring to it.
 * - a device went down -> we must shut down all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	hash = fib_laddr_hashfn(local);
	head = &fib_info_laddrhash[hash];
	hlist_for_each_entry(fi, node, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct hlist_node *node;
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}
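
/* Illustration: a NETDEV_DOWN event on eth0 ends up calling
 * fib_sync_down_dev() for eth0: every nexthop bound to eth0 is marked
 * RTNH_F_DEAD, and any fib_info whose nexthops are now all dead is
 * flagged dead as a whole, so the subsequent flush can drop its routes.
 */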

/* Must be invoked inside of an RCU protected region. */
void fib_select_default(struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct list_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	int order = -1, last_idx = -1;
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	return;
}
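
/* Worked example for fib_select_multipath() below (illustrative):
 * two live nexthops with weights 2 and 1 start a round with
 * fib_power = 3 and nh_power = {2, 1}.  Each selection spends one
 * credit from the chosen nexthop and from fib_power, so a round of
 * three selections yields a 2:1 split before the credits refill.
 */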

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * A dead device goes up. We wake up dead nexthops.
 * This makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
void fib_select_multipath(struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}

	/* w should be a random number in [0..fi->fib_power-1];
	 * jiffies is a pretty bad approximation.
	 */
	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
#endif