/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hopefully gcc will optimize away the dummy loop. */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }


const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};

/* Release a nexthop info record */

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warning("Freeing alive fib_info %p\n", fi);
		return;
	}
	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		nexthop_nh->nh_dev = NULL;
	} endfor_nexthops(fi);
	fib_info_cnt--;
	release_net(fi->fib_net);
	kfree_rcu(fi, rcu);
}

void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}

/* Check that the gateway is already configured.
 * Used only by the redirect acceptance routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
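/* A worked example with illustrative values: aliases are kept in order
 * of decreasing fa_tos and increasing fib_priority, so for a list
 * (tos=0x10, prio=1), (tos=0x10, prio=5), (tos=0x00, prio=2), a call
 * with tos=0x10 and prio=3 skips the first entry and returns
 * (tos=0x10, prio=5): the first position at which a new alias with
 * priority 3 could be inserted.
 */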
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	if (fah) {
		struct fib_alias *fa;
		list_for_each_entry(fa, fah, fa_list) {
			if (fa->fa_tos > tos)
				continue;
			if (fa->fa_info->fib_priority >= prio ||
			    fa->fa_tos < tos)
				return fa;
		}
	}
	return NULL;
}

int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}

#endif

int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}


/*
 * Picture
 * -------
 *
 * The semantics of nexthops are very messy, for historical reasons.
 * We have to take into account that:
 * a) a gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) a gateway must be an on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) if both gateway and interface are specified, they should not
 *    contradict each other.
 * d) if we use tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size of the code
 * practically does not increase, but it becomes much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to
 * coexist in peace.
 *
 * Normally it looks like the following.
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *		  |
 *		  |-> {link prefix} -> (gw, oif) [scope local]
 *				|
 *				|-> {local prefix} (terminal node)
 */
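/* A concrete illustration of the scope chain above, with example
 * addresses only (192.0.2.0/24 is documentation space):
 *
 *	ip addr add 192.0.2.1/24 dev eth0		  # {local prefix}
 *	ip route add 192.0.2.0/24 dev eth0 scope link	  # {link prefix}
 *	ip route add default via 192.0.2.254	  # {universe prefix}; the
 *						  # gateway must resolve via
 *						  # the scope-link prefix
 *						  # above, or be declared
 *						  # onlink
 *
 * fib_check_nh() below implements this narrowing: it resolves the
 * gateway with flowi4_scope = cfg->fc_scope + 1.
 */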
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl4, &res);
			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (in_dev == NULL)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}

__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}

struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 1;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi) + nhs * sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;
	fib_info_cnt++;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				if (type > RTAX_MAX)
					goto err_inval;
				fi->fib_metrics[type - 1] = nla_get_u32(nla);
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}

int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len)
		NLA_PUT_BE32(skb, RTA_DST, dst);

	if (fi->fib_priority)
		NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);

	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc)
		NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);

	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw)
			NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);

		if (fi->fib_nh->nh_oif)
			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid)
			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw)
				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid)
				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shut down all nexthops going via it.
 */
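/* An illustrative scenario (example address only): deleting the address
 * used as a preferred source, e.g. with
 * "ip addr del 192.0.2.1/24 dev eth0", ends up in fib_sync_down_addr()
 * marking RTNH_F_DEAD on every fib_info whose fib_prefsrc matches,
 * while "ip link set eth0 down" ends up in fib_sync_down_dev() killing
 * the nexthops via eth0 and, once all of a route's nexthops are dead,
 * the route itself.
 */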
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct hlist_node *node;
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	hlist_for_each_entry(fi, node, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct hlist_node *node;
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/* Must be invoked inside of an RCU protected region.
 */
void fib_select_default(struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct list_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	int order = -1, last_idx = -1;
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	return;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * A dead device comes up. We wake up dead nexthops.
 * This only makes sense for multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
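/* A worked example with illustrative weights: for two alive nexthops
 * of weight 2 and 1, a refill sets nh_power = {2, 1} and fib_power = 3.
 * Each selection draws w in [0..fib_power-1], walks the nexthops
 * subtracting nh_power from w until it reaches zero or below, then
 * decrements the chosen nexthop's nh_power and fib_power.  Over the
 * three selections of one refill cycle the first nexthop is therefore
 * chosen exactly twice and the second exactly once: a 2:1 split.
 */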
void fib_select_multipath(struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}


	/* w should be a random number in [0..fi->fib_power-1];
	 * taking it from jiffies is a pretty bad approximation.
	 */

	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
#endif