/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize away the dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
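
/*
 * Usage sketch for the iteration helpers above (illustrative only):
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_flags & RTNH_F_DEAD)
 *			continue;
 *		...
 *	} endfor_nexthops(fi);
 *
 * for_nexthops() opens a block that declares nhsel and a const cursor
 * nh; change_nexthops() does the same with a mutable nexthop_nh; the
 * matching endfor_nexthops() supplies the closing brace of that block.
 */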

static const struct
{
	int	error;
	u8	scope;
} fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};


/* Release a nexthop info record */

static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warning("Freeing alive fib_info %p\n", fi);
		return;
	}
	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		nexthop_nh->nh_dev = NULL;
	} endfor_nexthops(fi);
	fib_info_cnt--;
	release_net(fi->fib_net);
	call_rcu(&fi->rcu, free_fib_info_rcu);
}

void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_NET_CLS_ROUTE
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= fi->fib_protocol;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(fi->fib_metrics)) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}
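
/*
 * Identical routes share one fib_info: fib_create_info() builds a
 * candidate and uses fib_find_info() above to return an existing
 * record instead, bumping fib_treeref (references held by fib_aliases
 * in the tree). fib_clntref is the separate refcount taken by lookup
 * results; fib_release_info()/free_fib_info() undo the two stages.
 */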

/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
			    fa->fa_type, fa->fa_scope, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
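
/*
 * The alias list of a prefix is kept sorted by the insert path:
 * descending fa_tos first, then ascending fib_priority within one TOS.
 * fib_find_alias() below leans on that invariant and can stop at the
 * first entry that is not "better" than the (tos, prio) pair asked for.
 */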
/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	if (fah) {
		struct fib_alias *fa;
		list_for_each_entry(fa, fah, fa_list) {
			if (fa->fa_tos > tos)
				continue;
			if (fa->fa_info->fib_priority >= prio ||
			    fa->fa_tos < tos)
				return fa;
		}
	}
	return NULL;
}

int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_NET_CLS_ROUTE
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}

#endif
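
/*
 * fib_nh_match() returns 0 if the request in cfg matches the nexthop
 * configuration of fi and nonzero if it does not; the route deletion
 * path uses it to select the right alias when several routes exist
 * for the same prefix and TOS.
 */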
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_NET_CLS_ROUTE
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}


/*
 * Picture
 * -------
 *
 * The semantics of nexthops are very messy for historical reasons.
 * We have to take into account that:
 *
 * a) gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) gateway must be an on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) If both gateway and interface are specified, they should not
 *    contradict.
 * d) If we use tunnel routes, gateway could be not on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size
 * of the code does not increase practically, but it becomes
 * much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
 * which means that gw is forced to be on link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to
 * coexist in peace.
 *
 * Normally it looks as follows.
 *
 * {universe prefix} -> (gw, oif) [scope link]
 *			 |
 *			 |-> {link prefix} -> (gw, oif) [scope local]
 *					       |
 *					       |-> {local prefix} (terminal node)
 */
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct flowi fl = {
				.fl4_dst = nh->nh_gw,
				.fl4_scope = cfg->fc_scope + 1,
				.oif = nh->nh_oif,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl.fl4_scope < RT_SCOPE_LINK)
				fl.fl4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl, &res);
			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (in_dev == NULL)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}
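
/*
 * Example (illustrative): for "ip route add 10.1.0.0/16 via 192.168.1.1",
 * fib_check_nh() resolves 192.168.1.1 with a lookup restricted to a
 * scope strictly narrower than the new route's. It should hit
 * something like a link-scope 192.168.1.0/24 route on eth0, which pins
 * the nexthop's device and scope. With RTNH_F_ONLINK the lookup is
 * skipped and the gateway is simply trusted to be on the given oif.
 */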
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

static void fib_hash_move(struct hlist_head *new_info_hash,
			  struct hlist_head *new_laddrhash,
			  unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_hash_free(old_info_hash, bytes);
	fib_hash_free(old_laddrhash, bytes);
}
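
/*
 * Both tables are resized in lockstep: once fib_info_cnt reaches
 * fib_hash_size, fib_create_info() below doubles the size and rehashes
 * every entry under fib_info_lock, because fib_info_hashfn() and
 * fib_laddr_hashfn() both mask with (fib_hash_size - 1).
 */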
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_hash_size) {
		unsigned int new_size = fib_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 1;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_hash_alloc(bytes);
		new_laddrhash = fib_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_hash_free(new_info_hash, bytes);
			fib_hash_free(new_laddrhash, bytes);
		} else
			fib_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	fib_info_cnt++;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				if (type > RTAX_MAX)
					goto err_inval;
				fi->fib_metrics[type - 1] = nla_get_u32(nla);
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_NET_CLS_ROUTE
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_NET_CLS_ROUTE
		nh->nh_tclassid = cfg->fc_flow;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}
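
	/*
	 * Route types with a nonzero fib_props[].error (blackhole,
	 * unreachable, prohibit, ...) terminate a lookup with that
	 * error; they may not carry a gateway, device or multipath
	 * spec and are linked in below without nexthop validation.
	 */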
	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}

/* Note! fib_semantic_match intentionally uses RCU list functions. */
int fib_semantic_match(struct list_head *head, const struct flowi *flp,
		       struct fib_result *res, int prefixlen, int fib_flags)
{
	struct fib_alias *fa;
	int nh_sel = 0;

	list_for_each_entry_rcu(fa, head, fa_list) {
		int err;

		if (fa->fa_tos &&
		    fa->fa_tos != flp->fl4_tos)
			continue;

		if (fa->fa_scope < flp->fl4_scope)
			continue;

		fib_alias_accessed(fa);

		err = fib_props[fa->fa_type].error;
		if (err == 0) {
			struct fib_info *fi = fa->fa_info;

			if (fi->fib_flags & RTNH_F_DEAD)
				continue;

			switch (fa->fa_type) {
			case RTN_UNICAST:
			case RTN_LOCAL:
			case RTN_BROADCAST:
			case RTN_ANYCAST:
			case RTN_MULTICAST:
				for_nexthops(fi) {
					if (nh->nh_flags & RTNH_F_DEAD)
						continue;
					if (!flp->oif || flp->oif == nh->nh_oif)
						break;
				}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				if (nhsel < fi->fib_nhs) {
					nh_sel = nhsel;
					goto out_fill_res;
				}
#else
				if (nhsel < 1)
					goto out_fill_res;
#endif
				endfor_nexthops(fi);
				continue;

			default:
				pr_warning("fib_semantic_match bad type %#x\n",
					   fa->fa_type);
				return -EINVAL;
			}
		}
		return err;
	}
	return 1;

out_fill_res:
	res->prefixlen = prefixlen;
	res->nh_sel = nh_sel;
	res->type = fa->fa_type;
	res->scope = fa->fa_scope;
	res->fi = fa->fa_info;
	if (!(fib_flags & FIB_LOOKUP_NOREF))
		atomic_inc(&res->fi->fib_clntref);
	return 0;
}

/* Find the appropriate source address for this destination */

__be32 __fib_res_prefsrc(struct fib_result *res)
{
	return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
}
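
/*
 * Fill one rtnetlink route message (struct rtmsg plus attributes).
 * The NLA_PUT_* macros jump to nla_put_failure when the skb runs out
 * of tailroom; the partial message is then cancelled and -EMSGSIZE
 * returned, which fib_nlmsg_size() is supposed to make impossible.
 */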
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len)
		NLA_PUT_BE32(skb, RTA_DST, dst);

	if (fi->fib_priority)
		NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);

	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc)
		NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);

	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw)
			NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);

		if (fi->fib_nh->nh_oif)
			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
#ifdef CONFIG_NET_CLS_ROUTE
		if (fi->fib_nh[0].nh_tclassid)
			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw)
				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
#ifdef CONFIG_NET_CLS_ROUTE
			if (nh->nh_tclassid)
				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
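/*
 * Note: the two sync helpers below only mark nexthops and fib_infos
 * RTNH_F_DEAD and report how many fib_infos changed state; the
 * notifier paths that call them (see fib_frontend.c) are expected to
 * flush the routing cache so the change takes effect.
 */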
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct hlist_node *node;
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	hlist_for_each_entry(fi, node, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct hlist_node *node;
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * A dead device goes up. We wake up dead nexthops.
 * This only makes sense for multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
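/*
 * Worked example: with two live nexthops of weight 2 and 1, the
 * recharge below sets fib_power = 3 and each nh_power to its weight.
 * A draw w in [0..fib_power-1] (taken from jiffies, admittedly a poor
 * substitute for a random number) walks the nexthops subtracting
 * nh_power until w drops to zero or below; the chosen nexthop then
 * pays one unit of nh_power/fib_power, so over a full recharge cycle
 * the two nexthops are selected in a 2:1 ratio.
 */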
void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}


	/* w should be a random number in [0..fi->fib_power-1];
	 * jiffies is a pretty bad approximation.
	 */

	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
#endif