1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * ROUTE - implementation of the IP router. 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Alan Cox, <gw4pts@gw4pts.ampr.org> 11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi> 12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 13 * 14 * Fixes: 15 * Alan Cox : Verify area fixes. 16 * Alan Cox : cli() protects routing changes 17 * Rui Oliveira : ICMP routing table updates 18 * (rco@di.uminho.pt) Routing table insertion and update 19 * Linus Torvalds : Rewrote bits to be sensible 20 * Alan Cox : Added BSD route gw semantics 21 * Alan Cox : Super /proc >4K 22 * Alan Cox : MTU in route table 23 * Alan Cox : MSS actually. Also added the window 24 * clamper. 25 * Sam Lantinga : Fixed route matching in rt_del() 26 * Alan Cox : Routing cache support. 27 * Alan Cox : Removed compatibility cruft. 28 * Alan Cox : RTF_REJECT support. 29 * Alan Cox : TCP irtt support. 30 * Jonathan Naylor : Added Metric support. 31 * Miquel van Smoorenburg : BSD API fixes. 32 * Miquel van Smoorenburg : Metrics. 33 * Alan Cox : Use __u32 properly 34 * Alan Cox : Aligned routing errors more closely with BSD 35 * our system is still very different. 36 * Alan Cox : Faster /proc handling 37 * Alexey Kuznetsov : Massive rework to support tree based routing, 38 * routing caches and better behaviour. 39 * 40 * Olaf Erb : irtt wasn't being copied right. 41 * Bjorn Ekwall : Kerneld route support. 42 * Alan Cox : Multicast fixed (I hope) 43 * Pavel Krauz : Limited broadcast fixed 44 * Mike McLagan : Routing by source 45 * Alexey Kuznetsov : End of old history. Split to fib.c and 46 * route.c and rewritten from scratch. 47 * Andi Kleen : Load-limit warning messages. 48 * Vitaly E. Lavrov : Transparent proxy revived after year coma. 49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow. 50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow. 51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful. 52 * Marc Boucher : routing by fwmark 53 * Robert Olsson : Added rt_cache statistics 54 * Arnaldo C. Melo : Convert proc stuff to seq_file 55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes. 56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect 57 * Ilia Sotnikov : Removed TOS from hash calculations 58 * 59 * This program is free software; you can redistribute it and/or 60 * modify it under the terms of the GNU General Public License 61 * as published by the Free Software Foundation; either version 62 * 2 of the License, or (at your option) any later version. 63 */ 64 65 #define pr_fmt(fmt) "IPv4: " fmt 66 67 #include <linux/module.h> 68 #include <linux/uaccess.h> 69 #include <linux/bitops.h> 70 #include <linux/types.h> 71 #include <linux/kernel.h> 72 #include <linux/mm.h> 73 #include <linux/string.h> 74 #include <linux/socket.h> 75 #include <linux/sockios.h> 76 #include <linux/errno.h> 77 #include <linux/in.h> 78 #include <linux/inet.h> 79 #include <linux/netdevice.h> 80 #include <linux/proc_fs.h> 81 #include <linux/init.h> 82 #include <linux/skbuff.h> 83 #include <linux/inetdevice.h> 84 #include <linux/igmp.h> 85 #include <linux/pkt_sched.h> 86 #include <linux/mroute.h> 87 #include <linux/netfilter_ipv4.h> 88 #include <linux/random.h> 89 #include <linux/rcupdate.h> 90 #include <linux/times.h> 91 #include <linux/slab.h> 92 #include <linux/jhash.h> 93 #include <net/dst.h> 94 #include <net/dst_metadata.h> 95 #include <net/net_namespace.h> 96 #include <net/protocol.h> 97 #include <net/ip.h> 98 #include <net/route.h> 99 #include <net/inetpeer.h> 100 #include <net/sock.h> 101 #include <net/ip_fib.h> 102 #include <net/arp.h> 103 #include <net/tcp.h> 104 #include <net/icmp.h> 105 #include <net/xfrm.h> 106 #include <net/lwtunnel.h> 107 #include <net/netevent.h> 108 #include <net/rtnetlink.h> 109 #ifdef CONFIG_SYSCTL 110 #include <linux/sysctl.h> 111 #endif 112 #include <net/secure_seq.h> 113 #include <net/ip_tunnels.h> 114 #include <net/l3mdev.h> 115 116 #include "fib_lookup.h" 117 118 #define RT_FL_TOS(oldflp4) \ 119 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)) 120 121 #define RT_GC_TIMEOUT (300*HZ) 122 123 static int ip_rt_max_size; 124 static int ip_rt_redirect_number __read_mostly = 9; 125 static int ip_rt_redirect_load __read_mostly = HZ / 50; 126 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1)); 127 static int ip_rt_error_cost __read_mostly = HZ; 128 static int ip_rt_error_burst __read_mostly = 5 * HZ; 129 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; 130 static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 131 static int ip_rt_min_advmss __read_mostly = 256; 132 133 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; 134 135 /* 136 * Interface to generic destination cache. 137 */ 138 139 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); 140 static unsigned int ipv4_default_advmss(const struct dst_entry *dst); 141 static unsigned int ipv4_mtu(const struct dst_entry *dst); 142 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); 143 static void ipv4_link_failure(struct sk_buff *skb); 144 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 145 struct sk_buff *skb, u32 mtu); 146 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, 147 struct sk_buff *skb); 148 static void ipv4_dst_destroy(struct dst_entry *dst); 149 150 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) 151 { 152 WARN_ON(1); 153 return NULL; 154 } 155 156 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, 157 struct sk_buff *skb, 158 const void *daddr); 159 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr); 160 161 static struct dst_ops ipv4_dst_ops = { 162 .family = AF_INET, 163 .check = ipv4_dst_check, 164 .default_advmss = ipv4_default_advmss, 165 .mtu = ipv4_mtu, 166 .cow_metrics = ipv4_cow_metrics, 167 .destroy = ipv4_dst_destroy, 168 .negative_advice = ipv4_negative_advice, 169 .link_failure = ipv4_link_failure, 170 .update_pmtu = ip_rt_update_pmtu, 171 .redirect = ip_do_redirect, 172 .local_out = __ip_local_out, 173 .neigh_lookup = ipv4_neigh_lookup, 174 .confirm_neigh = ipv4_confirm_neigh, 175 }; 176 177 #define ECN_OR_COST(class) TC_PRIO_##class 178 179 const __u8 ip_tos2prio[16] = { 180 TC_PRIO_BESTEFFORT, 181 ECN_OR_COST(BESTEFFORT), 182 TC_PRIO_BESTEFFORT, 183 ECN_OR_COST(BESTEFFORT), 184 TC_PRIO_BULK, 185 ECN_OR_COST(BULK), 186 TC_PRIO_BULK, 187 ECN_OR_COST(BULK), 188 TC_PRIO_INTERACTIVE, 189 ECN_OR_COST(INTERACTIVE), 190 TC_PRIO_INTERACTIVE, 191 ECN_OR_COST(INTERACTIVE), 192 TC_PRIO_INTERACTIVE_BULK, 193 ECN_OR_COST(INTERACTIVE_BULK), 194 TC_PRIO_INTERACTIVE_BULK, 195 ECN_OR_COST(INTERACTIVE_BULK) 196 }; 197 EXPORT_SYMBOL(ip_tos2prio); 198 199 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 200 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field) 201 202 #ifdef CONFIG_PROC_FS 203 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 204 { 205 if (*pos) 206 return NULL; 207 return SEQ_START_TOKEN; 208 } 209 210 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) 211 { 212 ++*pos; 213 return NULL; 214 } 215 216 static void rt_cache_seq_stop(struct seq_file *seq, void *v) 217 { 218 } 219 220 static int rt_cache_seq_show(struct seq_file *seq, void *v) 221 { 222 if (v == SEQ_START_TOKEN) 223 seq_printf(seq, "%-127s\n", 224 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t" 225 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t" 226 "HHUptod\tSpecDst"); 227 return 0; 228 } 229 230 static const struct seq_operations rt_cache_seq_ops = { 231 .start = rt_cache_seq_start, 232 .next = rt_cache_seq_next, 233 .stop = rt_cache_seq_stop, 234 .show = rt_cache_seq_show, 235 }; 236 237 static int rt_cache_seq_open(struct inode *inode, struct file *file) 238 { 239 return seq_open(file, &rt_cache_seq_ops); 240 } 241 242 static const struct file_operations rt_cache_seq_fops = { 243 .open = rt_cache_seq_open, 244 .read = seq_read, 245 .llseek = seq_lseek, 246 .release = seq_release, 247 }; 248 249 250 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos) 251 { 252 int cpu; 253 254 if (*pos == 0) 255 return SEQ_START_TOKEN; 256 257 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { 258 if (!cpu_possible(cpu)) 259 continue; 260 *pos = cpu+1; 261 return &per_cpu(rt_cache_stat, cpu); 262 } 263 return NULL; 264 } 265 266 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) 267 { 268 int cpu; 269 270 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 271 if (!cpu_possible(cpu)) 272 continue; 273 *pos = cpu+1; 274 return &per_cpu(rt_cache_stat, cpu); 275 } 276 return NULL; 277 278 } 279 280 static void rt_cpu_seq_stop(struct seq_file *seq, void *v) 281 { 282 283 } 284 285 static int rt_cpu_seq_show(struct seq_file *seq, void *v) 286 { 287 struct rt_cache_stat *st = v; 288 289 if (v == SEQ_START_TOKEN) { 290 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); 291 return 0; 292 } 293 294 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " 295 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", 296 dst_entries_get_slow(&ipv4_dst_ops), 297 0, /* st->in_hit */ 298 st->in_slow_tot, 299 st->in_slow_mc, 300 st->in_no_route, 301 st->in_brd, 302 st->in_martian_dst, 303 st->in_martian_src, 304 305 0, /* st->out_hit */ 306 st->out_slow_tot, 307 st->out_slow_mc, 308 309 0, /* st->gc_total */ 310 0, /* st->gc_ignored */ 311 0, /* st->gc_goal_miss */ 312 0, /* st->gc_dst_overflow */ 313 0, /* st->in_hlist_search */ 314 0 /* st->out_hlist_search */ 315 ); 316 return 0; 317 } 318 319 static const struct seq_operations rt_cpu_seq_ops = { 320 .start = rt_cpu_seq_start, 321 .next = rt_cpu_seq_next, 322 .stop = rt_cpu_seq_stop, 323 .show = rt_cpu_seq_show, 324 }; 325 326 327 static int rt_cpu_seq_open(struct inode *inode, struct file *file) 328 { 329 return seq_open(file, &rt_cpu_seq_ops); 330 } 331 332 static const struct file_operations rt_cpu_seq_fops = { 333 .open = rt_cpu_seq_open, 334 .read = seq_read, 335 .llseek = seq_lseek, 336 .release = seq_release, 337 }; 338 339 #ifdef CONFIG_IP_ROUTE_CLASSID 340 static int rt_acct_proc_show(struct seq_file *m, void *v) 341 { 342 struct ip_rt_acct *dst, *src; 343 unsigned int i, j; 344 345 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL); 346 if (!dst) 347 return -ENOMEM; 348 349 for_each_possible_cpu(i) { 350 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i); 351 for (j = 0; j < 256; j++) { 352 dst[j].o_bytes += src[j].o_bytes; 353 dst[j].o_packets += src[j].o_packets; 354 dst[j].i_bytes += src[j].i_bytes; 355 dst[j].i_packets += src[j].i_packets; 356 } 357 } 358 359 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct)); 360 kfree(dst); 361 return 0; 362 } 363 #endif 364 365 static int __net_init ip_rt_do_proc_init(struct net *net) 366 { 367 struct proc_dir_entry *pde; 368 369 pde = proc_create("rt_cache", 0444, net->proc_net, 370 &rt_cache_seq_fops); 371 if (!pde) 372 goto err1; 373 374 pde = proc_create("rt_cache", 0444, 375 net->proc_net_stat, &rt_cpu_seq_fops); 376 if (!pde) 377 goto err2; 378 379 #ifdef CONFIG_IP_ROUTE_CLASSID 380 pde = proc_create_single("rt_acct", 0, net->proc_net, 381 rt_acct_proc_show); 382 if (!pde) 383 goto err3; 384 #endif 385 return 0; 386 387 #ifdef CONFIG_IP_ROUTE_CLASSID 388 err3: 389 remove_proc_entry("rt_cache", net->proc_net_stat); 390 #endif 391 err2: 392 remove_proc_entry("rt_cache", net->proc_net); 393 err1: 394 return -ENOMEM; 395 } 396 397 static void __net_exit ip_rt_do_proc_exit(struct net *net) 398 { 399 remove_proc_entry("rt_cache", net->proc_net_stat); 400 remove_proc_entry("rt_cache", net->proc_net); 401 #ifdef CONFIG_IP_ROUTE_CLASSID 402 remove_proc_entry("rt_acct", net->proc_net); 403 #endif 404 } 405 406 static struct pernet_operations ip_rt_proc_ops __net_initdata = { 407 .init = ip_rt_do_proc_init, 408 .exit = ip_rt_do_proc_exit, 409 }; 410 411 static int __init ip_rt_proc_init(void) 412 { 413 return register_pernet_subsys(&ip_rt_proc_ops); 414 } 415 416 #else 417 static inline int ip_rt_proc_init(void) 418 { 419 return 0; 420 } 421 #endif /* CONFIG_PROC_FS */ 422 423 static inline bool rt_is_expired(const struct rtable *rth) 424 { 425 return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev)); 426 } 427 428 void rt_cache_flush(struct net *net) 429 { 430 rt_genid_bump_ipv4(net); 431 } 432 433 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, 434 struct sk_buff *skb, 435 const void *daddr) 436 { 437 struct net_device *dev = dst->dev; 438 const __be32 *pkey = daddr; 439 const struct rtable *rt; 440 struct neighbour *n; 441 442 rt = (const struct rtable *) dst; 443 if (rt->rt_gateway) 444 pkey = (const __be32 *) &rt->rt_gateway; 445 else if (skb) 446 pkey = &ip_hdr(skb)->daddr; 447 448 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey); 449 if (n) 450 return n; 451 return neigh_create(&arp_tbl, pkey, dev); 452 } 453 454 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr) 455 { 456 struct net_device *dev = dst->dev; 457 const __be32 *pkey = daddr; 458 const struct rtable *rt; 459 460 rt = (const struct rtable *)dst; 461 if (rt->rt_gateway) 462 pkey = (const __be32 *)&rt->rt_gateway; 463 else if (!daddr || 464 (rt->rt_flags & 465 (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) 466 return; 467 468 __ipv4_confirm_neigh(dev, *(__force u32 *)pkey); 469 } 470 471 #define IP_IDENTS_SZ 2048u 472 473 static atomic_t *ip_idents __read_mostly; 474 static u32 *ip_tstamps __read_mostly; 475 476 /* In order to protect privacy, we add a perturbation to identifiers 477 * if one generator is seldom used. This makes hard for an attacker 478 * to infer how many packets were sent between two points in time. 479 */ 480 u32 ip_idents_reserve(u32 hash, int segs) 481 { 482 u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; 483 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; 484 u32 old = READ_ONCE(*p_tstamp); 485 u32 now = (u32)jiffies; 486 u32 new, delta = 0; 487 488 if (old != now && cmpxchg(p_tstamp, old, now) == old) 489 delta = prandom_u32_max(now - old); 490 491 /* Do not use atomic_add_return() as it makes UBSAN unhappy */ 492 do { 493 old = (u32)atomic_read(p_id); 494 new = old + delta + segs; 495 } while (atomic_cmpxchg(p_id, old, new) != old); 496 497 return new - segs; 498 } 499 EXPORT_SYMBOL(ip_idents_reserve); 500 501 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) 502 { 503 static u32 ip_idents_hashrnd __read_mostly; 504 u32 hash, id; 505 506 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); 507 508 hash = jhash_3words((__force u32)iph->daddr, 509 (__force u32)iph->saddr, 510 iph->protocol ^ net_hash_mix(net), 511 ip_idents_hashrnd); 512 id = ip_idents_reserve(hash, segs); 513 iph->id = htons(id); 514 } 515 EXPORT_SYMBOL(__ip_select_ident); 516 517 static void __build_flow_key(const struct net *net, struct flowi4 *fl4, 518 const struct sock *sk, 519 const struct iphdr *iph, 520 int oif, u8 tos, 521 u8 prot, u32 mark, int flow_flags) 522 { 523 if (sk) { 524 const struct inet_sock *inet = inet_sk(sk); 525 526 oif = sk->sk_bound_dev_if; 527 mark = sk->sk_mark; 528 tos = RT_CONN_FLAGS(sk); 529 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol; 530 } 531 flowi4_init_output(fl4, oif, mark, tos, 532 RT_SCOPE_UNIVERSE, prot, 533 flow_flags, 534 iph->daddr, iph->saddr, 0, 0, 535 sock_net_uid(net, sk)); 536 } 537 538 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb, 539 const struct sock *sk) 540 { 541 const struct net *net = dev_net(skb->dev); 542 const struct iphdr *iph = ip_hdr(skb); 543 int oif = skb->dev->ifindex; 544 u8 tos = RT_TOS(iph->tos); 545 u8 prot = iph->protocol; 546 u32 mark = skb->mark; 547 548 __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0); 549 } 550 551 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk) 552 { 553 const struct inet_sock *inet = inet_sk(sk); 554 const struct ip_options_rcu *inet_opt; 555 __be32 daddr = inet->inet_daddr; 556 557 rcu_read_lock(); 558 inet_opt = rcu_dereference(inet->inet_opt); 559 if (inet_opt && inet_opt->opt.srr) 560 daddr = inet_opt->opt.faddr; 561 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 562 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 563 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 564 inet_sk_flowi_flags(sk), 565 daddr, inet->inet_saddr, 0, 0, sk->sk_uid); 566 rcu_read_unlock(); 567 } 568 569 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk, 570 const struct sk_buff *skb) 571 { 572 if (skb) 573 build_skb_flow_key(fl4, skb, sk); 574 else 575 build_sk_flow_key(fl4, sk); 576 } 577 578 static DEFINE_SPINLOCK(fnhe_lock); 579 580 static void fnhe_flush_routes(struct fib_nh_exception *fnhe) 581 { 582 struct rtable *rt; 583 584 rt = rcu_dereference(fnhe->fnhe_rth_input); 585 if (rt) { 586 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL); 587 dst_dev_put(&rt->dst); 588 dst_release(&rt->dst); 589 } 590 rt = rcu_dereference(fnhe->fnhe_rth_output); 591 if (rt) { 592 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL); 593 dst_dev_put(&rt->dst); 594 dst_release(&rt->dst); 595 } 596 } 597 598 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) 599 { 600 struct fib_nh_exception *fnhe, *oldest; 601 602 oldest = rcu_dereference(hash->chain); 603 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe; 604 fnhe = rcu_dereference(fnhe->fnhe_next)) { 605 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) 606 oldest = fnhe; 607 } 608 fnhe_flush_routes(oldest); 609 return oldest; 610 } 611 612 static inline u32 fnhe_hashfun(__be32 daddr) 613 { 614 static u32 fnhe_hashrnd __read_mostly; 615 u32 hval; 616 617 net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); 618 hval = jhash_1word((__force u32) daddr, fnhe_hashrnd); 619 return hash_32(hval, FNHE_HASH_SHIFT); 620 } 621 622 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) 623 { 624 rt->rt_pmtu = fnhe->fnhe_pmtu; 625 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked; 626 rt->dst.expires = fnhe->fnhe_expires; 627 628 if (fnhe->fnhe_gw) { 629 rt->rt_flags |= RTCF_REDIRECTED; 630 rt->rt_gateway = fnhe->fnhe_gw; 631 rt->rt_uses_gateway = 1; 632 } 633 } 634 635 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, 636 u32 pmtu, bool lock, unsigned long expires) 637 { 638 struct fnhe_hash_bucket *hash; 639 struct fib_nh_exception *fnhe; 640 struct rtable *rt; 641 u32 genid, hval; 642 unsigned int i; 643 int depth; 644 645 genid = fnhe_genid(dev_net(nh->nh_dev)); 646 hval = fnhe_hashfun(daddr); 647 648 spin_lock_bh(&fnhe_lock); 649 650 hash = rcu_dereference(nh->nh_exceptions); 651 if (!hash) { 652 hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC); 653 if (!hash) 654 goto out_unlock; 655 rcu_assign_pointer(nh->nh_exceptions, hash); 656 } 657 658 hash += hval; 659 660 depth = 0; 661 for (fnhe = rcu_dereference(hash->chain); fnhe; 662 fnhe = rcu_dereference(fnhe->fnhe_next)) { 663 if (fnhe->fnhe_daddr == daddr) 664 break; 665 depth++; 666 } 667 668 if (fnhe) { 669 if (fnhe->fnhe_genid != genid) 670 fnhe->fnhe_genid = genid; 671 if (gw) 672 fnhe->fnhe_gw = gw; 673 if (pmtu) { 674 fnhe->fnhe_pmtu = pmtu; 675 fnhe->fnhe_mtu_locked = lock; 676 } 677 fnhe->fnhe_expires = max(1UL, expires); 678 /* Update all cached dsts too */ 679 rt = rcu_dereference(fnhe->fnhe_rth_input); 680 if (rt) 681 fill_route_from_fnhe(rt, fnhe); 682 rt = rcu_dereference(fnhe->fnhe_rth_output); 683 if (rt) 684 fill_route_from_fnhe(rt, fnhe); 685 } else { 686 if (depth > FNHE_RECLAIM_DEPTH) 687 fnhe = fnhe_oldest(hash); 688 else { 689 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC); 690 if (!fnhe) 691 goto out_unlock; 692 693 fnhe->fnhe_next = hash->chain; 694 rcu_assign_pointer(hash->chain, fnhe); 695 } 696 fnhe->fnhe_genid = genid; 697 fnhe->fnhe_daddr = daddr; 698 fnhe->fnhe_gw = gw; 699 fnhe->fnhe_pmtu = pmtu; 700 fnhe->fnhe_mtu_locked = lock; 701 fnhe->fnhe_expires = max(1UL, expires); 702 703 /* Exception created; mark the cached routes for the nexthop 704 * stale, so anyone caching it rechecks if this exception 705 * applies to them. 706 */ 707 rt = rcu_dereference(nh->nh_rth_input); 708 if (rt) 709 rt->dst.obsolete = DST_OBSOLETE_KILL; 710 711 for_each_possible_cpu(i) { 712 struct rtable __rcu **prt; 713 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i); 714 rt = rcu_dereference(*prt); 715 if (rt) 716 rt->dst.obsolete = DST_OBSOLETE_KILL; 717 } 718 } 719 720 fnhe->fnhe_stamp = jiffies; 721 722 out_unlock: 723 spin_unlock_bh(&fnhe_lock); 724 } 725 726 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4, 727 bool kill_route) 728 { 729 __be32 new_gw = icmp_hdr(skb)->un.gateway; 730 __be32 old_gw = ip_hdr(skb)->saddr; 731 struct net_device *dev = skb->dev; 732 struct in_device *in_dev; 733 struct fib_result res; 734 struct neighbour *n; 735 struct net *net; 736 737 switch (icmp_hdr(skb)->code & 7) { 738 case ICMP_REDIR_NET: 739 case ICMP_REDIR_NETTOS: 740 case ICMP_REDIR_HOST: 741 case ICMP_REDIR_HOSTTOS: 742 break; 743 744 default: 745 return; 746 } 747 748 if (rt->rt_gateway != old_gw) 749 return; 750 751 in_dev = __in_dev_get_rcu(dev); 752 if (!in_dev) 753 return; 754 755 net = dev_net(dev); 756 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) || 757 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) || 758 ipv4_is_zeronet(new_gw)) 759 goto reject_redirect; 760 761 if (!IN_DEV_SHARED_MEDIA(in_dev)) { 762 if (!inet_addr_onlink(in_dev, new_gw, old_gw)) 763 goto reject_redirect; 764 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev)) 765 goto reject_redirect; 766 } else { 767 if (inet_addr_type(net, new_gw) != RTN_UNICAST) 768 goto reject_redirect; 769 } 770 771 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); 772 if (!n) 773 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); 774 if (!IS_ERR(n)) { 775 if (!(n->nud_state & NUD_VALID)) { 776 neigh_event_send(n, NULL); 777 } else { 778 if (fib_lookup(net, fl4, &res, 0) == 0) { 779 struct fib_nh *nh = &FIB_RES_NH(res); 780 781 update_or_create_fnhe(nh, fl4->daddr, new_gw, 782 0, false, 783 jiffies + ip_rt_gc_timeout); 784 } 785 if (kill_route) 786 rt->dst.obsolete = DST_OBSOLETE_KILL; 787 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n); 788 } 789 neigh_release(n); 790 } 791 return; 792 793 reject_redirect: 794 #ifdef CONFIG_IP_ROUTE_VERBOSE 795 if (IN_DEV_LOG_MARTIANS(in_dev)) { 796 const struct iphdr *iph = (const struct iphdr *) skb->data; 797 __be32 daddr = iph->daddr; 798 __be32 saddr = iph->saddr; 799 800 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n" 801 " Advised path = %pI4 -> %pI4\n", 802 &old_gw, dev->name, &new_gw, 803 &saddr, &daddr); 804 } 805 #endif 806 ; 807 } 808 809 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) 810 { 811 struct rtable *rt; 812 struct flowi4 fl4; 813 const struct iphdr *iph = (const struct iphdr *) skb->data; 814 struct net *net = dev_net(skb->dev); 815 int oif = skb->dev->ifindex; 816 u8 tos = RT_TOS(iph->tos); 817 u8 prot = iph->protocol; 818 u32 mark = skb->mark; 819 820 rt = (struct rtable *) dst; 821 822 __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0); 823 __ip_do_redirect(rt, skb, &fl4, true); 824 } 825 826 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) 827 { 828 struct rtable *rt = (struct rtable *)dst; 829 struct dst_entry *ret = dst; 830 831 if (rt) { 832 if (dst->obsolete > 0) { 833 ip_rt_put(rt); 834 ret = NULL; 835 } else if ((rt->rt_flags & RTCF_REDIRECTED) || 836 rt->dst.expires) { 837 ip_rt_put(rt); 838 ret = NULL; 839 } 840 } 841 return ret; 842 } 843 844 /* 845 * Algorithm: 846 * 1. The first ip_rt_redirect_number redirects are sent 847 * with exponential backoff, then we stop sending them at all, 848 * assuming that the host ignores our redirects. 849 * 2. If we did not see packets requiring redirects 850 * during ip_rt_redirect_silence, we assume that the host 851 * forgot redirected route and start to send redirects again. 852 * 853 * This algorithm is much cheaper and more intelligent than dumb load limiting 854 * in icmp.c. 855 * 856 * NOTE. Do not forget to inhibit load limiting for redirects (redundant) 857 * and "frag. need" (breaks PMTU discovery) in icmp.c. 858 */ 859 860 void ip_rt_send_redirect(struct sk_buff *skb) 861 { 862 struct rtable *rt = skb_rtable(skb); 863 struct in_device *in_dev; 864 struct inet_peer *peer; 865 struct net *net; 866 int log_martians; 867 int vif; 868 869 rcu_read_lock(); 870 in_dev = __in_dev_get_rcu(rt->dst.dev); 871 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) { 872 rcu_read_unlock(); 873 return; 874 } 875 log_martians = IN_DEV_LOG_MARTIANS(in_dev); 876 vif = l3mdev_master_ifindex_rcu(rt->dst.dev); 877 rcu_read_unlock(); 878 879 net = dev_net(rt->dst.dev); 880 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1); 881 if (!peer) { 882 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, 883 rt_nexthop(rt, ip_hdr(skb)->daddr)); 884 return; 885 } 886 887 /* No redirected packets during ip_rt_redirect_silence; 888 * reset the algorithm. 889 */ 890 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) 891 peer->rate_tokens = 0; 892 893 /* Too many ignored redirects; do not send anything 894 * set dst.rate_last to the last seen redirected packet. 895 */ 896 if (peer->rate_tokens >= ip_rt_redirect_number) { 897 peer->rate_last = jiffies; 898 goto out_put_peer; 899 } 900 901 /* Check for load limit; set rate_last to the latest sent 902 * redirect. 903 */ 904 if (peer->rate_tokens == 0 || 905 time_after(jiffies, 906 (peer->rate_last + 907 (ip_rt_redirect_load << peer->rate_tokens)))) { 908 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); 909 910 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); 911 peer->rate_last = jiffies; 912 ++peer->rate_tokens; 913 #ifdef CONFIG_IP_ROUTE_VERBOSE 914 if (log_martians && 915 peer->rate_tokens == ip_rt_redirect_number) 916 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", 917 &ip_hdr(skb)->saddr, inet_iif(skb), 918 &ip_hdr(skb)->daddr, &gw); 919 #endif 920 } 921 out_put_peer: 922 inet_putpeer(peer); 923 } 924 925 static int ip_error(struct sk_buff *skb) 926 { 927 struct rtable *rt = skb_rtable(skb); 928 struct net_device *dev = skb->dev; 929 struct in_device *in_dev; 930 struct inet_peer *peer; 931 unsigned long now; 932 struct net *net; 933 bool send; 934 int code; 935 936 if (netif_is_l3_master(skb->dev)) { 937 dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif); 938 if (!dev) 939 goto out; 940 } 941 942 in_dev = __in_dev_get_rcu(dev); 943 944 /* IP on this device is disabled. */ 945 if (!in_dev) 946 goto out; 947 948 net = dev_net(rt->dst.dev); 949 if (!IN_DEV_FORWARD(in_dev)) { 950 switch (rt->dst.error) { 951 case EHOSTUNREACH: 952 __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS); 953 break; 954 955 case ENETUNREACH: 956 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES); 957 break; 958 } 959 goto out; 960 } 961 962 switch (rt->dst.error) { 963 case EINVAL: 964 default: 965 goto out; 966 case EHOSTUNREACH: 967 code = ICMP_HOST_UNREACH; 968 break; 969 case ENETUNREACH: 970 code = ICMP_NET_UNREACH; 971 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES); 972 break; 973 case EACCES: 974 code = ICMP_PKT_FILTERED; 975 break; 976 } 977 978 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 979 l3mdev_master_ifindex(skb->dev), 1); 980 981 send = true; 982 if (peer) { 983 now = jiffies; 984 peer->rate_tokens += now - peer->rate_last; 985 if (peer->rate_tokens > ip_rt_error_burst) 986 peer->rate_tokens = ip_rt_error_burst; 987 peer->rate_last = now; 988 if (peer->rate_tokens >= ip_rt_error_cost) 989 peer->rate_tokens -= ip_rt_error_cost; 990 else 991 send = false; 992 inet_putpeer(peer); 993 } 994 if (send) 995 icmp_send(skb, ICMP_DEST_UNREACH, code, 0); 996 997 out: kfree_skb(skb); 998 return 0; 999 } 1000 1001 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) 1002 { 1003 struct dst_entry *dst = &rt->dst; 1004 struct fib_result res; 1005 bool lock = false; 1006 1007 if (ip_mtu_locked(dst)) 1008 return; 1009 1010 if (ipv4_mtu(dst) < mtu) 1011 return; 1012 1013 if (mtu < ip_rt_min_pmtu) { 1014 lock = true; 1015 mtu = ip_rt_min_pmtu; 1016 } 1017 1018 if (rt->rt_pmtu == mtu && 1019 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) 1020 return; 1021 1022 rcu_read_lock(); 1023 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { 1024 struct fib_nh *nh = &FIB_RES_NH(res); 1025 1026 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock, 1027 jiffies + ip_rt_mtu_expires); 1028 } 1029 rcu_read_unlock(); 1030 } 1031 1032 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 1033 struct sk_buff *skb, u32 mtu) 1034 { 1035 struct rtable *rt = (struct rtable *) dst; 1036 struct flowi4 fl4; 1037 1038 ip_rt_build_flow_key(&fl4, sk, skb); 1039 __ip_rt_update_pmtu(rt, &fl4, mtu); 1040 } 1041 1042 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, 1043 int oif, u8 protocol) 1044 { 1045 const struct iphdr *iph = (const struct iphdr *) skb->data; 1046 struct flowi4 fl4; 1047 struct rtable *rt; 1048 u32 mark = IP4_REPLY_MARK(net, skb->mark); 1049 1050 __build_flow_key(net, &fl4, NULL, iph, oif, 1051 RT_TOS(iph->tos), protocol, mark, 0); 1052 rt = __ip_route_output_key(net, &fl4); 1053 if (!IS_ERR(rt)) { 1054 __ip_rt_update_pmtu(rt, &fl4, mtu); 1055 ip_rt_put(rt); 1056 } 1057 } 1058 EXPORT_SYMBOL_GPL(ipv4_update_pmtu); 1059 1060 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) 1061 { 1062 const struct iphdr *iph = (const struct iphdr *) skb->data; 1063 struct flowi4 fl4; 1064 struct rtable *rt; 1065 1066 __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0); 1067 1068 if (!fl4.flowi4_mark) 1069 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark); 1070 1071 rt = __ip_route_output_key(sock_net(sk), &fl4); 1072 if (!IS_ERR(rt)) { 1073 __ip_rt_update_pmtu(rt, &fl4, mtu); 1074 ip_rt_put(rt); 1075 } 1076 } 1077 1078 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) 1079 { 1080 const struct iphdr *iph = (const struct iphdr *) skb->data; 1081 struct flowi4 fl4; 1082 struct rtable *rt; 1083 struct dst_entry *odst = NULL; 1084 bool new = false; 1085 struct net *net = sock_net(sk); 1086 1087 bh_lock_sock(sk); 1088 1089 if (!ip_sk_accept_pmtu(sk)) 1090 goto out; 1091 1092 odst = sk_dst_get(sk); 1093 1094 if (sock_owned_by_user(sk) || !odst) { 1095 __ipv4_sk_update_pmtu(skb, sk, mtu); 1096 goto out; 1097 } 1098 1099 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0); 1100 1101 rt = (struct rtable *)odst; 1102 if (odst->obsolete && !odst->ops->check(odst, 0)) { 1103 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 1104 if (IS_ERR(rt)) 1105 goto out; 1106 1107 new = true; 1108 } 1109 1110 __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu); 1111 1112 if (!dst_check(&rt->dst, 0)) { 1113 if (new) 1114 dst_release(&rt->dst); 1115 1116 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 1117 if (IS_ERR(rt)) 1118 goto out; 1119 1120 new = true; 1121 } 1122 1123 if (new) 1124 sk_dst_set(sk, &rt->dst); 1125 1126 out: 1127 bh_unlock_sock(sk); 1128 dst_release(odst); 1129 } 1130 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); 1131 1132 void ipv4_redirect(struct sk_buff *skb, struct net *net, 1133 int oif, u8 protocol) 1134 { 1135 const struct iphdr *iph = (const struct iphdr *) skb->data; 1136 struct flowi4 fl4; 1137 struct rtable *rt; 1138 1139 __build_flow_key(net, &fl4, NULL, iph, oif, 1140 RT_TOS(iph->tos), protocol, 0, 0); 1141 rt = __ip_route_output_key(net, &fl4); 1142 if (!IS_ERR(rt)) { 1143 __ip_do_redirect(rt, skb, &fl4, false); 1144 ip_rt_put(rt); 1145 } 1146 } 1147 EXPORT_SYMBOL_GPL(ipv4_redirect); 1148 1149 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk) 1150 { 1151 const struct iphdr *iph = (const struct iphdr *) skb->data; 1152 struct flowi4 fl4; 1153 struct rtable *rt; 1154 struct net *net = sock_net(sk); 1155 1156 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0); 1157 rt = __ip_route_output_key(net, &fl4); 1158 if (!IS_ERR(rt)) { 1159 __ip_do_redirect(rt, skb, &fl4, false); 1160 ip_rt_put(rt); 1161 } 1162 } 1163 EXPORT_SYMBOL_GPL(ipv4_sk_redirect); 1164 1165 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) 1166 { 1167 struct rtable *rt = (struct rtable *) dst; 1168 1169 /* All IPV4 dsts are created with ->obsolete set to the value 1170 * DST_OBSOLETE_FORCE_CHK which forces validation calls down 1171 * into this function always. 1172 * 1173 * When a PMTU/redirect information update invalidates a route, 1174 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or 1175 * DST_OBSOLETE_DEAD by dst_free(). 1176 */ 1177 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt)) 1178 return NULL; 1179 return dst; 1180 } 1181 1182 static void ipv4_link_failure(struct sk_buff *skb) 1183 { 1184 struct rtable *rt; 1185 1186 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); 1187 1188 rt = skb_rtable(skb); 1189 if (rt) 1190 dst_set_expires(&rt->dst, 0); 1191 } 1192 1193 static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb) 1194 { 1195 pr_debug("%s: %pI4 -> %pI4, %s\n", 1196 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, 1197 skb->dev ? skb->dev->name : "?"); 1198 kfree_skb(skb); 1199 WARN_ON(1); 1200 return 0; 1201 } 1202 1203 /* 1204 We do not cache source address of outgoing interface, 1205 because it is used only by IP RR, TS and SRR options, 1206 so that it out of fast path. 1207 1208 BTW remember: "addr" is allowed to be not aligned 1209 in IP options! 1210 */ 1211 1212 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt) 1213 { 1214 __be32 src; 1215 1216 if (rt_is_output_route(rt)) 1217 src = ip_hdr(skb)->saddr; 1218 else { 1219 struct fib_result res; 1220 struct iphdr *iph = ip_hdr(skb); 1221 struct flowi4 fl4 = { 1222 .daddr = iph->daddr, 1223 .saddr = iph->saddr, 1224 .flowi4_tos = RT_TOS(iph->tos), 1225 .flowi4_oif = rt->dst.dev->ifindex, 1226 .flowi4_iif = skb->dev->ifindex, 1227 .flowi4_mark = skb->mark, 1228 }; 1229 1230 rcu_read_lock(); 1231 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0) 1232 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res); 1233 else 1234 src = inet_select_addr(rt->dst.dev, 1235 rt_nexthop(rt, iph->daddr), 1236 RT_SCOPE_UNIVERSE); 1237 rcu_read_unlock(); 1238 } 1239 memcpy(addr, &src, 4); 1240 } 1241 1242 #ifdef CONFIG_IP_ROUTE_CLASSID 1243 static void set_class_tag(struct rtable *rt, u32 tag) 1244 { 1245 if (!(rt->dst.tclassid & 0xFFFF)) 1246 rt->dst.tclassid |= tag & 0xFFFF; 1247 if (!(rt->dst.tclassid & 0xFFFF0000)) 1248 rt->dst.tclassid |= tag & 0xFFFF0000; 1249 } 1250 #endif 1251 1252 static unsigned int ipv4_default_advmss(const struct dst_entry *dst) 1253 { 1254 unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr); 1255 unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size, 1256 ip_rt_min_advmss); 1257 1258 return min(advmss, IPV4_MAX_PMTU - header_size); 1259 } 1260 1261 static unsigned int ipv4_mtu(const struct dst_entry *dst) 1262 { 1263 const struct rtable *rt = (const struct rtable *) dst; 1264 unsigned int mtu = rt->rt_pmtu; 1265 1266 if (!mtu || time_after_eq(jiffies, rt->dst.expires)) 1267 mtu = dst_metric_raw(dst, RTAX_MTU); 1268 1269 if (mtu) 1270 return mtu; 1271 1272 mtu = READ_ONCE(dst->dev->mtu); 1273 1274 if (unlikely(ip_mtu_locked(dst))) { 1275 if (rt->rt_uses_gateway && mtu > 576) 1276 mtu = 576; 1277 } 1278 1279 mtu = min_t(unsigned int, mtu, IP_MAX_MTU); 1280 1281 return mtu - lwtunnel_headroom(dst->lwtstate, mtu); 1282 } 1283 1284 static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) 1285 { 1286 struct fnhe_hash_bucket *hash; 1287 struct fib_nh_exception *fnhe, __rcu **fnhe_p; 1288 u32 hval = fnhe_hashfun(daddr); 1289 1290 spin_lock_bh(&fnhe_lock); 1291 1292 hash = rcu_dereference_protected(nh->nh_exceptions, 1293 lockdep_is_held(&fnhe_lock)); 1294 hash += hval; 1295 1296 fnhe_p = &hash->chain; 1297 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); 1298 while (fnhe) { 1299 if (fnhe->fnhe_daddr == daddr) { 1300 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( 1301 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); 1302 fnhe_flush_routes(fnhe); 1303 kfree_rcu(fnhe, rcu); 1304 break; 1305 } 1306 fnhe_p = &fnhe->fnhe_next; 1307 fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1308 lockdep_is_held(&fnhe_lock)); 1309 } 1310 1311 spin_unlock_bh(&fnhe_lock); 1312 } 1313 1314 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) 1315 { 1316 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); 1317 struct fib_nh_exception *fnhe; 1318 u32 hval; 1319 1320 if (!hash) 1321 return NULL; 1322 1323 hval = fnhe_hashfun(daddr); 1324 1325 for (fnhe = rcu_dereference(hash[hval].chain); fnhe; 1326 fnhe = rcu_dereference(fnhe->fnhe_next)) { 1327 if (fnhe->fnhe_daddr == daddr) { 1328 if (fnhe->fnhe_expires && 1329 time_after(jiffies, fnhe->fnhe_expires)) { 1330 ip_del_fnhe(nh, daddr); 1331 break; 1332 } 1333 return fnhe; 1334 } 1335 } 1336 return NULL; 1337 } 1338 1339 /* MTU selection: 1340 * 1. mtu on route is locked - use it 1341 * 2. mtu from nexthop exception 1342 * 3. mtu from egress device 1343 */ 1344 1345 u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr) 1346 { 1347 struct fib_info *fi = res->fi; 1348 struct fib_nh *nh = &fi->fib_nh[res->nh_sel]; 1349 struct net_device *dev = nh->nh_dev; 1350 u32 mtu = 0; 1351 1352 if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu || 1353 fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU)) 1354 mtu = fi->fib_mtu; 1355 1356 if (likely(!mtu)) { 1357 struct fib_nh_exception *fnhe; 1358 1359 fnhe = find_exception(nh, daddr); 1360 if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires)) 1361 mtu = fnhe->fnhe_pmtu; 1362 } 1363 1364 if (likely(!mtu)) 1365 mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU); 1366 1367 return mtu - lwtunnel_headroom(nh->nh_lwtstate, mtu); 1368 } 1369 1370 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, 1371 __be32 daddr, const bool do_cache) 1372 { 1373 bool ret = false; 1374 1375 spin_lock_bh(&fnhe_lock); 1376 1377 if (daddr == fnhe->fnhe_daddr) { 1378 struct rtable __rcu **porig; 1379 struct rtable *orig; 1380 int genid = fnhe_genid(dev_net(rt->dst.dev)); 1381 1382 if (rt_is_input_route(rt)) 1383 porig = &fnhe->fnhe_rth_input; 1384 else 1385 porig = &fnhe->fnhe_rth_output; 1386 orig = rcu_dereference(*porig); 1387 1388 if (fnhe->fnhe_genid != genid) { 1389 fnhe->fnhe_genid = genid; 1390 fnhe->fnhe_gw = 0; 1391 fnhe->fnhe_pmtu = 0; 1392 fnhe->fnhe_expires = 0; 1393 fnhe->fnhe_mtu_locked = false; 1394 fnhe_flush_routes(fnhe); 1395 orig = NULL; 1396 } 1397 fill_route_from_fnhe(rt, fnhe); 1398 if (!rt->rt_gateway) 1399 rt->rt_gateway = daddr; 1400 1401 if (do_cache) { 1402 dst_hold(&rt->dst); 1403 rcu_assign_pointer(*porig, rt); 1404 if (orig) { 1405 dst_dev_put(&orig->dst); 1406 dst_release(&orig->dst); 1407 } 1408 ret = true; 1409 } 1410 1411 fnhe->fnhe_stamp = jiffies; 1412 } 1413 spin_unlock_bh(&fnhe_lock); 1414 1415 return ret; 1416 } 1417 1418 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) 1419 { 1420 struct rtable *orig, *prev, **p; 1421 bool ret = true; 1422 1423 if (rt_is_input_route(rt)) { 1424 p = (struct rtable **)&nh->nh_rth_input; 1425 } else { 1426 p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output); 1427 } 1428 orig = *p; 1429 1430 /* hold dst before doing cmpxchg() to avoid race condition 1431 * on this dst 1432 */ 1433 dst_hold(&rt->dst); 1434 prev = cmpxchg(p, orig, rt); 1435 if (prev == orig) { 1436 if (orig) { 1437 dst_dev_put(&orig->dst); 1438 dst_release(&orig->dst); 1439 } 1440 } else { 1441 dst_release(&rt->dst); 1442 ret = false; 1443 } 1444 1445 return ret; 1446 } 1447 1448 struct uncached_list { 1449 spinlock_t lock; 1450 struct list_head head; 1451 }; 1452 1453 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); 1454 1455 void rt_add_uncached_list(struct rtable *rt) 1456 { 1457 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list); 1458 1459 rt->rt_uncached_list = ul; 1460 1461 spin_lock_bh(&ul->lock); 1462 list_add_tail(&rt->rt_uncached, &ul->head); 1463 spin_unlock_bh(&ul->lock); 1464 } 1465 1466 void rt_del_uncached_list(struct rtable *rt) 1467 { 1468 if (!list_empty(&rt->rt_uncached)) { 1469 struct uncached_list *ul = rt->rt_uncached_list; 1470 1471 spin_lock_bh(&ul->lock); 1472 list_del(&rt->rt_uncached); 1473 spin_unlock_bh(&ul->lock); 1474 } 1475 } 1476 1477 static void ipv4_dst_destroy(struct dst_entry *dst) 1478 { 1479 struct rtable *rt = (struct rtable *)dst; 1480 1481 ip_dst_metrics_put(dst); 1482 rt_del_uncached_list(rt); 1483 } 1484 1485 void rt_flush_dev(struct net_device *dev) 1486 { 1487 struct net *net = dev_net(dev); 1488 struct rtable *rt; 1489 int cpu; 1490 1491 for_each_possible_cpu(cpu) { 1492 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu); 1493 1494 spin_lock_bh(&ul->lock); 1495 list_for_each_entry(rt, &ul->head, rt_uncached) { 1496 if (rt->dst.dev != dev) 1497 continue; 1498 rt->dst.dev = net->loopback_dev; 1499 dev_hold(rt->dst.dev); 1500 dev_put(dev); 1501 } 1502 spin_unlock_bh(&ul->lock); 1503 } 1504 } 1505 1506 static bool rt_cache_valid(const struct rtable *rt) 1507 { 1508 return rt && 1509 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && 1510 !rt_is_expired(rt); 1511 } 1512 1513 static void rt_set_nexthop(struct rtable *rt, __be32 daddr, 1514 const struct fib_result *res, 1515 struct fib_nh_exception *fnhe, 1516 struct fib_info *fi, u16 type, u32 itag, 1517 const bool do_cache) 1518 { 1519 bool cached = false; 1520 1521 if (fi) { 1522 struct fib_nh *nh = &FIB_RES_NH(*res); 1523 1524 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) { 1525 rt->rt_gateway = nh->nh_gw; 1526 rt->rt_uses_gateway = 1; 1527 } 1528 ip_dst_init_metrics(&rt->dst, fi->fib_metrics); 1529 1530 #ifdef CONFIG_IP_ROUTE_CLASSID 1531 rt->dst.tclassid = nh->nh_tclassid; 1532 #endif 1533 rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate); 1534 if (unlikely(fnhe)) 1535 cached = rt_bind_exception(rt, fnhe, daddr, do_cache); 1536 else if (do_cache) 1537 cached = rt_cache_route(nh, rt); 1538 if (unlikely(!cached)) { 1539 /* Routes we intend to cache in nexthop exception or 1540 * FIB nexthop have the DST_NOCACHE bit clear. 1541 * However, if we are unsuccessful at storing this 1542 * route into the cache we really need to set it. 1543 */ 1544 if (!rt->rt_gateway) 1545 rt->rt_gateway = daddr; 1546 rt_add_uncached_list(rt); 1547 } 1548 } else 1549 rt_add_uncached_list(rt); 1550 1551 #ifdef CONFIG_IP_ROUTE_CLASSID 1552 #ifdef CONFIG_IP_MULTIPLE_TABLES 1553 set_class_tag(rt, res->tclassid); 1554 #endif 1555 set_class_tag(rt, itag); 1556 #endif 1557 } 1558 1559 struct rtable *rt_dst_alloc(struct net_device *dev, 1560 unsigned int flags, u16 type, 1561 bool nopolicy, bool noxfrm, bool will_cache) 1562 { 1563 struct rtable *rt; 1564 1565 rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, 1566 (will_cache ? 0 : DST_HOST) | 1567 (nopolicy ? DST_NOPOLICY : 0) | 1568 (noxfrm ? DST_NOXFRM : 0)); 1569 1570 if (rt) { 1571 rt->rt_genid = rt_genid_ipv4(dev_net(dev)); 1572 rt->rt_flags = flags; 1573 rt->rt_type = type; 1574 rt->rt_is_input = 0; 1575 rt->rt_iif = 0; 1576 rt->rt_pmtu = 0; 1577 rt->rt_mtu_locked = 0; 1578 rt->rt_gateway = 0; 1579 rt->rt_uses_gateway = 0; 1580 INIT_LIST_HEAD(&rt->rt_uncached); 1581 1582 rt->dst.output = ip_output; 1583 if (flags & RTCF_LOCAL) 1584 rt->dst.input = ip_local_deliver; 1585 } 1586 1587 return rt; 1588 } 1589 EXPORT_SYMBOL(rt_dst_alloc); 1590 1591 /* called in rcu_read_lock() section */ 1592 int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1593 u8 tos, struct net_device *dev, 1594 struct in_device *in_dev, u32 *itag) 1595 { 1596 int err; 1597 1598 /* Primary sanity checks. */ 1599 if (!in_dev) 1600 return -EINVAL; 1601 1602 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1603 skb->protocol != htons(ETH_P_IP)) 1604 return -EINVAL; 1605 1606 if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev)) 1607 return -EINVAL; 1608 1609 if (ipv4_is_zeronet(saddr)) { 1610 if (!ipv4_is_local_multicast(daddr)) 1611 return -EINVAL; 1612 } else { 1613 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, 1614 in_dev, itag); 1615 if (err < 0) 1616 return err; 1617 } 1618 return 0; 1619 } 1620 1621 /* called in rcu_read_lock() section */ 1622 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1623 u8 tos, struct net_device *dev, int our) 1624 { 1625 struct in_device *in_dev = __in_dev_get_rcu(dev); 1626 unsigned int flags = RTCF_MULTICAST; 1627 struct rtable *rth; 1628 u32 itag = 0; 1629 int err; 1630 1631 err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag); 1632 if (err) 1633 return err; 1634 1635 if (our) 1636 flags |= RTCF_LOCAL; 1637 1638 rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, 1639 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false); 1640 if (!rth) 1641 return -ENOBUFS; 1642 1643 #ifdef CONFIG_IP_ROUTE_CLASSID 1644 rth->dst.tclassid = itag; 1645 #endif 1646 rth->dst.output = ip_rt_bug; 1647 rth->rt_is_input= 1; 1648 1649 #ifdef CONFIG_IP_MROUTE 1650 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev)) 1651 rth->dst.input = ip_mr_input; 1652 #endif 1653 RT_CACHE_STAT_INC(in_slow_mc); 1654 1655 skb_dst_set(skb, &rth->dst); 1656 return 0; 1657 } 1658 1659 1660 static void ip_handle_martian_source(struct net_device *dev, 1661 struct in_device *in_dev, 1662 struct sk_buff *skb, 1663 __be32 daddr, 1664 __be32 saddr) 1665 { 1666 RT_CACHE_STAT_INC(in_martian_src); 1667 #ifdef CONFIG_IP_ROUTE_VERBOSE 1668 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) { 1669 /* 1670 * RFC1812 recommendation, if source is martian, 1671 * the only hint is MAC header. 1672 */ 1673 pr_warn("martian source %pI4 from %pI4, on dev %s\n", 1674 &daddr, &saddr, dev->name); 1675 if (dev->hard_header_len && skb_mac_header_was_set(skb)) { 1676 print_hex_dump(KERN_WARNING, "ll header: ", 1677 DUMP_PREFIX_OFFSET, 16, 1, 1678 skb_mac_header(skb), 1679 dev->hard_header_len, true); 1680 } 1681 } 1682 #endif 1683 } 1684 1685 /* called in rcu_read_lock() section */ 1686 static int __mkroute_input(struct sk_buff *skb, 1687 const struct fib_result *res, 1688 struct in_device *in_dev, 1689 __be32 daddr, __be32 saddr, u32 tos) 1690 { 1691 struct fib_nh_exception *fnhe; 1692 struct rtable *rth; 1693 int err; 1694 struct in_device *out_dev; 1695 bool do_cache; 1696 u32 itag = 0; 1697 1698 /* get a working reference to the output device */ 1699 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); 1700 if (!out_dev) { 1701 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); 1702 return -EINVAL; 1703 } 1704 1705 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), 1706 in_dev->dev, in_dev, &itag); 1707 if (err < 0) { 1708 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 1709 saddr); 1710 1711 goto cleanup; 1712 } 1713 1714 do_cache = res->fi && !itag; 1715 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && 1716 skb->protocol == htons(ETH_P_IP) && 1717 (IN_DEV_SHARED_MEDIA(out_dev) || 1718 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) 1719 IPCB(skb)->flags |= IPSKB_DOREDIRECT; 1720 1721 if (skb->protocol != htons(ETH_P_IP)) { 1722 /* Not IP (i.e. ARP). Do not create route, if it is 1723 * invalid for proxy arp. DNAT routes are always valid. 1724 * 1725 * Proxy arp feature have been extended to allow, ARP 1726 * replies back to the same interface, to support 1727 * Private VLAN switch technologies. See arp.c. 1728 */ 1729 if (out_dev == in_dev && 1730 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) { 1731 err = -EINVAL; 1732 goto cleanup; 1733 } 1734 } 1735 1736 fnhe = find_exception(&FIB_RES_NH(*res), daddr); 1737 if (do_cache) { 1738 if (fnhe) 1739 rth = rcu_dereference(fnhe->fnhe_rth_input); 1740 else 1741 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1742 if (rt_cache_valid(rth)) { 1743 skb_dst_set_noref(skb, &rth->dst); 1744 goto out; 1745 } 1746 } 1747 1748 rth = rt_dst_alloc(out_dev->dev, 0, res->type, 1749 IN_DEV_CONF_GET(in_dev, NOPOLICY), 1750 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache); 1751 if (!rth) { 1752 err = -ENOBUFS; 1753 goto cleanup; 1754 } 1755 1756 rth->rt_is_input = 1; 1757 RT_CACHE_STAT_INC(in_slow_tot); 1758 1759 rth->dst.input = ip_forward; 1760 1761 rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag, 1762 do_cache); 1763 lwtunnel_set_redirect(&rth->dst); 1764 skb_dst_set(skb, &rth->dst); 1765 out: 1766 err = 0; 1767 cleanup: 1768 return err; 1769 } 1770 1771 #ifdef CONFIG_IP_ROUTE_MULTIPATH 1772 /* To make ICMP packets follow the right flow, the multipath hash is 1773 * calculated from the inner IP addresses. 1774 */ 1775 static void ip_multipath_l3_keys(const struct sk_buff *skb, 1776 struct flow_keys *hash_keys) 1777 { 1778 const struct iphdr *outer_iph = ip_hdr(skb); 1779 const struct iphdr *key_iph = outer_iph; 1780 const struct iphdr *inner_iph; 1781 const struct icmphdr *icmph; 1782 struct iphdr _inner_iph; 1783 struct icmphdr _icmph; 1784 1785 if (likely(outer_iph->protocol != IPPROTO_ICMP)) 1786 goto out; 1787 1788 if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0)) 1789 goto out; 1790 1791 icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph), 1792 &_icmph); 1793 if (!icmph) 1794 goto out; 1795 1796 if (icmph->type != ICMP_DEST_UNREACH && 1797 icmph->type != ICMP_REDIRECT && 1798 icmph->type != ICMP_TIME_EXCEEDED && 1799 icmph->type != ICMP_PARAMETERPROB) 1800 goto out; 1801 1802 inner_iph = skb_header_pointer(skb, 1803 outer_iph->ihl * 4 + sizeof(_icmph), 1804 sizeof(_inner_iph), &_inner_iph); 1805 if (!inner_iph) 1806 goto out; 1807 1808 key_iph = inner_iph; 1809 out: 1810 hash_keys->addrs.v4addrs.src = key_iph->saddr; 1811 hash_keys->addrs.v4addrs.dst = key_iph->daddr; 1812 } 1813 1814 /* if skb is set it will be used and fl4 can be NULL */ 1815 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, 1816 const struct sk_buff *skb, struct flow_keys *flkeys) 1817 { 1818 struct flow_keys hash_keys; 1819 u32 mhash; 1820 1821 switch (net->ipv4.sysctl_fib_multipath_hash_policy) { 1822 case 0: 1823 memset(&hash_keys, 0, sizeof(hash_keys)); 1824 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1825 if (skb) { 1826 ip_multipath_l3_keys(skb, &hash_keys); 1827 } else { 1828 hash_keys.addrs.v4addrs.src = fl4->saddr; 1829 hash_keys.addrs.v4addrs.dst = fl4->daddr; 1830 } 1831 break; 1832 case 1: 1833 /* skb is currently provided only when forwarding */ 1834 if (skb) { 1835 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; 1836 struct flow_keys keys; 1837 1838 /* short-circuit if we already have L4 hash present */ 1839 if (skb->l4_hash) 1840 return skb_get_hash_raw(skb) >> 1; 1841 1842 memset(&hash_keys, 0, sizeof(hash_keys)); 1843 1844 if (!flkeys) { 1845 skb_flow_dissect_flow_keys(skb, &keys, flag); 1846 flkeys = &keys; 1847 } 1848 1849 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1850 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src; 1851 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst; 1852 hash_keys.ports.src = flkeys->ports.src; 1853 hash_keys.ports.dst = flkeys->ports.dst; 1854 hash_keys.basic.ip_proto = flkeys->basic.ip_proto; 1855 } else { 1856 memset(&hash_keys, 0, sizeof(hash_keys)); 1857 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1858 hash_keys.addrs.v4addrs.src = fl4->saddr; 1859 hash_keys.addrs.v4addrs.dst = fl4->daddr; 1860 hash_keys.ports.src = fl4->fl4_sport; 1861 hash_keys.ports.dst = fl4->fl4_dport; 1862 hash_keys.basic.ip_proto = fl4->flowi4_proto; 1863 } 1864 break; 1865 } 1866 mhash = flow_hash_from_keys(&hash_keys); 1867 1868 return mhash >> 1; 1869 } 1870 #endif /* CONFIG_IP_ROUTE_MULTIPATH */ 1871 1872 static int ip_mkroute_input(struct sk_buff *skb, 1873 struct fib_result *res, 1874 struct in_device *in_dev, 1875 __be32 daddr, __be32 saddr, u32 tos, 1876 struct flow_keys *hkeys) 1877 { 1878 #ifdef CONFIG_IP_ROUTE_MULTIPATH 1879 if (res->fi && res->fi->fib_nhs > 1) { 1880 int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys); 1881 1882 fib_select_multipath(res, h); 1883 } 1884 #endif 1885 1886 /* create a routing cache entry */ 1887 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos); 1888 } 1889 1890 /* 1891 * NOTE. We drop all the packets that has local source 1892 * addresses, because every properly looped back packet 1893 * must have correct destination already attached by output routine. 1894 * 1895 * Such approach solves two big problems: 1896 * 1. Not simplex devices are handled properly. 1897 * 2. IP spoofing attempts are filtered with 100% of guarantee. 1898 * called with rcu_read_lock() 1899 */ 1900 1901 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1902 u8 tos, struct net_device *dev, 1903 struct fib_result *res) 1904 { 1905 struct in_device *in_dev = __in_dev_get_rcu(dev); 1906 struct flow_keys *flkeys = NULL, _flkeys; 1907 struct net *net = dev_net(dev); 1908 struct ip_tunnel_info *tun_info; 1909 int err = -EINVAL; 1910 unsigned int flags = 0; 1911 u32 itag = 0; 1912 struct rtable *rth; 1913 struct flowi4 fl4; 1914 bool do_cache; 1915 1916 /* IP on this device is disabled. */ 1917 1918 if (!in_dev) 1919 goto out; 1920 1921 /* Check for the most weird martians, which can be not detected 1922 by fib_lookup. 1923 */ 1924 1925 tun_info = skb_tunnel_info(skb); 1926 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX)) 1927 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id; 1928 else 1929 fl4.flowi4_tun_key.tun_id = 0; 1930 skb_dst_drop(skb); 1931 1932 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) 1933 goto martian_source; 1934 1935 res->fi = NULL; 1936 res->table = NULL; 1937 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0)) 1938 goto brd_input; 1939 1940 /* Accept zero addresses only to limited broadcast; 1941 * I even do not know to fix it or not. Waiting for complains :-) 1942 */ 1943 if (ipv4_is_zeronet(saddr)) 1944 goto martian_source; 1945 1946 if (ipv4_is_zeronet(daddr)) 1947 goto martian_destination; 1948 1949 /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(), 1950 * and call it once if daddr or/and saddr are loopback addresses 1951 */ 1952 if (ipv4_is_loopback(daddr)) { 1953 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) 1954 goto martian_destination; 1955 } else if (ipv4_is_loopback(saddr)) { 1956 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) 1957 goto martian_source; 1958 } 1959 1960 /* 1961 * Now we are ready to route packet. 1962 */ 1963 fl4.flowi4_oif = 0; 1964 fl4.flowi4_iif = dev->ifindex; 1965 fl4.flowi4_mark = skb->mark; 1966 fl4.flowi4_tos = tos; 1967 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 1968 fl4.flowi4_flags = 0; 1969 fl4.daddr = daddr; 1970 fl4.saddr = saddr; 1971 fl4.flowi4_uid = sock_net_uid(net, NULL); 1972 1973 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) { 1974 flkeys = &_flkeys; 1975 } else { 1976 fl4.flowi4_proto = 0; 1977 fl4.fl4_sport = 0; 1978 fl4.fl4_dport = 0; 1979 } 1980 1981 err = fib_lookup(net, &fl4, res, 0); 1982 if (err != 0) { 1983 if (!IN_DEV_FORWARD(in_dev)) 1984 err = -EHOSTUNREACH; 1985 goto no_route; 1986 } 1987 1988 if (res->type == RTN_BROADCAST) { 1989 if (IN_DEV_BFORWARD(in_dev)) 1990 goto make_route; 1991 goto brd_input; 1992 } 1993 1994 if (res->type == RTN_LOCAL) { 1995 err = fib_validate_source(skb, saddr, daddr, tos, 1996 0, dev, in_dev, &itag); 1997 if (err < 0) 1998 goto martian_source; 1999 goto local_input; 2000 } 2001 2002 if (!IN_DEV_FORWARD(in_dev)) { 2003 err = -EHOSTUNREACH; 2004 goto no_route; 2005 } 2006 if (res->type != RTN_UNICAST) 2007 goto martian_destination; 2008 2009 make_route: 2010 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys); 2011 out: return err; 2012 2013 brd_input: 2014 if (skb->protocol != htons(ETH_P_IP)) 2015 goto e_inval; 2016 2017 if (!ipv4_is_zeronet(saddr)) { 2018 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, 2019 in_dev, &itag); 2020 if (err < 0) 2021 goto martian_source; 2022 } 2023 flags |= RTCF_BROADCAST; 2024 res->type = RTN_BROADCAST; 2025 RT_CACHE_STAT_INC(in_brd); 2026 2027 local_input: 2028 do_cache = false; 2029 if (res->fi) { 2030 if (!itag) { 2031 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 2032 if (rt_cache_valid(rth)) { 2033 skb_dst_set_noref(skb, &rth->dst); 2034 err = 0; 2035 goto out; 2036 } 2037 do_cache = true; 2038 } 2039 } 2040 2041 rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev, 2042 flags | RTCF_LOCAL, res->type, 2043 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache); 2044 if (!rth) 2045 goto e_nobufs; 2046 2047 rth->dst.output= ip_rt_bug; 2048 #ifdef CONFIG_IP_ROUTE_CLASSID 2049 rth->dst.tclassid = itag; 2050 #endif 2051 rth->rt_is_input = 1; 2052 2053 RT_CACHE_STAT_INC(in_slow_tot); 2054 if (res->type == RTN_UNREACHABLE) { 2055 rth->dst.input= ip_error; 2056 rth->dst.error= -err; 2057 rth->rt_flags &= ~RTCF_LOCAL; 2058 } 2059 2060 if (do_cache) { 2061 struct fib_nh *nh = &FIB_RES_NH(*res); 2062 2063 rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate); 2064 if (lwtunnel_input_redirect(rth->dst.lwtstate)) { 2065 WARN_ON(rth->dst.input == lwtunnel_input); 2066 rth->dst.lwtstate->orig_input = rth->dst.input; 2067 rth->dst.input = lwtunnel_input; 2068 } 2069 2070 if (unlikely(!rt_cache_route(nh, rth))) 2071 rt_add_uncached_list(rth); 2072 } 2073 skb_dst_set(skb, &rth->dst); 2074 err = 0; 2075 goto out; 2076 2077 no_route: 2078 RT_CACHE_STAT_INC(in_no_route); 2079 res->type = RTN_UNREACHABLE; 2080 res->fi = NULL; 2081 res->table = NULL; 2082 goto local_input; 2083 2084 /* 2085 * Do not cache martian addresses: they should be logged (RFC1812) 2086 */ 2087 martian_destination: 2088 RT_CACHE_STAT_INC(in_martian_dst); 2089 #ifdef CONFIG_IP_ROUTE_VERBOSE 2090 if (IN_DEV_LOG_MARTIANS(in_dev)) 2091 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n", 2092 &daddr, &saddr, dev->name); 2093 #endif 2094 2095 e_inval: 2096 err = -EINVAL; 2097 goto out; 2098 2099 e_nobufs: 2100 err = -ENOBUFS; 2101 goto out; 2102 2103 martian_source: 2104 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); 2105 goto out; 2106 } 2107 2108 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, 2109 u8 tos, struct net_device *dev) 2110 { 2111 struct fib_result res; 2112 int err; 2113 2114 tos &= IPTOS_RT_MASK; 2115 rcu_read_lock(); 2116 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res); 2117 rcu_read_unlock(); 2118 2119 return err; 2120 } 2121 EXPORT_SYMBOL(ip_route_input_noref); 2122 2123 /* called with rcu_read_lock held */ 2124 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, 2125 u8 tos, struct net_device *dev, struct fib_result *res) 2126 { 2127 /* Multicast recognition logic is moved from route cache to here. 2128 The problem was that too many Ethernet cards have broken/missing 2129 hardware multicast filters :-( As result the host on multicasting 2130 network acquires a lot of useless route cache entries, sort of 2131 SDR messages from all the world. Now we try to get rid of them. 2132 Really, provided software IP multicast filter is organized 2133 reasonably (at least, hashed), it does not result in a slowdown 2134 comparing with route cache reject entries. 2135 Note, that multicast routers are not affected, because 2136 route cache entry is created eventually. 2137 */ 2138 if (ipv4_is_multicast(daddr)) { 2139 struct in_device *in_dev = __in_dev_get_rcu(dev); 2140 int our = 0; 2141 int err = -EINVAL; 2142 2143 if (in_dev) 2144 our = ip_check_mc_rcu(in_dev, daddr, saddr, 2145 ip_hdr(skb)->protocol); 2146 2147 /* check l3 master if no match yet */ 2148 if ((!in_dev || !our) && netif_is_l3_slave(dev)) { 2149 struct in_device *l3_in_dev; 2150 2151 l3_in_dev = __in_dev_get_rcu(skb->dev); 2152 if (l3_in_dev) 2153 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr, 2154 ip_hdr(skb)->protocol); 2155 } 2156 2157 if (our 2158 #ifdef CONFIG_IP_MROUTE 2159 || 2160 (!ipv4_is_local_multicast(daddr) && 2161 IN_DEV_MFORWARD(in_dev)) 2162 #endif 2163 ) { 2164 err = ip_route_input_mc(skb, daddr, saddr, 2165 tos, dev, our); 2166 } 2167 return err; 2168 } 2169 2170 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res); 2171 } 2172 2173 /* called with rcu_read_lock() */ 2174 static struct rtable *__mkroute_output(const struct fib_result *res, 2175 const struct flowi4 *fl4, int orig_oif, 2176 struct net_device *dev_out, 2177 unsigned int flags) 2178 { 2179 struct fib_info *fi = res->fi; 2180 struct fib_nh_exception *fnhe; 2181 struct in_device *in_dev; 2182 u16 type = res->type; 2183 struct rtable *rth; 2184 bool do_cache; 2185 2186 in_dev = __in_dev_get_rcu(dev_out); 2187 if (!in_dev) 2188 return ERR_PTR(-EINVAL); 2189 2190 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) 2191 if (ipv4_is_loopback(fl4->saddr) && 2192 !(dev_out->flags & IFF_LOOPBACK) && 2193 !netif_is_l3_master(dev_out)) 2194 return ERR_PTR(-EINVAL); 2195 2196 if (ipv4_is_lbcast(fl4->daddr)) 2197 type = RTN_BROADCAST; 2198 else if (ipv4_is_multicast(fl4->daddr)) 2199 type = RTN_MULTICAST; 2200 else if (ipv4_is_zeronet(fl4->daddr)) 2201 return ERR_PTR(-EINVAL); 2202 2203 if (dev_out->flags & IFF_LOOPBACK) 2204 flags |= RTCF_LOCAL; 2205 2206 do_cache = true; 2207 if (type == RTN_BROADCAST) { 2208 flags |= RTCF_BROADCAST | RTCF_LOCAL; 2209 fi = NULL; 2210 } else if (type == RTN_MULTICAST) { 2211 flags |= RTCF_MULTICAST | RTCF_LOCAL; 2212 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, 2213 fl4->flowi4_proto)) 2214 flags &= ~RTCF_LOCAL; 2215 else 2216 do_cache = false; 2217 /* If multicast route do not exist use 2218 * default one, but do not gateway in this case. 2219 * Yes, it is hack. 2220 */ 2221 if (fi && res->prefixlen < 4) 2222 fi = NULL; 2223 } else if ((type == RTN_LOCAL) && (orig_oif != 0) && 2224 (orig_oif != dev_out->ifindex)) { 2225 /* For local routes that require a particular output interface 2226 * we do not want to cache the result. Caching the result 2227 * causes incorrect behaviour when there are multiple source 2228 * addresses on the interface, the end result being that if the 2229 * intended recipient is waiting on that interface for the 2230 * packet he won't receive it because it will be delivered on 2231 * the loopback interface and the IP_PKTINFO ipi_ifindex will 2232 * be set to the loopback interface as well. 2233 */ 2234 do_cache = false; 2235 } 2236 2237 fnhe = NULL; 2238 do_cache &= fi != NULL; 2239 if (fi) { 2240 struct rtable __rcu **prth; 2241 struct fib_nh *nh = &FIB_RES_NH(*res); 2242 2243 fnhe = find_exception(nh, fl4->daddr); 2244 if (!do_cache) 2245 goto add; 2246 if (fnhe) { 2247 prth = &fnhe->fnhe_rth_output; 2248 } else { 2249 if (unlikely(fl4->flowi4_flags & 2250 FLOWI_FLAG_KNOWN_NH && 2251 !(nh->nh_gw && 2252 nh->nh_scope == RT_SCOPE_LINK))) { 2253 do_cache = false; 2254 goto add; 2255 } 2256 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); 2257 } 2258 rth = rcu_dereference(*prth); 2259 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) 2260 return rth; 2261 } 2262 2263 add: 2264 rth = rt_dst_alloc(dev_out, flags, type, 2265 IN_DEV_CONF_GET(in_dev, NOPOLICY), 2266 IN_DEV_CONF_GET(in_dev, NOXFRM), 2267 do_cache); 2268 if (!rth) 2269 return ERR_PTR(-ENOBUFS); 2270 2271 rth->rt_iif = orig_oif; 2272 2273 RT_CACHE_STAT_INC(out_slow_tot); 2274 2275 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 2276 if (flags & RTCF_LOCAL && 2277 !(dev_out->flags & IFF_LOOPBACK)) { 2278 rth->dst.output = ip_mc_output; 2279 RT_CACHE_STAT_INC(out_slow_mc); 2280 } 2281 #ifdef CONFIG_IP_MROUTE 2282 if (type == RTN_MULTICAST) { 2283 if (IN_DEV_MFORWARD(in_dev) && 2284 !ipv4_is_local_multicast(fl4->daddr)) { 2285 rth->dst.input = ip_mr_input; 2286 rth->dst.output = ip_mc_output; 2287 } 2288 } 2289 #endif 2290 } 2291 2292 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache); 2293 lwtunnel_set_redirect(&rth->dst); 2294 2295 return rth; 2296 } 2297 2298 /* 2299 * Major route resolver routine. 2300 */ 2301 2302 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, 2303 const struct sk_buff *skb) 2304 { 2305 __u8 tos = RT_FL_TOS(fl4); 2306 struct fib_result res = { 2307 .type = RTN_UNSPEC, 2308 .fi = NULL, 2309 .table = NULL, 2310 .tclassid = 0, 2311 }; 2312 struct rtable *rth; 2313 2314 fl4->flowi4_iif = LOOPBACK_IFINDEX; 2315 fl4->flowi4_tos = tos & IPTOS_RT_MASK; 2316 fl4->flowi4_scope = ((tos & RTO_ONLINK) ? 2317 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); 2318 2319 rcu_read_lock(); 2320 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb); 2321 rcu_read_unlock(); 2322 2323 return rth; 2324 } 2325 EXPORT_SYMBOL_GPL(ip_route_output_key_hash); 2326 2327 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, 2328 struct fib_result *res, 2329 const struct sk_buff *skb) 2330 { 2331 struct net_device *dev_out = NULL; 2332 int orig_oif = fl4->flowi4_oif; 2333 unsigned int flags = 0; 2334 struct rtable *rth; 2335 int err = -ENETUNREACH; 2336 2337 if (fl4->saddr) { 2338 rth = ERR_PTR(-EINVAL); 2339 if (ipv4_is_multicast(fl4->saddr) || 2340 ipv4_is_lbcast(fl4->saddr) || 2341 ipv4_is_zeronet(fl4->saddr)) 2342 goto out; 2343 2344 /* I removed check for oif == dev_out->oif here. 2345 It was wrong for two reasons: 2346 1. ip_dev_find(net, saddr) can return wrong iface, if saddr 2347 is assigned to multiple interfaces. 2348 2. Moreover, we are allowed to send packets with saddr 2349 of another iface. --ANK 2350 */ 2351 2352 if (fl4->flowi4_oif == 0 && 2353 (ipv4_is_multicast(fl4->daddr) || 2354 ipv4_is_lbcast(fl4->daddr))) { 2355 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2356 dev_out = __ip_dev_find(net, fl4->saddr, false); 2357 if (!dev_out) 2358 goto out; 2359 2360 /* Special hack: user can direct multicasts 2361 and limited broadcast via necessary interface 2362 without fiddling with IP_MULTICAST_IF or IP_PKTINFO. 2363 This hack is not just for fun, it allows 2364 vic,vat and friends to work. 2365 They bind socket to loopback, set ttl to zero 2366 and expect that it will work. 2367 From the viewpoint of routing cache they are broken, 2368 because we are not allowed to build multicast path 2369 with loopback source addr (look, routing cache 2370 cannot know, that ttl is zero, so that packet 2371 will not leave this host and route is valid). 2372 Luckily, this hack is good workaround. 2373 */ 2374 2375 fl4->flowi4_oif = dev_out->ifindex; 2376 goto make_route; 2377 } 2378 2379 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) { 2380 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2381 if (!__ip_dev_find(net, fl4->saddr, false)) 2382 goto out; 2383 } 2384 } 2385 2386 2387 if (fl4->flowi4_oif) { 2388 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif); 2389 rth = ERR_PTR(-ENODEV); 2390 if (!dev_out) 2391 goto out; 2392 2393 /* RACE: Check return value of inet_select_addr instead. */ 2394 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) { 2395 rth = ERR_PTR(-ENETUNREACH); 2396 goto out; 2397 } 2398 if (ipv4_is_local_multicast(fl4->daddr) || 2399 ipv4_is_lbcast(fl4->daddr) || 2400 fl4->flowi4_proto == IPPROTO_IGMP) { 2401 if (!fl4->saddr) 2402 fl4->saddr = inet_select_addr(dev_out, 0, 2403 RT_SCOPE_LINK); 2404 goto make_route; 2405 } 2406 if (!fl4->saddr) { 2407 if (ipv4_is_multicast(fl4->daddr)) 2408 fl4->saddr = inet_select_addr(dev_out, 0, 2409 fl4->flowi4_scope); 2410 else if (!fl4->daddr) 2411 fl4->saddr = inet_select_addr(dev_out, 0, 2412 RT_SCOPE_HOST); 2413 } 2414 } 2415 2416 if (!fl4->daddr) { 2417 fl4->daddr = fl4->saddr; 2418 if (!fl4->daddr) 2419 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK); 2420 dev_out = net->loopback_dev; 2421 fl4->flowi4_oif = LOOPBACK_IFINDEX; 2422 res->type = RTN_LOCAL; 2423 flags |= RTCF_LOCAL; 2424 goto make_route; 2425 } 2426 2427 err = fib_lookup(net, fl4, res, 0); 2428 if (err) { 2429 res->fi = NULL; 2430 res->table = NULL; 2431 if (fl4->flowi4_oif && 2432 (ipv4_is_multicast(fl4->daddr) || 2433 !netif_index_is_l3_master(net, fl4->flowi4_oif))) { 2434 /* Apparently, routing tables are wrong. Assume, 2435 that the destination is on link. 2436 2437 WHY? DW. 2438 Because we are allowed to send to iface 2439 even if it has NO routes and NO assigned 2440 addresses. When oif is specified, routing 2441 tables are looked up with only one purpose: 2442 to catch if destination is gatewayed, rather than 2443 direct. Moreover, if MSG_DONTROUTE is set, 2444 we send packet, ignoring both routing tables 2445 and ifaddr state. --ANK 2446 2447 2448 We could make it even if oif is unknown, 2449 likely IPv6, but we do not. 2450 */ 2451 2452 if (fl4->saddr == 0) 2453 fl4->saddr = inet_select_addr(dev_out, 0, 2454 RT_SCOPE_LINK); 2455 res->type = RTN_UNICAST; 2456 goto make_route; 2457 } 2458 rth = ERR_PTR(err); 2459 goto out; 2460 } 2461 2462 if (res->type == RTN_LOCAL) { 2463 if (!fl4->saddr) { 2464 if (res->fi->fib_prefsrc) 2465 fl4->saddr = res->fi->fib_prefsrc; 2466 else 2467 fl4->saddr = fl4->daddr; 2468 } 2469 2470 /* L3 master device is the loopback for that domain */ 2471 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? : 2472 net->loopback_dev; 2473 2474 /* make sure orig_oif points to fib result device even 2475 * though packet rx/tx happens over loopback or l3mdev 2476 */ 2477 orig_oif = FIB_RES_OIF(*res); 2478 2479 fl4->flowi4_oif = dev_out->ifindex; 2480 flags |= RTCF_LOCAL; 2481 goto make_route; 2482 } 2483 2484 fib_select_path(net, res, fl4, skb); 2485 2486 dev_out = FIB_RES_DEV(*res); 2487 fl4->flowi4_oif = dev_out->ifindex; 2488 2489 2490 make_route: 2491 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); 2492 2493 out: 2494 return rth; 2495 } 2496 2497 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) 2498 { 2499 return NULL; 2500 } 2501 2502 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst) 2503 { 2504 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 2505 2506 return mtu ? : dst->dev->mtu; 2507 } 2508 2509 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, 2510 struct sk_buff *skb, u32 mtu) 2511 { 2512 } 2513 2514 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, 2515 struct sk_buff *skb) 2516 { 2517 } 2518 2519 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst, 2520 unsigned long old) 2521 { 2522 return NULL; 2523 } 2524 2525 static struct dst_ops ipv4_dst_blackhole_ops = { 2526 .family = AF_INET, 2527 .check = ipv4_blackhole_dst_check, 2528 .mtu = ipv4_blackhole_mtu, 2529 .default_advmss = ipv4_default_advmss, 2530 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2531 .redirect = ipv4_rt_blackhole_redirect, 2532 .cow_metrics = ipv4_rt_blackhole_cow_metrics, 2533 .neigh_lookup = ipv4_neigh_lookup, 2534 }; 2535 2536 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2537 { 2538 struct rtable *ort = (struct rtable *) dst_orig; 2539 struct rtable *rt; 2540 2541 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0); 2542 if (rt) { 2543 struct dst_entry *new = &rt->dst; 2544 2545 new->__use = 1; 2546 new->input = dst_discard; 2547 new->output = dst_discard_out; 2548 2549 new->dev = net->loopback_dev; 2550 if (new->dev) 2551 dev_hold(new->dev); 2552 2553 rt->rt_is_input = ort->rt_is_input; 2554 rt->rt_iif = ort->rt_iif; 2555 rt->rt_pmtu = ort->rt_pmtu; 2556 rt->rt_mtu_locked = ort->rt_mtu_locked; 2557 2558 rt->rt_genid = rt_genid_ipv4(net); 2559 rt->rt_flags = ort->rt_flags; 2560 rt->rt_type = ort->rt_type; 2561 rt->rt_gateway = ort->rt_gateway; 2562 rt->rt_uses_gateway = ort->rt_uses_gateway; 2563 2564 INIT_LIST_HEAD(&rt->rt_uncached); 2565 } 2566 2567 dst_release(dst_orig); 2568 2569 return rt ? &rt->dst : ERR_PTR(-ENOMEM); 2570 } 2571 2572 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, 2573 const struct sock *sk) 2574 { 2575 struct rtable *rt = __ip_route_output_key(net, flp4); 2576 2577 if (IS_ERR(rt)) 2578 return rt; 2579 2580 if (flp4->flowi4_proto) 2581 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, 2582 flowi4_to_flowi(flp4), 2583 sk, 0); 2584 2585 return rt; 2586 } 2587 EXPORT_SYMBOL_GPL(ip_route_output_flow); 2588 2589 /* called with rcu_read_lock held */ 2590 static int rt_fill_info(struct net *net, __be32 dst, __be32 src, 2591 struct rtable *rt, u32 table_id, struct flowi4 *fl4, 2592 struct sk_buff *skb, u32 portid, u32 seq) 2593 { 2594 struct rtmsg *r; 2595 struct nlmsghdr *nlh; 2596 unsigned long expires = 0; 2597 u32 error; 2598 u32 metrics[RTAX_MAX]; 2599 2600 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0); 2601 if (!nlh) 2602 return -EMSGSIZE; 2603 2604 r = nlmsg_data(nlh); 2605 r->rtm_family = AF_INET; 2606 r->rtm_dst_len = 32; 2607 r->rtm_src_len = 0; 2608 r->rtm_tos = fl4->flowi4_tos; 2609 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT; 2610 if (nla_put_u32(skb, RTA_TABLE, table_id)) 2611 goto nla_put_failure; 2612 r->rtm_type = rt->rt_type; 2613 r->rtm_scope = RT_SCOPE_UNIVERSE; 2614 r->rtm_protocol = RTPROT_UNSPEC; 2615 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; 2616 if (rt->rt_flags & RTCF_NOTIFY) 2617 r->rtm_flags |= RTM_F_NOTIFY; 2618 if (IPCB(skb)->flags & IPSKB_DOREDIRECT) 2619 r->rtm_flags |= RTCF_DOREDIRECT; 2620 2621 if (nla_put_in_addr(skb, RTA_DST, dst)) 2622 goto nla_put_failure; 2623 if (src) { 2624 r->rtm_src_len = 32; 2625 if (nla_put_in_addr(skb, RTA_SRC, src)) 2626 goto nla_put_failure; 2627 } 2628 if (rt->dst.dev && 2629 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) 2630 goto nla_put_failure; 2631 #ifdef CONFIG_IP_ROUTE_CLASSID 2632 if (rt->dst.tclassid && 2633 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) 2634 goto nla_put_failure; 2635 #endif 2636 if (!rt_is_input_route(rt) && 2637 fl4->saddr != src) { 2638 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr)) 2639 goto nla_put_failure; 2640 } 2641 if (rt->rt_uses_gateway && 2642 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway)) 2643 goto nla_put_failure; 2644 2645 expires = rt->dst.expires; 2646 if (expires) { 2647 unsigned long now = jiffies; 2648 2649 if (time_before(now, expires)) 2650 expires -= now; 2651 else 2652 expires = 0; 2653 } 2654 2655 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 2656 if (rt->rt_pmtu && expires) 2657 metrics[RTAX_MTU - 1] = rt->rt_pmtu; 2658 if (rt->rt_mtu_locked && expires) 2659 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU); 2660 if (rtnetlink_put_metrics(skb, metrics) < 0) 2661 goto nla_put_failure; 2662 2663 if (fl4->flowi4_mark && 2664 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark)) 2665 goto nla_put_failure; 2666 2667 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) && 2668 nla_put_u32(skb, RTA_UID, 2669 from_kuid_munged(current_user_ns(), fl4->flowi4_uid))) 2670 goto nla_put_failure; 2671 2672 error = rt->dst.error; 2673 2674 if (rt_is_input_route(rt)) { 2675 #ifdef CONFIG_IP_MROUTE 2676 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && 2677 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { 2678 int err = ipmr_get_route(net, skb, 2679 fl4->saddr, fl4->daddr, 2680 r, portid); 2681 2682 if (err <= 0) { 2683 if (err == 0) 2684 return 0; 2685 goto nla_put_failure; 2686 } 2687 } else 2688 #endif 2689 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif)) 2690 goto nla_put_failure; 2691 } 2692 2693 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) 2694 goto nla_put_failure; 2695 2696 nlmsg_end(skb, nlh); 2697 return 0; 2698 2699 nla_put_failure: 2700 nlmsg_cancel(skb, nlh); 2701 return -EMSGSIZE; 2702 } 2703 2704 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst, 2705 u8 ip_proto, __be16 sport, 2706 __be16 dport) 2707 { 2708 struct sk_buff *skb; 2709 struct iphdr *iph; 2710 2711 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2712 if (!skb) 2713 return NULL; 2714 2715 /* Reserve room for dummy headers, this skb can pass 2716 * through good chunk of routing engine. 2717 */ 2718 skb_reset_mac_header(skb); 2719 skb_reset_network_header(skb); 2720 skb->protocol = htons(ETH_P_IP); 2721 iph = skb_put(skb, sizeof(struct iphdr)); 2722 iph->protocol = ip_proto; 2723 iph->saddr = src; 2724 iph->daddr = dst; 2725 iph->version = 0x4; 2726 iph->frag_off = 0; 2727 iph->ihl = 0x5; 2728 skb_set_transport_header(skb, skb->len); 2729 2730 switch (iph->protocol) { 2731 case IPPROTO_UDP: { 2732 struct udphdr *udph; 2733 2734 udph = skb_put_zero(skb, sizeof(struct udphdr)); 2735 udph->source = sport; 2736 udph->dest = dport; 2737 udph->len = sizeof(struct udphdr); 2738 udph->check = 0; 2739 break; 2740 } 2741 case IPPROTO_TCP: { 2742 struct tcphdr *tcph; 2743 2744 tcph = skb_put_zero(skb, sizeof(struct tcphdr)); 2745 tcph->source = sport; 2746 tcph->dest = dport; 2747 tcph->doff = sizeof(struct tcphdr) / 4; 2748 tcph->rst = 1; 2749 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), 2750 src, dst, 0); 2751 break; 2752 } 2753 case IPPROTO_ICMP: { 2754 struct icmphdr *icmph; 2755 2756 icmph = skb_put_zero(skb, sizeof(struct icmphdr)); 2757 icmph->type = ICMP_ECHO; 2758 icmph->code = 0; 2759 } 2760 } 2761 2762 return skb; 2763 } 2764 2765 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 2766 struct netlink_ext_ack *extack) 2767 { 2768 struct net *net = sock_net(in_skb->sk); 2769 struct nlattr *tb[RTA_MAX+1]; 2770 u32 table_id = RT_TABLE_MAIN; 2771 __be16 sport = 0, dport = 0; 2772 struct fib_result res = {}; 2773 u8 ip_proto = IPPROTO_UDP; 2774 struct rtable *rt = NULL; 2775 struct sk_buff *skb; 2776 struct rtmsg *rtm; 2777 struct flowi4 fl4 = {}; 2778 __be32 dst = 0; 2779 __be32 src = 0; 2780 kuid_t uid; 2781 u32 iif; 2782 int err; 2783 int mark; 2784 2785 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy, 2786 extack); 2787 if (err < 0) 2788 return err; 2789 2790 rtm = nlmsg_data(nlh); 2791 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; 2792 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0; 2793 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; 2794 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0; 2795 if (tb[RTA_UID]) 2796 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID])); 2797 else 2798 uid = (iif ? INVALID_UID : current_uid()); 2799 2800 if (tb[RTA_IP_PROTO]) { 2801 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO], 2802 &ip_proto, extack); 2803 if (err) 2804 return err; 2805 } 2806 2807 if (tb[RTA_SPORT]) 2808 sport = nla_get_be16(tb[RTA_SPORT]); 2809 2810 if (tb[RTA_DPORT]) 2811 dport = nla_get_be16(tb[RTA_DPORT]); 2812 2813 skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport); 2814 if (!skb) 2815 return -ENOBUFS; 2816 2817 fl4.daddr = dst; 2818 fl4.saddr = src; 2819 fl4.flowi4_tos = rtm->rtm_tos; 2820 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0; 2821 fl4.flowi4_mark = mark; 2822 fl4.flowi4_uid = uid; 2823 if (sport) 2824 fl4.fl4_sport = sport; 2825 if (dport) 2826 fl4.fl4_dport = dport; 2827 fl4.flowi4_proto = ip_proto; 2828 2829 rcu_read_lock(); 2830 2831 if (iif) { 2832 struct net_device *dev; 2833 2834 dev = dev_get_by_index_rcu(net, iif); 2835 if (!dev) { 2836 err = -ENODEV; 2837 goto errout_rcu; 2838 } 2839 2840 fl4.flowi4_iif = iif; /* for rt_fill_info */ 2841 skb->dev = dev; 2842 skb->mark = mark; 2843 err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos, 2844 dev, &res); 2845 2846 rt = skb_rtable(skb); 2847 if (err == 0 && rt->dst.error) 2848 err = -rt->dst.error; 2849 } else { 2850 fl4.flowi4_iif = LOOPBACK_IFINDEX; 2851 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); 2852 err = 0; 2853 if (IS_ERR(rt)) 2854 err = PTR_ERR(rt); 2855 else 2856 skb_dst_set(skb, &rt->dst); 2857 } 2858 2859 if (err) 2860 goto errout_rcu; 2861 2862 if (rtm->rtm_flags & RTM_F_NOTIFY) 2863 rt->rt_flags |= RTCF_NOTIFY; 2864 2865 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE) 2866 table_id = res.table ? res.table->tb_id : 0; 2867 2868 /* reset skb for netlink reply msg */ 2869 skb_trim(skb, 0); 2870 skb_reset_network_header(skb); 2871 skb_reset_transport_header(skb); 2872 skb_reset_mac_header(skb); 2873 2874 if (rtm->rtm_flags & RTM_F_FIB_MATCH) { 2875 if (!res.fi) { 2876 err = fib_props[res.type].error; 2877 if (!err) 2878 err = -EHOSTUNREACH; 2879 goto errout_rcu; 2880 } 2881 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid, 2882 nlh->nlmsg_seq, RTM_NEWROUTE, table_id, 2883 rt->rt_type, res.prefix, res.prefixlen, 2884 fl4.flowi4_tos, res.fi, 0); 2885 } else { 2886 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb, 2887 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq); 2888 } 2889 if (err < 0) 2890 goto errout_rcu; 2891 2892 rcu_read_unlock(); 2893 2894 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 2895 2896 errout_free: 2897 return err; 2898 errout_rcu: 2899 rcu_read_unlock(); 2900 kfree_skb(skb); 2901 goto errout_free; 2902 } 2903 2904 void ip_rt_multicast_event(struct in_device *in_dev) 2905 { 2906 rt_cache_flush(dev_net(in_dev->dev)); 2907 } 2908 2909 #ifdef CONFIG_SYSCTL 2910 static int ip_rt_gc_interval __read_mostly = 60 * HZ; 2911 static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 2912 static int ip_rt_gc_elasticity __read_mostly = 8; 2913 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU; 2914 2915 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, 2916 void __user *buffer, 2917 size_t *lenp, loff_t *ppos) 2918 { 2919 struct net *net = (struct net *)__ctl->extra1; 2920 2921 if (write) { 2922 rt_cache_flush(net); 2923 fnhe_genid_bump(net); 2924 return 0; 2925 } 2926 2927 return -EINVAL; 2928 } 2929 2930 static struct ctl_table ipv4_route_table[] = { 2931 { 2932 .procname = "gc_thresh", 2933 .data = &ipv4_dst_ops.gc_thresh, 2934 .maxlen = sizeof(int), 2935 .mode = 0644, 2936 .proc_handler = proc_dointvec, 2937 }, 2938 { 2939 .procname = "max_size", 2940 .data = &ip_rt_max_size, 2941 .maxlen = sizeof(int), 2942 .mode = 0644, 2943 .proc_handler = proc_dointvec, 2944 }, 2945 { 2946 /* Deprecated. Use gc_min_interval_ms */ 2947 2948 .procname = "gc_min_interval", 2949 .data = &ip_rt_gc_min_interval, 2950 .maxlen = sizeof(int), 2951 .mode = 0644, 2952 .proc_handler = proc_dointvec_jiffies, 2953 }, 2954 { 2955 .procname = "gc_min_interval_ms", 2956 .data = &ip_rt_gc_min_interval, 2957 .maxlen = sizeof(int), 2958 .mode = 0644, 2959 .proc_handler = proc_dointvec_ms_jiffies, 2960 }, 2961 { 2962 .procname = "gc_timeout", 2963 .data = &ip_rt_gc_timeout, 2964 .maxlen = sizeof(int), 2965 .mode = 0644, 2966 .proc_handler = proc_dointvec_jiffies, 2967 }, 2968 { 2969 .procname = "gc_interval", 2970 .data = &ip_rt_gc_interval, 2971 .maxlen = sizeof(int), 2972 .mode = 0644, 2973 .proc_handler = proc_dointvec_jiffies, 2974 }, 2975 { 2976 .procname = "redirect_load", 2977 .data = &ip_rt_redirect_load, 2978 .maxlen = sizeof(int), 2979 .mode = 0644, 2980 .proc_handler = proc_dointvec, 2981 }, 2982 { 2983 .procname = "redirect_number", 2984 .data = &ip_rt_redirect_number, 2985 .maxlen = sizeof(int), 2986 .mode = 0644, 2987 .proc_handler = proc_dointvec, 2988 }, 2989 { 2990 .procname = "redirect_silence", 2991 .data = &ip_rt_redirect_silence, 2992 .maxlen = sizeof(int), 2993 .mode = 0644, 2994 .proc_handler = proc_dointvec, 2995 }, 2996 { 2997 .procname = "error_cost", 2998 .data = &ip_rt_error_cost, 2999 .maxlen = sizeof(int), 3000 .mode = 0644, 3001 .proc_handler = proc_dointvec, 3002 }, 3003 { 3004 .procname = "error_burst", 3005 .data = &ip_rt_error_burst, 3006 .maxlen = sizeof(int), 3007 .mode = 0644, 3008 .proc_handler = proc_dointvec, 3009 }, 3010 { 3011 .procname = "gc_elasticity", 3012 .data = &ip_rt_gc_elasticity, 3013 .maxlen = sizeof(int), 3014 .mode = 0644, 3015 .proc_handler = proc_dointvec, 3016 }, 3017 { 3018 .procname = "mtu_expires", 3019 .data = &ip_rt_mtu_expires, 3020 .maxlen = sizeof(int), 3021 .mode = 0644, 3022 .proc_handler = proc_dointvec_jiffies, 3023 }, 3024 { 3025 .procname = "min_pmtu", 3026 .data = &ip_rt_min_pmtu, 3027 .maxlen = sizeof(int), 3028 .mode = 0644, 3029 .proc_handler = proc_dointvec_minmax, 3030 .extra1 = &ip_min_valid_pmtu, 3031 }, 3032 { 3033 .procname = "min_adv_mss", 3034 .data = &ip_rt_min_advmss, 3035 .maxlen = sizeof(int), 3036 .mode = 0644, 3037 .proc_handler = proc_dointvec, 3038 }, 3039 { } 3040 }; 3041 3042 static struct ctl_table ipv4_route_flush_table[] = { 3043 { 3044 .procname = "flush", 3045 .maxlen = sizeof(int), 3046 .mode = 0200, 3047 .proc_handler = ipv4_sysctl_rtcache_flush, 3048 }, 3049 { }, 3050 }; 3051 3052 static __net_init int sysctl_route_net_init(struct net *net) 3053 { 3054 struct ctl_table *tbl; 3055 3056 tbl = ipv4_route_flush_table; 3057 if (!net_eq(net, &init_net)) { 3058 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); 3059 if (!tbl) 3060 goto err_dup; 3061 3062 /* Don't export sysctls to unprivileged users */ 3063 if (net->user_ns != &init_user_ns) 3064 tbl[0].procname = NULL; 3065 } 3066 tbl[0].extra1 = net; 3067 3068 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); 3069 if (!net->ipv4.route_hdr) 3070 goto err_reg; 3071 return 0; 3072 3073 err_reg: 3074 if (tbl != ipv4_route_flush_table) 3075 kfree(tbl); 3076 err_dup: 3077 return -ENOMEM; 3078 } 3079 3080 static __net_exit void sysctl_route_net_exit(struct net *net) 3081 { 3082 struct ctl_table *tbl; 3083 3084 tbl = net->ipv4.route_hdr->ctl_table_arg; 3085 unregister_net_sysctl_table(net->ipv4.route_hdr); 3086 BUG_ON(tbl == ipv4_route_flush_table); 3087 kfree(tbl); 3088 } 3089 3090 static __net_initdata struct pernet_operations sysctl_route_ops = { 3091 .init = sysctl_route_net_init, 3092 .exit = sysctl_route_net_exit, 3093 }; 3094 #endif 3095 3096 static __net_init int rt_genid_init(struct net *net) 3097 { 3098 atomic_set(&net->ipv4.rt_genid, 0); 3099 atomic_set(&net->fnhe_genid, 0); 3100 atomic_set(&net->ipv4.dev_addr_genid, get_random_int()); 3101 return 0; 3102 } 3103 3104 static __net_initdata struct pernet_operations rt_genid_ops = { 3105 .init = rt_genid_init, 3106 }; 3107 3108 static int __net_init ipv4_inetpeer_init(struct net *net) 3109 { 3110 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL); 3111 3112 if (!bp) 3113 return -ENOMEM; 3114 inet_peer_base_init(bp); 3115 net->ipv4.peers = bp; 3116 return 0; 3117 } 3118 3119 static void __net_exit ipv4_inetpeer_exit(struct net *net) 3120 { 3121 struct inet_peer_base *bp = net->ipv4.peers; 3122 3123 net->ipv4.peers = NULL; 3124 inetpeer_invalidate_tree(bp); 3125 kfree(bp); 3126 } 3127 3128 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = { 3129 .init = ipv4_inetpeer_init, 3130 .exit = ipv4_inetpeer_exit, 3131 }; 3132 3133 #ifdef CONFIG_IP_ROUTE_CLASSID 3134 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; 3135 #endif /* CONFIG_IP_ROUTE_CLASSID */ 3136 3137 int __init ip_rt_init(void) 3138 { 3139 int cpu; 3140 3141 ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents), 3142 GFP_KERNEL); 3143 if (!ip_idents) 3144 panic("IP: failed to allocate ip_idents\n"); 3145 3146 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); 3147 3148 ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL); 3149 if (!ip_tstamps) 3150 panic("IP: failed to allocate ip_tstamps\n"); 3151 3152 for_each_possible_cpu(cpu) { 3153 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu); 3154 3155 INIT_LIST_HEAD(&ul->head); 3156 spin_lock_init(&ul->lock); 3157 } 3158 #ifdef CONFIG_IP_ROUTE_CLASSID 3159 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); 3160 if (!ip_rt_acct) 3161 panic("IP: failed to allocate ip_rt_acct\n"); 3162 #endif 3163 3164 ipv4_dst_ops.kmem_cachep = 3165 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0, 3166 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3167 3168 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep; 3169 3170 if (dst_entries_init(&ipv4_dst_ops) < 0) 3171 panic("IP: failed to allocate ipv4_dst_ops counter\n"); 3172 3173 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0) 3174 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n"); 3175 3176 ipv4_dst_ops.gc_thresh = ~0; 3177 ip_rt_max_size = INT_MAX; 3178 3179 devinet_init(); 3180 ip_fib_init(); 3181 3182 if (ip_rt_proc_init()) 3183 pr_err("Unable to create route proc files\n"); 3184 #ifdef CONFIG_XFRM 3185 xfrm_init(); 3186 xfrm4_init(); 3187 #endif 3188 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, 3189 RTNL_FLAG_DOIT_UNLOCKED); 3190 3191 #ifdef CONFIG_SYSCTL 3192 register_pernet_subsys(&sysctl_route_ops); 3193 #endif 3194 register_pernet_subsys(&rt_genid_ops); 3195 register_pernet_subsys(&ipv4_inetpeer_ops); 3196 return 0; 3197 } 3198 3199 #ifdef CONFIG_SYSCTL 3200 /* 3201 * We really need to sanitize the damn ipv4 init order, then all 3202 * this nonsense will go away. 3203 */ 3204 void __init ip_static_sysctl_init(void) 3205 { 3206 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table); 3207 } 3208 #endif 3209