xref: /openbmc/linux/net/ipv4/route.c (revision 48d54403)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		ROUTE - implementation of the IP router.
8  *
9  * Authors:	Ross Biro
10  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
13  *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
14  *
15  * Fixes:
16  *		Alan Cox	:	Verify area fixes.
17  *		Alan Cox	:	cli() protects routing changes
18  *		Rui Oliveira	:	ICMP routing table updates
19  *		(rco@di.uminho.pt)	Routing table insertion and update
20  *		Linus Torvalds	:	Rewrote bits to be sensible
21  *		Alan Cox	:	Added BSD route gw semantics
22  *		Alan Cox	:	Super /proc >4K
23  *		Alan Cox	:	MTU in route table
24  *		Alan Cox	:	MSS actually. Also added the window
25  *					clamper.
26  *		Sam Lantinga	:	Fixed route matching in rt_del()
27  *		Alan Cox	:	Routing cache support.
28  *		Alan Cox	:	Removed compatibility cruft.
29  *		Alan Cox	:	RTF_REJECT support.
30  *		Alan Cox	:	TCP irtt support.
31  *		Jonathan Naylor	:	Added Metric support.
32  *	Miquel van Smoorenburg	:	BSD API fixes.
33  *	Miquel van Smoorenburg	:	Metrics.
34  *		Alan Cox	:	Use __u32 properly
35  *		Alan Cox	:	Aligned routing errors more closely with BSD
36  *					our system is still very different.
37  *		Alan Cox	:	Faster /proc handling
38  *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
39  *					routing caches and better behaviour.
40  *
41  *		Olaf Erb	:	irtt wasn't being copied right.
42  *		Bjorn Ekwall	:	Kerneld route support.
43  *		Alan Cox	:	Multicast fixed (I hope)
44  *		Pavel Krauz	:	Limited broadcast fixed
45  *		Mike McLagan	:	Routing by source
46  *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
47  *					route.c and rewritten from scratch.
48  *		Andi Kleen	:	Load-limit warning messages.
49  *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
50  *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
51  *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
52  *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
53  *		Marc Boucher	:	routing by fwmark
54  *	Robert Olsson		:	Added rt_cache statistics
55  *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
56  *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
57  *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
58  *	Ilia Sotnikov		:	Removed TOS from hash calculations
59  */
60 
61 #define pr_fmt(fmt) "IPv4: " fmt
62 
63 #include <linux/module.h>
64 #include <linux/uaccess.h>
65 #include <linux/bitops.h>
66 #include <linux/types.h>
67 #include <linux/kernel.h>
68 #include <linux/mm.h>
69 #include <linux/memblock.h>
70 #include <linux/string.h>
71 #include <linux/socket.h>
72 #include <linux/sockios.h>
73 #include <linux/errno.h>
74 #include <linux/in.h>
75 #include <linux/inet.h>
76 #include <linux/netdevice.h>
77 #include <linux/proc_fs.h>
78 #include <linux/init.h>
79 #include <linux/skbuff.h>
80 #include <linux/inetdevice.h>
81 #include <linux/igmp.h>
82 #include <linux/pkt_sched.h>
83 #include <linux/mroute.h>
84 #include <linux/netfilter_ipv4.h>
85 #include <linux/random.h>
86 #include <linux/rcupdate.h>
87 #include <linux/times.h>
88 #include <linux/slab.h>
89 #include <linux/jhash.h>
90 #include <net/dst.h>
91 #include <net/dst_metadata.h>
92 #include <net/net_namespace.h>
93 #include <net/protocol.h>
94 #include <net/ip.h>
95 #include <net/route.h>
96 #include <net/inetpeer.h>
97 #include <net/sock.h>
98 #include <net/ip_fib.h>
99 #include <net/nexthop.h>
100 #include <net/arp.h>
101 #include <net/tcp.h>
102 #include <net/icmp.h>
103 #include <net/xfrm.h>
104 #include <net/lwtunnel.h>
105 #include <net/netevent.h>
106 #include <net/rtnetlink.h>
107 #ifdef CONFIG_SYSCTL
108 #include <linux/sysctl.h>
109 #endif
110 #include <net/secure_seq.h>
111 #include <net/ip_tunnels.h>
112 #include <net/l3mdev.h>
113 
114 #include "fib_lookup.h"
115 
116 #define RT_FL_TOS(oldflp4) \
117 	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
118 
119 #define RT_GC_TIMEOUT (300*HZ)
120 
121 static int ip_rt_max_size;
122 static int ip_rt_redirect_number __read_mostly	= 9;
123 static int ip_rt_redirect_load __read_mostly	= HZ / 50;
124 static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
125 static int ip_rt_error_cost __read_mostly	= HZ;
126 static int ip_rt_error_burst __read_mostly	= 5 * HZ;
127 static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
128 static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
129 static int ip_rt_min_advmss __read_mostly	= 256;
130 
131 static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
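/* Worked example of the defaults above: ip_rt_redirect_load is HZ / 50,
 * i.e. 20 ms worth of jiffies, and ip_rt_redirect_silence is
 * (HZ / 50) << 10 = 1024 * 20 ms, roughly 20.5 seconds. ip_rt_min_pmtu
 * is 512 + 20 + 20 = 552 bytes: 512 bytes of payload plus minimal IPv4
 * and TCP headers. ip_rt_mtu_expires is 10 * 60 * HZ, so a learned PMTU
 * is kept for ten minutes. Most of these are tunable at runtime through
 * the net.ipv4.route.* sysctls.
 */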
132 
133 /*
134  *	Interface to generic destination cache.
135  */
136 
137 INDIRECT_CALLABLE_SCOPE
138 struct dst_entry	*ipv4_dst_check(struct dst_entry *dst, u32 cookie);
139 static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
140 INDIRECT_CALLABLE_SCOPE
141 unsigned int		ipv4_mtu(const struct dst_entry *dst);
142 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
143 static void		 ipv4_link_failure(struct sk_buff *skb);
144 static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
145 					   struct sk_buff *skb, u32 mtu,
146 					   bool confirm_neigh);
147 static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
148 					struct sk_buff *skb);
149 static void		ipv4_dst_destroy(struct dst_entry *dst);
150 
151 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
152 {
153 	WARN_ON(1);
154 	return NULL;
155 }
156 
157 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
158 					   struct sk_buff *skb,
159 					   const void *daddr);
160 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
161 
162 static struct dst_ops ipv4_dst_ops = {
163 	.family =		AF_INET,
164 	.check =		ipv4_dst_check,
165 	.default_advmss =	ipv4_default_advmss,
166 	.mtu =			ipv4_mtu,
167 	.cow_metrics =		ipv4_cow_metrics,
168 	.destroy =		ipv4_dst_destroy,
169 	.negative_advice =	ipv4_negative_advice,
170 	.link_failure =		ipv4_link_failure,
171 	.update_pmtu =		ip_rt_update_pmtu,
172 	.redirect =		ip_do_redirect,
173 	.local_out =		__ip_local_out,
174 	.neigh_lookup =		ipv4_neigh_lookup,
175 	.confirm_neigh =	ipv4_confirm_neigh,
176 };
177 
178 #define ECN_OR_COST(class)	TC_PRIO_##class
179 
180 const __u8 ip_tos2prio[16] = {
181 	TC_PRIO_BESTEFFORT,
182 	ECN_OR_COST(BESTEFFORT),
183 	TC_PRIO_BESTEFFORT,
184 	ECN_OR_COST(BESTEFFORT),
185 	TC_PRIO_BULK,
186 	ECN_OR_COST(BULK),
187 	TC_PRIO_BULK,
188 	ECN_OR_COST(BULK),
189 	TC_PRIO_INTERACTIVE,
190 	ECN_OR_COST(INTERACTIVE),
191 	TC_PRIO_INTERACTIVE,
192 	ECN_OR_COST(INTERACTIVE),
193 	TC_PRIO_INTERACTIVE_BULK,
194 	ECN_OR_COST(INTERACTIVE_BULK),
195 	TC_PRIO_INTERACTIVE_BULK,
196 	ECN_OR_COST(INTERACTIVE_BULK)
197 };
198 EXPORT_SYMBOL(ip_tos2prio);
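/* The table above is indexed by the four TOS bits, with the low
 * "must be zero" bit folded away. The rt_tos2priority() helper in
 * <net/route.h> reduces to this indexing (sketch):
 *
 *	static inline char rt_tos2priority(u8 tos)
 *	{
 *		return ip_tos2prio[IPTOS_TOS(tos) >> 1];
 *	}
 *
 * For example, tos 0x10 (IPTOS_LOWDELAY) gives index 8, which maps to
 * TC_PRIO_INTERACTIVE.
 */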
199 
200 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
201 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
202 
203 #ifdef CONFIG_PROC_FS
204 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
205 {
206 	if (*pos)
207 		return NULL;
208 	return SEQ_START_TOKEN;
209 }
210 
211 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
212 {
213 	++*pos;
214 	return NULL;
215 }
216 
217 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
218 {
219 }
220 
221 static int rt_cache_seq_show(struct seq_file *seq, void *v)
222 {
223 	if (v == SEQ_START_TOKEN)
224 		seq_printf(seq, "%-127s\n",
225 			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
226 			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
227 			   "HHUptod\tSpecDst");
228 	return 0;
229 }
230 
231 static const struct seq_operations rt_cache_seq_ops = {
232 	.start  = rt_cache_seq_start,
233 	.next   = rt_cache_seq_next,
234 	.stop   = rt_cache_seq_stop,
235 	.show   = rt_cache_seq_show,
236 };
237 
238 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
239 {
240 	int cpu;
241 
242 	if (*pos == 0)
243 		return SEQ_START_TOKEN;
244 
245 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
246 		if (!cpu_possible(cpu))
247 			continue;
248 		*pos = cpu+1;
249 		return &per_cpu(rt_cache_stat, cpu);
250 	}
251 	return NULL;
252 }
253 
254 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
255 {
256 	int cpu;
257 
258 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
259 		if (!cpu_possible(cpu))
260 			continue;
261 		*pos = cpu+1;
262 		return &per_cpu(rt_cache_stat, cpu);
263 	}
264 	(*pos)++;
265 	return NULL;
266 
267 }
268 
269 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
270 {
271 
272 }
273 
274 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
275 {
276 	struct rt_cache_stat *st = v;
277 
278 	if (v == SEQ_START_TOKEN) {
279 		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
280 		return 0;
281 	}
282 
283 	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
284 		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
285 		   dst_entries_get_slow(&ipv4_dst_ops),
286 		   0, /* st->in_hit */
287 		   st->in_slow_tot,
288 		   st->in_slow_mc,
289 		   st->in_no_route,
290 		   st->in_brd,
291 		   st->in_martian_dst,
292 		   st->in_martian_src,
293 
294 		   0, /* st->out_hit */
295 		   st->out_slow_tot,
296 		   st->out_slow_mc,
297 
298 		   0, /* st->gc_total */
299 		   0, /* st->gc_ignored */
300 		   0, /* st->gc_goal_miss */
301 		   0, /* st->gc_dst_overflow */
302 		   0, /* st->in_hlist_search */
303 		   0  /* st->out_hlist_search */
304 		);
305 	return 0;
306 }
307 
308 static const struct seq_operations rt_cpu_seq_ops = {
309 	.start  = rt_cpu_seq_start,
310 	.next   = rt_cpu_seq_next,
311 	.stop   = rt_cpu_seq_stop,
312 	.show   = rt_cpu_seq_show,
313 };
314 
315 #ifdef CONFIG_IP_ROUTE_CLASSID
316 static int rt_acct_proc_show(struct seq_file *m, void *v)
317 {
318 	struct ip_rt_acct *dst, *src;
319 	unsigned int i, j;
320 
321 	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
322 	if (!dst)
323 		return -ENOMEM;
324 
325 	for_each_possible_cpu(i) {
326 		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
327 		for (j = 0; j < 256; j++) {
328 			dst[j].o_bytes   += src[j].o_bytes;
329 			dst[j].o_packets += src[j].o_packets;
330 			dst[j].i_bytes   += src[j].i_bytes;
331 			dst[j].i_packets += src[j].i_packets;
332 		}
333 	}
334 
335 	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
336 	kfree(dst);
337 	return 0;
338 }
339 #endif
340 
341 static int __net_init ip_rt_do_proc_init(struct net *net)
342 {
343 	struct proc_dir_entry *pde;
344 
345 	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
346 			      &rt_cache_seq_ops);
347 	if (!pde)
348 		goto err1;
349 
350 	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
351 			      &rt_cpu_seq_ops);
352 	if (!pde)
353 		goto err2;
354 
355 #ifdef CONFIG_IP_ROUTE_CLASSID
356 	pde = proc_create_single("rt_acct", 0, net->proc_net,
357 			rt_acct_proc_show);
358 	if (!pde)
359 		goto err3;
360 #endif
361 	return 0;
362 
363 #ifdef CONFIG_IP_ROUTE_CLASSID
364 err3:
365 	remove_proc_entry("rt_cache", net->proc_net_stat);
366 #endif
367 err2:
368 	remove_proc_entry("rt_cache", net->proc_net);
369 err1:
370 	return -ENOMEM;
371 }
372 
373 static void __net_exit ip_rt_do_proc_exit(struct net *net)
374 {
375 	remove_proc_entry("rt_cache", net->proc_net_stat);
376 	remove_proc_entry("rt_cache", net->proc_net);
377 #ifdef CONFIG_IP_ROUTE_CLASSID
378 	remove_proc_entry("rt_acct", net->proc_net);
379 #endif
380 }
381 
382 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
383 	.init = ip_rt_do_proc_init,
384 	.exit = ip_rt_do_proc_exit,
385 };
386 
387 static int __init ip_rt_proc_init(void)
388 {
389 	return register_pernet_subsys(&ip_rt_proc_ops);
390 }
391 
392 #else
393 static inline int ip_rt_proc_init(void)
394 {
395 	return 0;
396 }
397 #endif /* CONFIG_PROC_FS */
398 
399 static inline bool rt_is_expired(const struct rtable *rth)
400 {
401 	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
402 }
403 
404 void rt_cache_flush(struct net *net)
405 {
406 	rt_genid_bump_ipv4(net);
407 }
408 
409 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
410 					   struct sk_buff *skb,
411 					   const void *daddr)
412 {
413 	const struct rtable *rt = container_of(dst, struct rtable, dst);
414 	struct net_device *dev = dst->dev;
415 	struct neighbour *n;
416 
417 	rcu_read_lock_bh();
418 
419 	if (likely(rt->rt_gw_family == AF_INET)) {
420 		n = ip_neigh_gw4(dev, rt->rt_gw4);
421 	} else if (rt->rt_gw_family == AF_INET6) {
422 		n = ip_neigh_gw6(dev, &rt->rt_gw6);
423 	} else {
424 		__be32 pkey;
425 
426 		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
427 		n = ip_neigh_gw4(dev, pkey);
428 	}
429 
430 	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
431 		n = NULL;
432 
433 	rcu_read_unlock_bh();
434 
435 	return n;
436 }
437 
438 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
439 {
440 	const struct rtable *rt = container_of(dst, struct rtable, dst);
441 	struct net_device *dev = dst->dev;
442 	const __be32 *pkey = daddr;
443 
444 	if (rt->rt_gw_family == AF_INET) {
445 		pkey = (const __be32 *)&rt->rt_gw4;
446 	} else if (rt->rt_gw_family == AF_INET6) {
447 		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
448 	} else if (!daddr ||
449 		 (rt->rt_flags &
450 		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
451 		return;
452 	}
453 	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
454 }
455 
456 /* Hash tables of size 2048..262144 depending on RAM size.
457  * Each bucket uses 8 bytes.
458  */
459 static u32 ip_idents_mask __read_mostly;
460 static atomic_t *ip_idents __read_mostly;
461 static u32 *ip_tstamps __read_mostly;
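/* Each bucket is one atomic_t identifier plus one u32 timestamp, hence
 * the 8 bytes quoted above: the table costs 2048 * 8 = 16 KiB at the
 * low end and 262144 * 8 = 2 MiB at the high end, sized at boot from
 * available memory. ip_idents_mask is the power-of-two table size
 * minus one.
 */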
462 
463 /* In order to protect privacy, we add a perturbation to identifiers
464  * if one generator is seldom used. This makes it hard for an attacker
465  * to infer how many packets were sent between two points in time.
466  */
467 u32 ip_idents_reserve(u32 hash, int segs)
468 {
469 	u32 bucket, old, now = (u32)jiffies;
470 	atomic_t *p_id;
471 	u32 *p_tstamp;
472 	u32 delta = 0;
473 
474 	bucket = hash & ip_idents_mask;
475 	p_tstamp = ip_tstamps + bucket;
476 	p_id = ip_idents + bucket;
477 	old = READ_ONCE(*p_tstamp);
478 
479 	if (old != now && cmpxchg(p_tstamp, old, now) == old)
480 		delta = prandom_u32_max(now - old);
481 
482 	/* If UBSAN reports an error here, please make sure your compiler
483 	 * supports -fno-strict-overflow before reporting it; that was a bug
484 	 * in UBSAN, and it has been fixed in GCC 8.
485 	 */
486 	return atomic_add_return(segs + delta, p_id) - segs;
487 }
488 EXPORT_SYMBOL(ip_idents_reserve);
489 
490 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
491 {
492 	u32 hash, id;
493 
494 	/* Note: the lazy key init below is racy, but that is okay: any (even torn) key value is usable here. */
495 	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
496 		get_random_bytes(&net->ipv4.ip_id_key,
497 				 sizeof(net->ipv4.ip_id_key));
498 
499 	hash = siphash_3u32((__force u32)iph->daddr,
500 			    (__force u32)iph->saddr,
501 			    iph->protocol,
502 			    &net->ipv4.ip_id_key);
503 	id = ip_idents_reserve(hash, segs);
504 	iph->id = htons(id);
505 }
506 EXPORT_SYMBOL(__ip_select_ident);
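/* Callers normally go through ip_select_ident() or
 * ip_select_ident_segs() in <net/ip.h>: roughly, connected sockets use
 * a cheap per-socket counter, atomic (DF) datagrams can use a constant
 * ID, and only the remaining cases fall back to the shared hashed
 * generator above, e.g.:
 *
 *	__ip_select_ident(net, ip_hdr(skb), 1);
 */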
507 
508 static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
509 			     const struct sock *sk,
510 			     const struct iphdr *iph,
511 			     int oif, u8 tos,
512 			     u8 prot, u32 mark, int flow_flags)
513 {
514 	if (sk) {
515 		const struct inet_sock *inet = inet_sk(sk);
516 
517 		oif = sk->sk_bound_dev_if;
518 		mark = sk->sk_mark;
519 		tos = RT_CONN_FLAGS(sk);
520 		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
521 	}
522 	flowi4_init_output(fl4, oif, mark, tos,
523 			   RT_SCOPE_UNIVERSE, prot,
524 			   flow_flags,
525 			   iph->daddr, iph->saddr, 0, 0,
526 			   sock_net_uid(net, sk));
527 }
528 
529 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
530 			       const struct sock *sk)
531 {
532 	const struct net *net = dev_net(skb->dev);
533 	const struct iphdr *iph = ip_hdr(skb);
534 	int oif = skb->dev->ifindex;
535 	u8 tos = RT_TOS(iph->tos);
536 	u8 prot = iph->protocol;
537 	u32 mark = skb->mark;
538 
539 	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
540 }
541 
542 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
543 {
544 	const struct inet_sock *inet = inet_sk(sk);
545 	const struct ip_options_rcu *inet_opt;
546 	__be32 daddr = inet->inet_daddr;
547 
548 	rcu_read_lock();
549 	inet_opt = rcu_dereference(inet->inet_opt);
550 	if (inet_opt && inet_opt->opt.srr)
551 		daddr = inet_opt->opt.faddr;
552 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
553 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
554 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
555 			   inet_sk_flowi_flags(sk),
556 			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
557 	rcu_read_unlock();
558 }
559 
560 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
561 				 const struct sk_buff *skb)
562 {
563 	if (skb)
564 		build_skb_flow_key(fl4, skb, sk);
565 	else
566 		build_sk_flow_key(fl4, sk);
567 }
568 
569 static DEFINE_SPINLOCK(fnhe_lock);
570 
571 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
572 {
573 	struct rtable *rt;
574 
575 	rt = rcu_dereference(fnhe->fnhe_rth_input);
576 	if (rt) {
577 		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
578 		dst_dev_put(&rt->dst);
579 		dst_release(&rt->dst);
580 	}
581 	rt = rcu_dereference(fnhe->fnhe_rth_output);
582 	if (rt) {
583 		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
584 		dst_dev_put(&rt->dst);
585 		dst_release(&rt->dst);
586 	}
587 }
588 
589 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
590 {
591 	struct fib_nh_exception *fnhe, *oldest;
592 
593 	oldest = rcu_dereference(hash->chain);
594 	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
595 	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
596 		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
597 			oldest = fnhe;
598 	}
599 	fnhe_flush_routes(oldest);
600 	return oldest;
601 }
602 
603 static inline u32 fnhe_hashfun(__be32 daddr)
604 {
605 	static u32 fnhe_hashrnd __read_mostly;
606 	u32 hval;
607 
608 	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
609 	hval = jhash_1word((__force u32)daddr, fnhe_hashrnd);
610 	return hash_32(hval, FNHE_HASH_SHIFT);
611 }
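/* Example: jhash_1word() mixes the destination address with a
 * boot-time random seed, and hash_32() keeps the top FNHE_HASH_SHIFT
 * bits of the multiplicative hash, yielding a bucket index in
 * [0, FNHE_HASH_SIZE). The random seed makes bucket placement
 * unpredictable to off-host attackers.
 */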
612 
613 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
614 {
615 	rt->rt_pmtu = fnhe->fnhe_pmtu;
616 	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
617 	rt->dst.expires = fnhe->fnhe_expires;
618 
619 	if (fnhe->fnhe_gw) {
620 		rt->rt_flags |= RTCF_REDIRECTED;
621 		rt->rt_uses_gateway = 1;
622 		rt->rt_gw_family = AF_INET;
623 		rt->rt_gw4 = fnhe->fnhe_gw;
624 	}
625 }
626 
627 static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
628 				  __be32 gw, u32 pmtu, bool lock,
629 				  unsigned long expires)
630 {
631 	struct fnhe_hash_bucket *hash;
632 	struct fib_nh_exception *fnhe;
633 	struct rtable *rt;
634 	u32 genid, hval;
635 	unsigned int i;
636 	int depth;
637 
638 	genid = fnhe_genid(dev_net(nhc->nhc_dev));
639 	hval = fnhe_hashfun(daddr);
640 
641 	spin_lock_bh(&fnhe_lock);
642 
643 	hash = rcu_dereference(nhc->nhc_exceptions);
644 	if (!hash) {
645 		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
646 		if (!hash)
647 			goto out_unlock;
648 		rcu_assign_pointer(nhc->nhc_exceptions, hash);
649 	}
650 
651 	hash += hval;
652 
653 	depth = 0;
654 	for (fnhe = rcu_dereference(hash->chain); fnhe;
655 	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
656 		if (fnhe->fnhe_daddr == daddr)
657 			break;
658 		depth++;
659 	}
660 
661 	if (fnhe) {
662 		if (fnhe->fnhe_genid != genid)
663 			fnhe->fnhe_genid = genid;
664 		if (gw)
665 			fnhe->fnhe_gw = gw;
666 		if (pmtu) {
667 			fnhe->fnhe_pmtu = pmtu;
668 			fnhe->fnhe_mtu_locked = lock;
669 		}
670 		fnhe->fnhe_expires = max(1UL, expires);
671 		/* Update all cached dsts too */
672 		rt = rcu_dereference(fnhe->fnhe_rth_input);
673 		if (rt)
674 			fill_route_from_fnhe(rt, fnhe);
675 		rt = rcu_dereference(fnhe->fnhe_rth_output);
676 		if (rt)
677 			fill_route_from_fnhe(rt, fnhe);
678 	} else {
679 		if (depth > FNHE_RECLAIM_DEPTH)
680 			fnhe = fnhe_oldest(hash);
681 		else {
682 			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
683 			if (!fnhe)
684 				goto out_unlock;
685 
686 			fnhe->fnhe_next = hash->chain;
687 			rcu_assign_pointer(hash->chain, fnhe);
688 		}
689 		fnhe->fnhe_genid = genid;
690 		fnhe->fnhe_daddr = daddr;
691 		fnhe->fnhe_gw = gw;
692 		fnhe->fnhe_pmtu = pmtu;
693 		fnhe->fnhe_mtu_locked = lock;
694 		fnhe->fnhe_expires = max(1UL, expires);
695 
696 		/* Exception created; mark the cached routes for the nexthop
697 		 * stale, so anyone caching them rechecks whether this exception
698 		 * applies to them.
699 		 */
700 		rt = rcu_dereference(nhc->nhc_rth_input);
701 		if (rt)
702 			rt->dst.obsolete = DST_OBSOLETE_KILL;
703 
704 		for_each_possible_cpu(i) {
705 			struct rtable __rcu **prt;
706 
707 			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
708 			rt = rcu_dereference(*prt);
709 			if (rt)
710 				rt->dst.obsolete = DST_OBSOLETE_KILL;
711 		}
712 	}
713 
714 	fnhe->fnhe_stamp = jiffies;
715 
716 out_unlock:
717 	spin_unlock_bh(&fnhe_lock);
718 }
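/* Both users of update_or_create_fnhe() in this file follow the same
 * pattern: look up the FIB, select a path, then record the learned
 * value against the chosen nexthop. Roughly:
 *
 *	if (fib_lookup(net, fl4, &res, 0) == 0) {
 *		struct fib_nh_common *nhc;
 *
 *		fib_select_path(net, &res, fl4, skb);
 *		nhc = FIB_RES_NHC(res);
 *		update_or_create_fnhe(nhc, fl4->daddr, new_gw, 0, false,
 *				      jiffies + ip_rt_gc_timeout);
 *	}
 *
 * __ip_do_redirect() passes a gateway and no PMTU;
 * __ip_rt_update_pmtu() passes a PMTU and no gateway.
 */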
719 
720 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
721 			     bool kill_route)
722 {
723 	__be32 new_gw = icmp_hdr(skb)->un.gateway;
724 	__be32 old_gw = ip_hdr(skb)->saddr;
725 	struct net_device *dev = skb->dev;
726 	struct in_device *in_dev;
727 	struct fib_result res;
728 	struct neighbour *n;
729 	struct net *net;
730 
731 	switch (icmp_hdr(skb)->code & 7) {
732 	case ICMP_REDIR_NET:
733 	case ICMP_REDIR_NETTOS:
734 	case ICMP_REDIR_HOST:
735 	case ICMP_REDIR_HOSTTOS:
736 		break;
737 
738 	default:
739 		return;
740 	}
741 
742 	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
743 		return;
744 
745 	in_dev = __in_dev_get_rcu(dev);
746 	if (!in_dev)
747 		return;
748 
749 	net = dev_net(dev);
750 	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
751 	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
752 	    ipv4_is_zeronet(new_gw))
753 		goto reject_redirect;
754 
755 	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
756 		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
757 			goto reject_redirect;
758 		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
759 			goto reject_redirect;
760 	} else {
761 		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
762 			goto reject_redirect;
763 	}
764 
765 	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
766 	if (!n)
767 		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
768 	if (!IS_ERR(n)) {
769 		if (!(n->nud_state & NUD_VALID)) {
770 			neigh_event_send(n, NULL);
771 		} else {
772 			if (fib_lookup(net, fl4, &res, 0) == 0) {
773 				struct fib_nh_common *nhc;
774 
775 				fib_select_path(net, &res, fl4, skb);
776 				nhc = FIB_RES_NHC(res);
777 				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
778 						0, false,
779 						jiffies + ip_rt_gc_timeout);
780 			}
781 			if (kill_route)
782 				rt->dst.obsolete = DST_OBSOLETE_KILL;
783 			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
784 		}
785 		neigh_release(n);
786 	}
787 	return;
788 
789 reject_redirect:
790 #ifdef CONFIG_IP_ROUTE_VERBOSE
791 	if (IN_DEV_LOG_MARTIANS(in_dev)) {
792 		const struct iphdr *iph = (const struct iphdr *) skb->data;
793 		__be32 daddr = iph->daddr;
794 		__be32 saddr = iph->saddr;
795 
796 		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
797 				     "  Advised path = %pI4 -> %pI4\n",
798 				     &old_gw, dev->name, &new_gw,
799 				     &saddr, &daddr);
800 	}
801 #endif
802 	;
803 }
804 
805 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
806 {
807 	struct rtable *rt;
808 	struct flowi4 fl4;
809 	const struct iphdr *iph = (const struct iphdr *) skb->data;
810 	struct net *net = dev_net(skb->dev);
811 	int oif = skb->dev->ifindex;
812 	u8 tos = RT_TOS(iph->tos);
813 	u8 prot = iph->protocol;
814 	u32 mark = skb->mark;
815 
816 	rt = (struct rtable *) dst;
817 
818 	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
819 	__ip_do_redirect(rt, skb, &fl4, true);
820 }
821 
822 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
823 {
824 	struct rtable *rt = (struct rtable *)dst;
825 	struct dst_entry *ret = dst;
826 
827 	if (rt) {
828 		if (dst->obsolete > 0) {
829 			ip_rt_put(rt);
830 			ret = NULL;
831 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
832 			   rt->dst.expires) {
833 			ip_rt_put(rt);
834 			ret = NULL;
835 		}
836 	}
837 	return ret;
838 }
839 
840 /*
841  * Algorithm:
842  *	1. The first ip_rt_redirect_number redirects are sent
843  *	   with exponential backoff, then we stop sending them altogether,
844  *	   assuming that the host ignores our redirects.
845  *	2. If we did not see packets requiring redirects
846  *	   during ip_rt_redirect_silence, we assume that the host
847  *	   forgot the redirected route and start sending redirects again.
848  *
849  * This algorithm is much cheaper and more intelligent than dumb load limiting
850  * in icmp.c.
851  *
852  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
853  * and "frag. need" (breaks PMTU discovery) in icmp.c.
854  */
855 
856 void ip_rt_send_redirect(struct sk_buff *skb)
857 {
858 	struct rtable *rt = skb_rtable(skb);
859 	struct in_device *in_dev;
860 	struct inet_peer *peer;
861 	struct net *net;
862 	int log_martians;
863 	int vif;
864 
865 	rcu_read_lock();
866 	in_dev = __in_dev_get_rcu(rt->dst.dev);
867 	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
868 		rcu_read_unlock();
869 		return;
870 	}
871 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
872 	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
873 	rcu_read_unlock();
874 
875 	net = dev_net(rt->dst.dev);
876 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
877 	if (!peer) {
878 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
879 			  rt_nexthop(rt, ip_hdr(skb)->daddr));
880 		return;
881 	}
882 
883 	/* No redirected packets during ip_rt_redirect_silence;
884 	 * reset the algorithm.
885 	 */
886 	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
887 		peer->rate_tokens = 0;
888 		peer->n_redirects = 0;
889 	}
890 
891 	/* Too many ignored redirects; do not send anything.
892 	 * Set peer->rate_last to the last seen redirected packet.
893 	 */
894 	if (peer->n_redirects >= ip_rt_redirect_number) {
895 		peer->rate_last = jiffies;
896 		goto out_put_peer;
897 	}
898 
899 	/* Check for load limit; set rate_last to the latest sent
900 	 * redirect.
901 	 */
902 	if (peer->n_redirects == 0 ||
903 	    time_after(jiffies,
904 		       (peer->rate_last +
905 			(ip_rt_redirect_load << peer->n_redirects)))) {
906 		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
907 
908 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
909 		peer->rate_last = jiffies;
910 		++peer->n_redirects;
911 #ifdef CONFIG_IP_ROUTE_VERBOSE
912 		if (log_martians &&
913 		    peer->n_redirects == ip_rt_redirect_number)
914 			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
915 					     &ip_hdr(skb)->saddr, inet_iif(skb),
916 					     &ip_hdr(skb)->daddr, &gw);
917 #endif
918 	}
919 out_put_peer:
920 	inet_putpeer(peer);
921 }
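/* Worked example of the backoff above, with the default
 * ip_rt_redirect_load = HZ / 50: the n-th redirect is sent only once
 * (HZ / 50) << n jiffies have passed since the previous one, i.e. gaps
 * of 40 ms, 80 ms, 160 ms, ... After ip_rt_redirect_number (9)
 * unheeded redirects we go silent until ip_rt_redirect_silence
 * (~20.5 s) has elapsed since the last redirect event.
 */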
922 
923 static int ip_error(struct sk_buff *skb)
924 {
925 	struct rtable *rt = skb_rtable(skb);
926 	struct net_device *dev = skb->dev;
927 	struct in_device *in_dev;
928 	struct inet_peer *peer;
929 	unsigned long now;
930 	struct net *net;
931 	bool send;
932 	int code;
933 
934 	if (netif_is_l3_master(skb->dev)) {
935 		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
936 		if (!dev)
937 			goto out;
938 	}
939 
940 	in_dev = __in_dev_get_rcu(dev);
941 
942 	/* IP on this device is disabled. */
943 	if (!in_dev)
944 		goto out;
945 
946 	net = dev_net(rt->dst.dev);
947 	if (!IN_DEV_FORWARD(in_dev)) {
948 		switch (rt->dst.error) {
949 		case EHOSTUNREACH:
950 			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
951 			break;
952 
953 		case ENETUNREACH:
954 			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
955 			break;
956 		}
957 		goto out;
958 	}
959 
960 	switch (rt->dst.error) {
961 	case EINVAL:
962 	default:
963 		goto out;
964 	case EHOSTUNREACH:
965 		code = ICMP_HOST_UNREACH;
966 		break;
967 	case ENETUNREACH:
968 		code = ICMP_NET_UNREACH;
969 		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
970 		break;
971 	case EACCES:
972 		code = ICMP_PKT_FILTERED;
973 		break;
974 	}
975 
976 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
977 			       l3mdev_master_ifindex(skb->dev), 1);
978 
979 	send = true;
980 	if (peer) {
981 		now = jiffies;
982 		peer->rate_tokens += now - peer->rate_last;
983 		if (peer->rate_tokens > ip_rt_error_burst)
984 			peer->rate_tokens = ip_rt_error_burst;
985 		peer->rate_last = now;
986 		if (peer->rate_tokens >= ip_rt_error_cost)
987 			peer->rate_tokens -= ip_rt_error_cost;
988 		else
989 			send = false;
990 		inet_putpeer(peer);
991 	}
992 	if (send)
993 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
994 
995 out:	kfree_skb(skb);
996 	return 0;
997 }
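/* The peer rate limit above is a token bucket: tokens accrue at one
 * per jiffy, capped at ip_rt_error_burst = 5 * HZ, and each ICMP error
 * spends ip_rt_error_cost = HZ tokens. Steady state is therefore at
 * most one ICMP error per second per source, with bursts of up to five
 * after a quiet period.
 */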
998 
999 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1000 {
1001 	struct dst_entry *dst = &rt->dst;
1002 	struct net *net = dev_net(dst->dev);
1003 	struct fib_result res;
1004 	bool lock = false;
1005 	u32 old_mtu;
1006 
1007 	if (ip_mtu_locked(dst))
1008 		return;
1009 
1010 	old_mtu = ipv4_mtu(dst);
1011 	if (old_mtu < mtu)
1012 		return;
1013 
1014 	if (mtu < ip_rt_min_pmtu) {
1015 		lock = true;
1016 		mtu = min(old_mtu, ip_rt_min_pmtu);
1017 	}
1018 
1019 	if (rt->rt_pmtu == mtu && !lock &&
1020 	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
1021 		return;
1022 
1023 	rcu_read_lock();
1024 	if (fib_lookup(net, fl4, &res, 0) == 0) {
1025 		struct fib_nh_common *nhc;
1026 
1027 		fib_select_path(net, &res, fl4, NULL);
1028 		nhc = FIB_RES_NHC(res);
1029 		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
1030 				      jiffies + ip_rt_mtu_expires);
1031 	}
1032 	rcu_read_unlock();
1033 }
1034 
1035 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1036 			      struct sk_buff *skb, u32 mtu,
1037 			      bool confirm_neigh)
1038 {
1039 	struct rtable *rt = (struct rtable *) dst;
1040 	struct flowi4 fl4;
1041 
1042 	ip_rt_build_flow_key(&fl4, sk, skb);
1043 
1044 	/* Don't make lookup fail for bridged encapsulations */
1045 	if (skb && netif_is_any_bridge_port(skb->dev))
1046 		fl4.flowi4_oif = 0;
1047 
1048 	__ip_rt_update_pmtu(rt, &fl4, mtu);
1049 }
1050 
1051 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1052 		      int oif, u8 protocol)
1053 {
1054 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1055 	struct flowi4 fl4;
1056 	struct rtable *rt;
1057 	u32 mark = IP4_REPLY_MARK(net, skb->mark);
1058 
1059 	__build_flow_key(net, &fl4, NULL, iph, oif,
1060 			 RT_TOS(iph->tos), protocol, mark, 0);
1061 	rt = __ip_route_output_key(net, &fl4);
1062 	if (!IS_ERR(rt)) {
1063 		__ip_rt_update_pmtu(rt, &fl4, mtu);
1064 		ip_rt_put(rt);
1065 	}
1066 }
1067 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
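/* Sketch of the intended caller: a protocol's ICMP error handler, on
 * ICMP_FRAG_NEEDED, feeds the advertised MTU back into routing. The
 * handler below is hypothetical; IPPROTO_MYPROTO stands in for the
 * caller's real protocol number:
 *
 *	static void myproto_err(struct sk_buff *skb, u32 info)
 *	{
 *		struct net *net = dev_net(skb->dev);
 *
 *		if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH &&
 *		    icmp_hdr(skb)->code == ICMP_FRAG_NEEDED)
 *			ipv4_update_pmtu(skb, net, info, 0, IPPROTO_MYPROTO);
 *	}
 */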
1068 
1069 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1070 {
1071 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1072 	struct flowi4 fl4;
1073 	struct rtable *rt;
1074 
1075 	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1076 
1077 	if (!fl4.flowi4_mark)
1078 		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1079 
1080 	rt = __ip_route_output_key(sock_net(sk), &fl4);
1081 	if (!IS_ERR(rt)) {
1082 		__ip_rt_update_pmtu(rt, &fl4, mtu);
1083 		ip_rt_put(rt);
1084 	}
1085 }
1086 
1087 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1088 {
1089 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1090 	struct flowi4 fl4;
1091 	struct rtable *rt;
1092 	struct dst_entry *odst = NULL;
1093 	bool new = false;
1094 	struct net *net = sock_net(sk);
1095 
1096 	bh_lock_sock(sk);
1097 
1098 	if (!ip_sk_accept_pmtu(sk))
1099 		goto out;
1100 
1101 	odst = sk_dst_get(sk);
1102 
1103 	if (sock_owned_by_user(sk) || !odst) {
1104 		__ipv4_sk_update_pmtu(skb, sk, mtu);
1105 		goto out;
1106 	}
1107 
1108 	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1109 
1110 	rt = (struct rtable *)odst;
1111 	if (odst->obsolete && !odst->ops->check(odst, 0)) {
1112 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1113 		if (IS_ERR(rt))
1114 			goto out;
1115 
1116 		new = true;
1117 	}
1118 
1119 	__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
1120 
1121 	if (!dst_check(&rt->dst, 0)) {
1122 		if (new)
1123 			dst_release(&rt->dst);
1124 
1125 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1126 		if (IS_ERR(rt))
1127 			goto out;
1128 
1129 		new = true;
1130 	}
1131 
1132 	if (new)
1133 		sk_dst_set(sk, &rt->dst);
1134 
1135 out:
1136 	bh_unlock_sock(sk);
1137 	dst_release(odst);
1138 }
1139 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1140 
1141 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1142 		   int oif, u8 protocol)
1143 {
1144 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1145 	struct flowi4 fl4;
1146 	struct rtable *rt;
1147 
1148 	__build_flow_key(net, &fl4, NULL, iph, oif,
1149 			 RT_TOS(iph->tos), protocol, 0, 0);
1150 	rt = __ip_route_output_key(net, &fl4);
1151 	if (!IS_ERR(rt)) {
1152 		__ip_do_redirect(rt, skb, &fl4, false);
1153 		ip_rt_put(rt);
1154 	}
1155 }
1156 EXPORT_SYMBOL_GPL(ipv4_redirect);
1157 
1158 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1159 {
1160 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1161 	struct flowi4 fl4;
1162 	struct rtable *rt;
1163 	struct net *net = sock_net(sk);
1164 
1165 	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1166 	rt = __ip_route_output_key(net, &fl4);
1167 	if (!IS_ERR(rt)) {
1168 		__ip_do_redirect(rt, skb, &fl4, false);
1169 		ip_rt_put(rt);
1170 	}
1171 }
1172 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1173 
1174 INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
1175 							 u32 cookie)
1176 {
1177 	struct rtable *rt = (struct rtable *) dst;
1178 
1179 	/* All IPV4 dsts are created with ->obsolete set to the value
1180 	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1181 	 * into this function always.
1182 	 *
1183 	 * When a PMTU/redirect information update invalidates a route,
1184 	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1185 	 * DST_OBSOLETE_DEAD.
1186 	 */
1187 	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1188 		return NULL;
1189 	return dst;
1190 }
1191 EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
1192 
1193 static void ipv4_send_dest_unreach(struct sk_buff *skb)
1194 {
1195 	struct ip_options opt;
1196 	int res;
1197 
1198 	/* Recompile ip options since IPCB may not be valid anymore.
1199 	 * Also check we have a reasonable ipv4 header.
1200 	 */
1201 	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1202 	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1203 		return;
1204 
1205 	memset(&opt, 0, sizeof(opt));
1206 	if (ip_hdr(skb)->ihl > 5) {
1207 		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1208 			return;
1209 		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1210 
1211 		rcu_read_lock();
1212 		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1213 		rcu_read_unlock();
1214 
1215 		if (res)
1216 			return;
1217 	}
1218 	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1219 }
1220 
1221 static void ipv4_link_failure(struct sk_buff *skb)
1222 {
1223 	struct rtable *rt;
1224 
1225 	ipv4_send_dest_unreach(skb);
1226 
1227 	rt = skb_rtable(skb);
1228 	if (rt)
1229 		dst_set_expires(&rt->dst, 0);
1230 }
1231 
1232 static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1233 {
1234 	pr_debug("%s: %pI4 -> %pI4, %s\n",
1235 		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1236 		 skb->dev ? skb->dev->name : "?");
1237 	kfree_skb(skb);
1238 	WARN_ON(1);
1239 	return 0;
1240 }
1241 
1242 /*
1243  * We do not cache the source address of the outgoing interface,
1244  * because it is used only by the IP RR, TS and SRR options,
1245  * so it is out of the fast path.
1246  *
1247  * BTW remember: "addr" is allowed to be not aligned
1248  * in IP options!
1249  */
1250 
1251 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1252 {
1253 	__be32 src;
1254 
1255 	if (rt_is_output_route(rt))
1256 		src = ip_hdr(skb)->saddr;
1257 	else {
1258 		struct fib_result res;
1259 		struct iphdr *iph = ip_hdr(skb);
1260 		struct flowi4 fl4 = {
1261 			.daddr = iph->daddr,
1262 			.saddr = iph->saddr,
1263 			.flowi4_tos = RT_TOS(iph->tos),
1264 			.flowi4_oif = rt->dst.dev->ifindex,
1265 			.flowi4_iif = skb->dev->ifindex,
1266 			.flowi4_mark = skb->mark,
1267 		};
1268 
1269 		rcu_read_lock();
1270 		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1271 			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
1272 		else
1273 			src = inet_select_addr(rt->dst.dev,
1274 					       rt_nexthop(rt, iph->daddr),
1275 					       RT_SCOPE_UNIVERSE);
1276 		rcu_read_unlock();
1277 	}
1278 	memcpy(addr, &src, 4);
1279 }
1280 
1281 #ifdef CONFIG_IP_ROUTE_CLASSID
1282 static void set_class_tag(struct rtable *rt, u32 tag)
1283 {
1284 	if (!(rt->dst.tclassid & 0xFFFF))
1285 		rt->dst.tclassid |= tag & 0xFFFF;
1286 	if (!(rt->dst.tclassid & 0xFFFF0000))
1287 		rt->dst.tclassid |= tag & 0xFFFF0000;
1288 }
1289 #endif
1290 
1291 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1292 {
1293 	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1294 	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1295 				    ip_rt_min_advmss);
1296 
1297 	return min(advmss, IPV4_MAX_PMTU - header_size);
1298 }
1299 
1300 INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
1301 {
1302 	return ip_dst_mtu_maybe_forward(dst, false);
1303 }
1304 EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
1305 
1306 static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
1307 {
1308 	struct fnhe_hash_bucket *hash;
1309 	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1310 	u32 hval = fnhe_hashfun(daddr);
1311 
1312 	spin_lock_bh(&fnhe_lock);
1313 
1314 	hash = rcu_dereference_protected(nhc->nhc_exceptions,
1315 					 lockdep_is_held(&fnhe_lock));
1316 	hash += hval;
1317 
1318 	fnhe_p = &hash->chain;
1319 	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1320 	while (fnhe) {
1321 		if (fnhe->fnhe_daddr == daddr) {
1322 			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1323 				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1324 			/* set fnhe_daddr to 0 to ensure it won't bind with
1325 			 * new dsts in rt_bind_exception().
1326 			 */
1327 			fnhe->fnhe_daddr = 0;
1328 			fnhe_flush_routes(fnhe);
1329 			kfree_rcu(fnhe, rcu);
1330 			break;
1331 		}
1332 		fnhe_p = &fnhe->fnhe_next;
1333 		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1334 						 lockdep_is_held(&fnhe_lock));
1335 	}
1336 
1337 	spin_unlock_bh(&fnhe_lock);
1338 }
1339 
1340 static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
1341 					       __be32 daddr)
1342 {
1343 	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
1344 	struct fib_nh_exception *fnhe;
1345 	u32 hval;
1346 
1347 	if (!hash)
1348 		return NULL;
1349 
1350 	hval = fnhe_hashfun(daddr);
1351 
1352 	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1353 	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
1354 		if (fnhe->fnhe_daddr == daddr) {
1355 			if (fnhe->fnhe_expires &&
1356 			    time_after(jiffies, fnhe->fnhe_expires)) {
1357 				ip_del_fnhe(nhc, daddr);
1358 				break;
1359 			}
1360 			return fnhe;
1361 		}
1362 	}
1363 	return NULL;
1364 }
1365 
1366 /* MTU selection:
1367  * 1. mtu on route is locked - use it
1368  * 2. mtu from nexthop exception
1369  * 3. mtu from egress device
1370  */
1371 
1372 u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
1373 {
1374 	struct fib_nh_common *nhc = res->nhc;
1375 	struct net_device *dev = nhc->nhc_dev;
1376 	struct fib_info *fi = res->fi;
1377 	u32 mtu = 0;
1378 
1379 	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
1380 	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
1381 		mtu = fi->fib_mtu;
1382 
1383 	if (likely(!mtu)) {
1384 		struct fib_nh_exception *fnhe;
1385 
1386 		fnhe = find_exception(nhc, daddr);
1387 		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
1388 			mtu = fnhe->fnhe_pmtu;
1389 	}
1390 
1391 	if (likely(!mtu))
1392 		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
1393 
1394 	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
1395 }
1396 
1397 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1398 			      __be32 daddr, const bool do_cache)
1399 {
1400 	bool ret = false;
1401 
1402 	spin_lock_bh(&fnhe_lock);
1403 
1404 	if (daddr == fnhe->fnhe_daddr) {
1405 		struct rtable __rcu **porig;
1406 		struct rtable *orig;
1407 		int genid = fnhe_genid(dev_net(rt->dst.dev));
1408 
1409 		if (rt_is_input_route(rt))
1410 			porig = &fnhe->fnhe_rth_input;
1411 		else
1412 			porig = &fnhe->fnhe_rth_output;
1413 		orig = rcu_dereference(*porig);
1414 
1415 		if (fnhe->fnhe_genid != genid) {
1416 			fnhe->fnhe_genid = genid;
1417 			fnhe->fnhe_gw = 0;
1418 			fnhe->fnhe_pmtu = 0;
1419 			fnhe->fnhe_expires = 0;
1420 			fnhe->fnhe_mtu_locked = false;
1421 			fnhe_flush_routes(fnhe);
1422 			orig = NULL;
1423 		}
1424 		fill_route_from_fnhe(rt, fnhe);
1425 		if (!rt->rt_gw4) {
1426 			rt->rt_gw4 = daddr;
1427 			rt->rt_gw_family = AF_INET;
1428 		}
1429 
1430 		if (do_cache) {
1431 			dst_hold(&rt->dst);
1432 			rcu_assign_pointer(*porig, rt);
1433 			if (orig) {
1434 				dst_dev_put(&orig->dst);
1435 				dst_release(&orig->dst);
1436 			}
1437 			ret = true;
1438 		}
1439 
1440 		fnhe->fnhe_stamp = jiffies;
1441 	}
1442 	spin_unlock_bh(&fnhe_lock);
1443 
1444 	return ret;
1445 }
1446 
1447 static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
1448 {
1449 	struct rtable *orig, *prev, **p;
1450 	bool ret = true;
1451 
1452 	if (rt_is_input_route(rt)) {
1453 		p = (struct rtable **)&nhc->nhc_rth_input;
1454 	} else {
1455 		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
1456 	}
1457 	orig = *p;
1458 
1459 	/* hold dst before doing cmpxchg() to avoid race condition
1460 	 * on this dst
1461 	 */
1462 	dst_hold(&rt->dst);
1463 	prev = cmpxchg(p, orig, rt);
1464 	if (prev == orig) {
1465 		if (orig) {
1466 			rt_add_uncached_list(orig);
1467 			dst_release(&orig->dst);
1468 		}
1469 	} else {
1470 		dst_release(&rt->dst);
1471 		ret = false;
1472 	}
1473 
1474 	return ret;
1475 }
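/* The lock-free publish above takes its reference before the
 * cmpxchg(), so a concurrent reader that sees the new pointer also
 * sees a live refcount. When the cmpxchg() loses, we just drop our
 * extra reference and return false; rt_set_nexthop() then parks the
 * losing route on the uncached list instead.
 */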
1476 
1477 struct uncached_list {
1478 	spinlock_t		lock;
1479 	struct list_head	head;
1480 };
1481 
1482 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1483 
1484 void rt_add_uncached_list(struct rtable *rt)
1485 {
1486 	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1487 
1488 	rt->rt_uncached_list = ul;
1489 
1490 	spin_lock_bh(&ul->lock);
1491 	list_add_tail(&rt->rt_uncached, &ul->head);
1492 	spin_unlock_bh(&ul->lock);
1493 }
1494 
1495 void rt_del_uncached_list(struct rtable *rt)
1496 {
1497 	if (!list_empty(&rt->rt_uncached)) {
1498 		struct uncached_list *ul = rt->rt_uncached_list;
1499 
1500 		spin_lock_bh(&ul->lock);
1501 		list_del(&rt->rt_uncached);
1502 		spin_unlock_bh(&ul->lock);
1503 	}
1504 }
1505 
1506 static void ipv4_dst_destroy(struct dst_entry *dst)
1507 {
1508 	struct rtable *rt = (struct rtable *)dst;
1509 
1510 	ip_dst_metrics_put(dst);
1511 	rt_del_uncached_list(rt);
1512 }
1513 
1514 void rt_flush_dev(struct net_device *dev)
1515 {
1516 	struct rtable *rt;
1517 	int cpu;
1518 
1519 	for_each_possible_cpu(cpu) {
1520 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1521 
1522 		spin_lock_bh(&ul->lock);
1523 		list_for_each_entry(rt, &ul->head, rt_uncached) {
1524 			if (rt->dst.dev != dev)
1525 				continue;
1526 			rt->dst.dev = blackhole_netdev;
1527 			dev_hold(rt->dst.dev);
1528 			dev_put(dev);
1529 		}
1530 		spin_unlock_bh(&ul->lock);
1531 	}
1532 }
1533 
1534 static bool rt_cache_valid(const struct rtable *rt)
1535 {
1536 	return	rt &&
1537 		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1538 		!rt_is_expired(rt);
1539 }
1540 
1541 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1542 			   const struct fib_result *res,
1543 			   struct fib_nh_exception *fnhe,
1544 			   struct fib_info *fi, u16 type, u32 itag,
1545 			   const bool do_cache)
1546 {
1547 	bool cached = false;
1548 
1549 	if (fi) {
1550 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1551 
1552 		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
1553 			rt->rt_uses_gateway = 1;
1554 			rt->rt_gw_family = nhc->nhc_gw_family;
1555 			/* only INET and INET6 are supported */
1556 			if (likely(nhc->nhc_gw_family == AF_INET))
1557 				rt->rt_gw4 = nhc->nhc_gw.ipv4;
1558 			else
1559 				rt->rt_gw6 = nhc->nhc_gw.ipv6;
1560 		}
1561 
1562 		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1563 
1564 #ifdef CONFIG_IP_ROUTE_CLASSID
1565 		if (nhc->nhc_family == AF_INET) {
1566 			struct fib_nh *nh;
1567 
1568 			nh = container_of(nhc, struct fib_nh, nh_common);
1569 			rt->dst.tclassid = nh->nh_tclassid;
1570 		}
1571 #endif
1572 		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
1573 		if (unlikely(fnhe))
1574 			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1575 		else if (do_cache)
1576 			cached = rt_cache_route(nhc, rt);
1577 		if (unlikely(!cached)) {
1578 			/* Routes we intend to cache in nexthop exception or
1579 			 * FIB nexthop have the DST_NOCACHE bit clear.
1580 			 * However, if we are unsuccessful at storing this
1581 			 * route into the cache we really need to set it.
1582 			 */
1583 			if (!rt->rt_gw4) {
1584 				rt->rt_gw_family = AF_INET;
1585 				rt->rt_gw4 = daddr;
1586 			}
1587 			rt_add_uncached_list(rt);
1588 		}
1589 	} else
1590 		rt_add_uncached_list(rt);
1591 
1592 #ifdef CONFIG_IP_ROUTE_CLASSID
1593 #ifdef CONFIG_IP_MULTIPLE_TABLES
1594 	set_class_tag(rt, res->tclassid);
1595 #endif
1596 	set_class_tag(rt, itag);
1597 #endif
1598 }
1599 
1600 struct rtable *rt_dst_alloc(struct net_device *dev,
1601 			    unsigned int flags, u16 type,
1602 			    bool nopolicy, bool noxfrm)
1603 {
1604 	struct rtable *rt;
1605 
1606 	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1607 		       (nopolicy ? DST_NOPOLICY : 0) |
1608 		       (noxfrm ? DST_NOXFRM : 0));
1609 
1610 	if (rt) {
1611 		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1612 		rt->rt_flags = flags;
1613 		rt->rt_type = type;
1614 		rt->rt_is_input = 0;
1615 		rt->rt_iif = 0;
1616 		rt->rt_pmtu = 0;
1617 		rt->rt_mtu_locked = 0;
1618 		rt->rt_uses_gateway = 0;
1619 		rt->rt_gw_family = 0;
1620 		rt->rt_gw4 = 0;
1621 		INIT_LIST_HEAD(&rt->rt_uncached);
1622 
1623 		rt->dst.output = ip_output;
1624 		if (flags & RTCF_LOCAL)
1625 			rt->dst.input = ip_local_deliver;
1626 	}
1627 
1628 	return rt;
1629 }
1630 EXPORT_SYMBOL(rt_dst_alloc);
1631 
1632 struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
1633 {
1634 	struct rtable *new_rt;
1635 
1636 	new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1637 			   rt->dst.flags);
1638 
1639 	if (new_rt) {
1640 		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1641 		new_rt->rt_flags = rt->rt_flags;
1642 		new_rt->rt_type = rt->rt_type;
1643 		new_rt->rt_is_input = rt->rt_is_input;
1644 		new_rt->rt_iif = rt->rt_iif;
1645 		new_rt->rt_pmtu = rt->rt_pmtu;
1646 		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
1647 		new_rt->rt_gw_family = rt->rt_gw_family;
1648 		if (rt->rt_gw_family == AF_INET)
1649 			new_rt->rt_gw4 = rt->rt_gw4;
1650 		else if (rt->rt_gw_family == AF_INET6)
1651 			new_rt->rt_gw6 = rt->rt_gw6;
1652 		INIT_LIST_HEAD(&new_rt->rt_uncached);
1653 
1654 		new_rt->dst.input = rt->dst.input;
1655 		new_rt->dst.output = rt->dst.output;
1656 		new_rt->dst.error = rt->dst.error;
1657 		new_rt->dst.lastuse = jiffies;
1658 		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
1659 	}
1660 	return new_rt;
1661 }
1662 EXPORT_SYMBOL(rt_dst_clone);
1663 
1664 /* called in rcu_read_lock() section */
1665 int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1666 			  u8 tos, struct net_device *dev,
1667 			  struct in_device *in_dev, u32 *itag)
1668 {
1669 	int err;
1670 
1671 	/* Primary sanity checks. */
1672 	if (!in_dev)
1673 		return -EINVAL;
1674 
1675 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1676 	    skb->protocol != htons(ETH_P_IP))
1677 		return -EINVAL;
1678 
1679 	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1680 		return -EINVAL;
1681 
1682 	if (ipv4_is_zeronet(saddr)) {
1683 		if (!ipv4_is_local_multicast(daddr) &&
1684 		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
1685 			return -EINVAL;
1686 	} else {
1687 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1688 					  in_dev, itag);
1689 		if (err < 0)
1690 			return err;
1691 	}
1692 	return 0;
1693 }
1694 
1695 /* called in rcu_read_lock() section */
1696 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1697 			     u8 tos, struct net_device *dev, int our)
1698 {
1699 	struct in_device *in_dev = __in_dev_get_rcu(dev);
1700 	unsigned int flags = RTCF_MULTICAST;
1701 	struct rtable *rth;
1702 	u32 itag = 0;
1703 	int err;
1704 
1705 	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1706 	if (err)
1707 		return err;
1708 
1709 	if (our)
1710 		flags |= RTCF_LOCAL;
1711 
1712 	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1713 			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
1714 	if (!rth)
1715 		return -ENOBUFS;
1716 
1717 #ifdef CONFIG_IP_ROUTE_CLASSID
1718 	rth->dst.tclassid = itag;
1719 #endif
1720 	rth->dst.output = ip_rt_bug;
1721 	rth->rt_is_input = 1;
1722 
1723 #ifdef CONFIG_IP_MROUTE
1724 	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1725 		rth->dst.input = ip_mr_input;
1726 #endif
1727 	RT_CACHE_STAT_INC(in_slow_mc);
1728 
1729 	skb_dst_set(skb, &rth->dst);
1730 	return 0;
1731 }
1732 
1733 
1734 static void ip_handle_martian_source(struct net_device *dev,
1735 				     struct in_device *in_dev,
1736 				     struct sk_buff *skb,
1737 				     __be32 daddr,
1738 				     __be32 saddr)
1739 {
1740 	RT_CACHE_STAT_INC(in_martian_src);
1741 #ifdef CONFIG_IP_ROUTE_VERBOSE
1742 	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1743 		/*
1744 		 *	RFC 1812 recommendation: if the source is martian,
1745 		 *	the only hint we have is the MAC header.
1746 		 */
1747 		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1748 			&daddr, &saddr, dev->name);
1749 		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1750 			print_hex_dump(KERN_WARNING, "ll header: ",
1751 				       DUMP_PREFIX_OFFSET, 16, 1,
1752 				       skb_mac_header(skb),
1753 				       dev->hard_header_len, false);
1754 		}
1755 	}
1756 #endif
1757 }
1758 
1759 /* called in rcu_read_lock() section */
1760 static int __mkroute_input(struct sk_buff *skb,
1761 			   const struct fib_result *res,
1762 			   struct in_device *in_dev,
1763 			   __be32 daddr, __be32 saddr, u32 tos)
1764 {
1765 	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1766 	struct net_device *dev = nhc->nhc_dev;
1767 	struct fib_nh_exception *fnhe;
1768 	struct rtable *rth;
1769 	int err;
1770 	struct in_device *out_dev;
1771 	bool do_cache;
1772 	u32 itag = 0;
1773 
1774 	/* get a working reference to the output device */
1775 	out_dev = __in_dev_get_rcu(dev);
1776 	if (!out_dev) {
1777 		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1778 		return -EINVAL;
1779 	}
1780 
1781 	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1782 				  in_dev->dev, in_dev, &itag);
1783 	if (err < 0) {
1784 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1785 					 saddr);
1786 
1787 		goto cleanup;
1788 	}
1789 
1790 	do_cache = res->fi && !itag;
1791 	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1792 	    skb->protocol == htons(ETH_P_IP)) {
1793 		__be32 gw;
1794 
1795 		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
1796 		if (IN_DEV_SHARED_MEDIA(out_dev) ||
1797 		    inet_addr_onlink(out_dev, saddr, gw))
1798 			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1799 	}
1800 
1801 	if (skb->protocol != htons(ETH_P_IP)) {
1802 		/* Not IP (i.e. ARP). Do not create a route if it is
1803 		 * invalid for proxy arp. DNAT routes are always valid.
1804 		 *
1805 		 * The proxy arp feature has been extended to allow ARP
1806 		 * replies back out the same interface, to support
1807 		 * Private VLAN switch technologies. See arp.c.
1808 		 */
1809 		if (out_dev == in_dev &&
1810 		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1811 			err = -EINVAL;
1812 			goto cleanup;
1813 		}
1814 	}
1815 
1816 	fnhe = find_exception(nhc, daddr);
1817 	if (do_cache) {
1818 		if (fnhe)
1819 			rth = rcu_dereference(fnhe->fnhe_rth_input);
1820 		else
1821 			rth = rcu_dereference(nhc->nhc_rth_input);
1822 		if (rt_cache_valid(rth)) {
1823 			skb_dst_set_noref(skb, &rth->dst);
1824 			goto out;
1825 		}
1826 	}
1827 
1828 	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1829 			   IN_DEV_ORCONF(in_dev, NOPOLICY),
1830 			   IN_DEV_ORCONF(out_dev, NOXFRM));
1831 	if (!rth) {
1832 		err = -ENOBUFS;
1833 		goto cleanup;
1834 	}
1835 
1836 	rth->rt_is_input = 1;
1837 	RT_CACHE_STAT_INC(in_slow_tot);
1838 
1839 	rth->dst.input = ip_forward;
1840 
1841 	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1842 		       do_cache);
1843 	lwtunnel_set_redirect(&rth->dst);
1844 	skb_dst_set(skb, &rth->dst);
1845 out:
1846 	err = 0;
1847  cleanup:
1848 	return err;
1849 }
1850 
1851 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1852 /* To make ICMP packets follow the right flow, the multipath hash is
1853  * calculated from the inner IP addresses.
1854  */
1855 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1856 				 struct flow_keys *hash_keys)
1857 {
1858 	const struct iphdr *outer_iph = ip_hdr(skb);
1859 	const struct iphdr *key_iph = outer_iph;
1860 	const struct iphdr *inner_iph;
1861 	const struct icmphdr *icmph;
1862 	struct iphdr _inner_iph;
1863 	struct icmphdr _icmph;
1864 
1865 	if (likely(outer_iph->protocol != IPPROTO_ICMP))
1866 		goto out;
1867 
1868 	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1869 		goto out;
1870 
1871 	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1872 				   &_icmph);
1873 	if (!icmph)
1874 		goto out;
1875 
1876 	if (!icmp_is_err(icmph->type))
1877 		goto out;
1878 
1879 	inner_iph = skb_header_pointer(skb,
1880 				       outer_iph->ihl * 4 + sizeof(_icmph),
1881 				       sizeof(_inner_iph), &_inner_iph);
1882 	if (!inner_iph)
1883 		goto out;
1884 
1885 	key_iph = inner_iph;
1886 out:
1887 	hash_keys->addrs.v4addrs.src = key_iph->saddr;
1888 	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1889 }
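/* Concrete example: an ICMP error generated for an A -> B packet is
 * itself addressed router -> A, so hashing its outer header would pick
 * an arbitrary path. Keying on the quoted inner header restores the
 * original (A, B) pair, so the error hashes onto the same nexthop as
 * the flow it refers to.
 */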
1890 
1891 static u32 fib_multipath_custom_hash_outer(const struct net *net,
1892 					   const struct sk_buff *skb,
1893 					   bool *p_has_inner)
1894 {
1895 	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
1896 	struct flow_keys keys, hash_keys;
1897 
1898 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
1899 		return 0;
1900 
1901 	memset(&hash_keys, 0, sizeof(hash_keys));
1902 	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
1903 
1904 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1905 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
1906 		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1907 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
1908 		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1909 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
1910 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1911 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
1912 		hash_keys.ports.src = keys.ports.src;
1913 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
1914 		hash_keys.ports.dst = keys.ports.dst;
1915 
1916 	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
1917 	return flow_hash_from_keys(&hash_keys);
1918 }
1919 
1920 static u32 fib_multipath_custom_hash_inner(const struct net *net,
1921 					   const struct sk_buff *skb,
1922 					   bool has_inner)
1923 {
1924 	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
1925 	struct flow_keys keys, hash_keys;
1926 
1927 	/* We assume the packet carries an encapsulation, but if none was
1928 	 * encountered during dissection of the outer flow, then there is no
1929 	 * point in calling the flow dissector again.
1930 	 */
1931 	if (!has_inner)
1932 		return 0;
1933 
1934 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
1935 		return 0;
1936 
1937 	memset(&hash_keys, 0, sizeof(hash_keys));
1938 	skb_flow_dissect_flow_keys(skb, &keys, 0);
1939 
1940 	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
1941 		return 0;
1942 
1943 	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1944 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1945 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1946 			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1947 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1948 			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1949 	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1950 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1951 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1952 			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
1953 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1954 			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
1955 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
1956 			hash_keys.tags.flow_label = keys.tags.flow_label;
1957 	}
1958 
1959 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
1960 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1961 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
1962 		hash_keys.ports.src = keys.ports.src;
1963 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
1964 		hash_keys.ports.dst = keys.ports.dst;
1965 
1966 	return flow_hash_from_keys(&hash_keys);
1967 }
1968 
1969 static u32 fib_multipath_custom_hash_skb(const struct net *net,
1970 					 const struct sk_buff *skb)
1971 {
1972 	u32 mhash, mhash_inner;
1973 	bool has_inner = true;
1974 
1975 	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
1976 	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);
1977 
1978 	return jhash_2words(mhash, mhash_inner, 0);
1979 }
1980 
1981 static u32 fib_multipath_custom_hash_fl4(const struct net *net,
1982 					 const struct flowi4 *fl4)
1983 {
1984 	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
1985 	struct flow_keys hash_keys;
1986 
1987 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
1988 		return 0;
1989 
1990 	memset(&hash_keys, 0, sizeof(hash_keys));
1991 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1992 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
1993 		hash_keys.addrs.v4addrs.src = fl4->saddr;
1994 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
1995 		hash_keys.addrs.v4addrs.dst = fl4->daddr;
1996 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
1997 		hash_keys.basic.ip_proto = fl4->flowi4_proto;
1998 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
1999 		hash_keys.ports.src = fl4->fl4_sport;
2000 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2001 		hash_keys.ports.dst = fl4->fl4_dport;
2002 
2003 	return flow_hash_from_keys(&hash_keys);
2004 }
2005 
2006 /* If skb is set, it will be used and fl4 can be NULL. */
2007 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
2008 		       const struct sk_buff *skb, struct flow_keys *flkeys)
2009 {
2010 	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
2011 	struct flow_keys hash_keys;
2012 	u32 mhash = 0;
2013 
2014 	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
2015 	case 0:
2016 		memset(&hash_keys, 0, sizeof(hash_keys));
2017 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2018 		if (skb) {
2019 			ip_multipath_l3_keys(skb, &hash_keys);
2020 		} else {
2021 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2022 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2023 		}
2024 		mhash = flow_hash_from_keys(&hash_keys);
2025 		break;
2026 	case 1:
2027 		/* skb is currently provided only when forwarding */
2028 		if (skb) {
2029 			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2030 			struct flow_keys keys;
2031 
2032 			/* short-circuit if we already have L4 hash present */
2033 			if (skb->l4_hash)
2034 				return skb_get_hash_raw(skb) >> 1;
2035 
2036 			memset(&hash_keys, 0, sizeof(hash_keys));
2037 
2038 			if (!flkeys) {
2039 				skb_flow_dissect_flow_keys(skb, &keys, flag);
2040 				flkeys = &keys;
2041 			}
2042 
2043 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2044 			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2045 			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2046 			hash_keys.ports.src = flkeys->ports.src;
2047 			hash_keys.ports.dst = flkeys->ports.dst;
2048 			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2049 		} else {
2050 			memset(&hash_keys, 0, sizeof(hash_keys));
2051 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2052 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2053 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2054 			hash_keys.ports.src = fl4->fl4_sport;
2055 			hash_keys.ports.dst = fl4->fl4_dport;
2056 			hash_keys.basic.ip_proto = fl4->flowi4_proto;
2057 		}
2058 		mhash = flow_hash_from_keys(&hash_keys);
2059 		break;
2060 	case 2:
2061 		memset(&hash_keys, 0, sizeof(hash_keys));
2062 		/* skb is currently provided only when forwarding */
2063 		if (skb) {
2064 			struct flow_keys keys;
2065 
2066 			skb_flow_dissect_flow_keys(skb, &keys, 0);
2067 			/* Inner can be v4 or v6 */
2068 			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2069 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2070 				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2071 				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2072 			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2073 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2074 				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2075 				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2076 				hash_keys.tags.flow_label = keys.tags.flow_label;
2077 				hash_keys.basic.ip_proto = keys.basic.ip_proto;
2078 			} else {
2079 				/* Same as case 0 */
2080 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2081 				ip_multipath_l3_keys(skb, &hash_keys);
2082 			}
2083 		} else {
2084 			/* Same as case 0 */
2085 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2086 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2087 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2088 		}
2089 		mhash = flow_hash_from_keys(&hash_keys);
2090 		break;
2091 	case 3:
2092 		if (skb)
2093 			mhash = fib_multipath_custom_hash_skb(net, skb);
2094 		else
2095 			mhash = fib_multipath_custom_hash_fl4(net, fl4);
2096 		break;
2097 	}
2098 
2099 	if (multipath_hash)
2100 		mhash = jhash_2words(mhash, multipath_hash, 0);
2101 
2102 	return mhash >> 1;
2103 }
2104 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
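
/* Userspace sketch (values as documented in
 * Documentation/networking/ip-sysctl.rst) for choosing among the hash
 * policies implemented above:
 *
 *	sysctl -w net.ipv4.fib_multipath_hash_policy=0	# L3 (case 0)
 *	sysctl -w net.ipv4.fib_multipath_hash_policy=1	# L4 (case 1)
 *	sysctl -w net.ipv4.fib_multipath_hash_policy=2	# inner L3 (case 2)
 *	sysctl -w net.ipv4.fib_multipath_hash_policy=3	# custom (case 3)
 *
 * For policy 3, net.ipv4.fib_multipath_hash_fields selects the
 * FIB_MULTIPATH_HASH_FIELD_* bits, e.g. 0x0037 for the outer 5-tuple.
 */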
2105 
2106 static int ip_mkroute_input(struct sk_buff *skb,
2107 			    struct fib_result *res,
2108 			    struct in_device *in_dev,
2109 			    __be32 daddr, __be32 saddr, u32 tos,
2110 			    struct flow_keys *hkeys)
2111 {
2112 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2113 	if (res->fi && fib_info_num_path(res->fi) > 1) {
2114 		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
2115 
2116 		fib_select_multipath(res, h);
2117 	}
2118 #endif
2119 
2120 	/* create a routing cache entry */
2121 	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
2122 }
2123 
2124 /* Implements the same saddr-related checks as ip_route_input_slow(),
2125  * assuming daddr is valid and the destination is not a local broadcast one.
2126  * Uses the provided hint instead of performing a route lookup.
2127  */
2128 int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2129 		      u8 tos, struct net_device *dev,
2130 		      const struct sk_buff *hint)
2131 {
2132 	struct in_device *in_dev = __in_dev_get_rcu(dev);
2133 	struct rtable *rt = skb_rtable(hint);
2134 	struct net *net = dev_net(dev);
2135 	int err = -EINVAL;
2136 	u32 tag = 0;
2137 
2138 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2139 		goto martian_source;
2140 
2141 	if (ipv4_is_zeronet(saddr))
2142 		goto martian_source;
2143 
2144 	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2145 		goto martian_source;
2146 
2147 	if (rt->rt_type != RTN_LOCAL)
2148 		goto skip_validate_source;
2149 
2150 	tos &= IPTOS_RT_MASK;
2151 	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
2152 	if (err < 0)
2153 		goto martian_source;
2154 
2155 skip_validate_source:
2156 	skb_dst_copy(skb, hint);
2157 	return 0;
2158 
2159 martian_source:
2160 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2161 	return err;
2162 }
2163 
2164 /* get device for dst_alloc with local routes */
2165 static struct net_device *ip_rt_get_dev(struct net *net,
2166 					const struct fib_result *res)
2167 {
2168 	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
2169 	struct net_device *dev = NULL;
2170 
2171 	if (nhc)
2172 		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
2173 
2174 	return dev ? : net->loopback_dev;
2175 }
2176 
2177 /*
2178  *	NOTE. We drop all packets that have a local source
2179  *	address, because every properly looped-back packet
2180  *	must already have the correct destination attached by
2181  *	the output routine. Changes in the enforced policies
2182  *	must also be applied to ip_route_use_hint().
2183  *
2184  *	This approach solves two big problems:
2185  *	1. Non-simplex devices are handled properly.
2186  *	2. IP spoofing attempts are filtered with a 100% guarantee.
2187  *	Called with rcu_read_lock().
2188  */
2189 
2190 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2191 			       u8 tos, struct net_device *dev,
2192 			       struct fib_result *res)
2193 {
2194 	struct in_device *in_dev = __in_dev_get_rcu(dev);
2195 	struct flow_keys *flkeys = NULL, _flkeys;
2196 	struct net    *net = dev_net(dev);
2197 	struct ip_tunnel_info *tun_info;
2198 	int		err = -EINVAL;
2199 	unsigned int	flags = 0;
2200 	u32		itag = 0;
2201 	struct rtable	*rth;
2202 	struct flowi4	fl4;
2203 	bool do_cache = true;
2204 
2205 	/* IP on this device is disabled. */
2206 
2207 	if (!in_dev)
2208 		goto out;
2209 
2210 	/* Check for the most weird martians, which cannot be detected
2211 	 * by fib_lookup.
2212 	 */
2213 
2214 	tun_info = skb_tunnel_info(skb);
2215 	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2216 		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2217 	else
2218 		fl4.flowi4_tun_key.tun_id = 0;
2219 	skb_dst_drop(skb);
2220 
2221 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2222 		goto martian_source;
2223 
2224 	res->fi = NULL;
2225 	res->table = NULL;
2226 	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2227 		goto brd_input;
2228 
2229 	/* Accept zero addresses only for limited broadcast;
2230 	 * I do not even know whether to fix this. Waiting for complaints :-)
2231 	 */
2232 	if (ipv4_is_zeronet(saddr))
2233 		goto martian_source;
2234 
2235 	if (ipv4_is_zeronet(daddr))
2236 		goto martian_destination;
2237 
2238 	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
2239 	 * and calls it at most once when daddr and/or saddr is a loopback address.
2240 	 */
2241 	if (ipv4_is_loopback(daddr)) {
2242 		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2243 			goto martian_destination;
2244 	} else if (ipv4_is_loopback(saddr)) {
2245 		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2246 			goto martian_source;
2247 	}
2248 
2249 	/*
2250 	 *	Now we are ready to route the packet.
2251 	 */
2252 	fl4.flowi4_oif = 0;
2253 	fl4.flowi4_iif = dev->ifindex;
2254 	fl4.flowi4_mark = skb->mark;
2255 	fl4.flowi4_tos = tos;
2256 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2257 	fl4.flowi4_flags = 0;
2258 	fl4.daddr = daddr;
2259 	fl4.saddr = saddr;
2260 	fl4.flowi4_uid = sock_net_uid(net, NULL);
2261 	fl4.flowi4_multipath_hash = 0;
2262 
2263 	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2264 		flkeys = &_flkeys;
2265 	} else {
2266 		fl4.flowi4_proto = 0;
2267 		fl4.fl4_sport = 0;
2268 		fl4.fl4_dport = 0;
2269 	}
2270 
2271 	err = fib_lookup(net, &fl4, res, 0);
2272 	if (err != 0) {
2273 		if (!IN_DEV_FORWARD(in_dev))
2274 			err = -EHOSTUNREACH;
2275 		goto no_route;
2276 	}
2277 
2278 	if (res->type == RTN_BROADCAST) {
2279 		if (IN_DEV_BFORWARD(in_dev))
2280 			goto make_route;
2281 		/* not do cache if bc_forwarding is enabled */
2282 		/* do not cache if bc_forwarding is enabled */
2283 			do_cache = false;
2284 		goto brd_input;
2285 	}
2286 
2287 	if (res->type == RTN_LOCAL) {
2288 		err = fib_validate_source(skb, saddr, daddr, tos,
2289 					  0, dev, in_dev, &itag);
2290 		if (err < 0)
2291 			goto martian_source;
2292 		goto local_input;
2293 	}
2294 
2295 	if (!IN_DEV_FORWARD(in_dev)) {
2296 		err = -EHOSTUNREACH;
2297 		goto no_route;
2298 	}
2299 	if (res->type != RTN_UNICAST)
2300 		goto martian_destination;
2301 
2302 make_route:
2303 	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2304 out:	return err;
2305 
2306 brd_input:
2307 	if (skb->protocol != htons(ETH_P_IP))
2308 		goto e_inval;
2309 
2310 	if (!ipv4_is_zeronet(saddr)) {
2311 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2312 					  in_dev, &itag);
2313 		if (err < 0)
2314 			goto martian_source;
2315 	}
2316 	flags |= RTCF_BROADCAST;
2317 	res->type = RTN_BROADCAST;
2318 	RT_CACHE_STAT_INC(in_brd);
2319 
2320 local_input:
2321 	do_cache &= res->fi && !itag;
2322 	if (do_cache) {
2323 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2324 
2325 		rth = rcu_dereference(nhc->nhc_rth_input);
2326 		if (rt_cache_valid(rth)) {
2327 			skb_dst_set_noref(skb, &rth->dst);
2328 			err = 0;
2329 			goto out;
2330 		}
2331 	}
2332 
2333 	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
2334 			   flags | RTCF_LOCAL, res->type,
2335 			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
2336 	if (!rth)
2337 		goto e_nobufs;
2338 
2339 	rth->dst.output = ip_rt_bug;
2340 #ifdef CONFIG_IP_ROUTE_CLASSID
2341 	rth->dst.tclassid = itag;
2342 #endif
2343 	rth->rt_is_input = 1;
2344 
2345 	RT_CACHE_STAT_INC(in_slow_tot);
2346 	if (res->type == RTN_UNREACHABLE) {
2347 		rth->dst.input = ip_error;
2348 		rth->dst.error = -err;
2349 		rth->rt_flags	&= ~RTCF_LOCAL;
2350 	}
2351 
2352 	if (do_cache) {
2353 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2354 
2355 		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2356 		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2357 			WARN_ON(rth->dst.input == lwtunnel_input);
2358 			rth->dst.lwtstate->orig_input = rth->dst.input;
2359 			rth->dst.input = lwtunnel_input;
2360 		}
2361 
2362 		if (unlikely(!rt_cache_route(nhc, rth)))
2363 			rt_add_uncached_list(rth);
2364 	}
2365 	skb_dst_set(skb, &rth->dst);
2366 	err = 0;
2367 	goto out;
2368 
2369 no_route:
2370 	RT_CACHE_STAT_INC(in_no_route);
2371 	res->type = RTN_UNREACHABLE;
2372 	res->fi = NULL;
2373 	res->table = NULL;
2374 	goto local_input;
2375 
2376 	/*
2377 	 *	Do not cache martian addresses: they should be logged (RFC1812)
2378 	 */
2379 martian_destination:
2380 	RT_CACHE_STAT_INC(in_martian_dst);
2381 #ifdef CONFIG_IP_ROUTE_VERBOSE
2382 	if (IN_DEV_LOG_MARTIANS(in_dev))
2383 		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2384 				     &daddr, &saddr, dev->name);
2385 #endif
2386 
2387 e_inval:
2388 	err = -EINVAL;
2389 	goto out;
2390 
2391 e_nobufs:
2392 	err = -ENOBUFS;
2393 	goto out;
2394 
2395 martian_source:
2396 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2397 	goto out;
2398 }
2399 
2400 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2401 			 u8 tos, struct net_device *dev)
2402 {
2403 	struct fib_result res;
2404 	int err;
2405 
2406 	tos &= IPTOS_RT_MASK;
2407 	rcu_read_lock();
2408 	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2409 	rcu_read_unlock();
2410 
2411 	return err;
2412 }
2413 EXPORT_SYMBOL(ip_route_input_noref);
2414 
2415 /* called with rcu_read_lock held */
2416 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2417 		       u8 tos, struct net_device *dev, struct fib_result *res)
2418 {
2419 	/* Multicast recognition logic is moved from the route cache to here.
2420 	 * The problem was that too many Ethernet cards have broken/missing
2421 	 * hardware multicast filters :-( As a result, a host on a multicast
2422 	 * network acquires a lot of useless route cache entries, e.g. from
2423 	 * SDR messages from all over the world. Now we try to get rid of them.
2424 	 * Really, provided the software IP multicast filter is organized
2425 	 * reasonably (at least, hashed), it does not cause a slowdown
2426 	 * compared with route cache reject entries.
2427 	 * Note that multicast routers are not affected, because a
2428 	 * route cache entry is created eventually.
2429 	 */
2430 	if (ipv4_is_multicast(daddr)) {
2431 		struct in_device *in_dev = __in_dev_get_rcu(dev);
2432 		int our = 0;
2433 		int err = -EINVAL;
2434 
2435 		if (!in_dev)
2436 			return err;
2437 		our = ip_check_mc_rcu(in_dev, daddr, saddr,
2438 				      ip_hdr(skb)->protocol);
2439 
2440 		/* check l3 master if no match yet */
2441 		if (!our && netif_is_l3_slave(dev)) {
2442 			struct in_device *l3_in_dev;
2443 
2444 			l3_in_dev = __in_dev_get_rcu(skb->dev);
2445 			if (l3_in_dev)
2446 				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2447 						      ip_hdr(skb)->protocol);
2448 		}
2449 
2450 		if (our
2451 #ifdef CONFIG_IP_MROUTE
2452 			||
2453 		    (!ipv4_is_local_multicast(daddr) &&
2454 		     IN_DEV_MFORWARD(in_dev))
2455 #endif
2456 		   ) {
2457 			err = ip_route_input_mc(skb, daddr, saddr,
2458 						tos, dev, our);
2459 		}
2460 		return err;
2461 	}
2462 
2463 	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2464 }
2465 
2466 /* called with rcu_read_lock() */
2467 static struct rtable *__mkroute_output(const struct fib_result *res,
2468 				       const struct flowi4 *fl4, int orig_oif,
2469 				       struct net_device *dev_out,
2470 				       unsigned int flags)
2471 {
2472 	struct fib_info *fi = res->fi;
2473 	struct fib_nh_exception *fnhe;
2474 	struct in_device *in_dev;
2475 	u16 type = res->type;
2476 	struct rtable *rth;
2477 	bool do_cache;
2478 
2479 	in_dev = __in_dev_get_rcu(dev_out);
2480 	if (!in_dev)
2481 		return ERR_PTR(-EINVAL);
2482 
2483 	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2484 		if (ipv4_is_loopback(fl4->saddr) &&
2485 		    !(dev_out->flags & IFF_LOOPBACK) &&
2486 		    !netif_is_l3_master(dev_out))
2487 			return ERR_PTR(-EINVAL);
2488 
2489 	if (ipv4_is_lbcast(fl4->daddr))
2490 		type = RTN_BROADCAST;
2491 	else if (ipv4_is_multicast(fl4->daddr))
2492 		type = RTN_MULTICAST;
2493 	else if (ipv4_is_zeronet(fl4->daddr))
2494 		return ERR_PTR(-EINVAL);
2495 
2496 	if (dev_out->flags & IFF_LOOPBACK)
2497 		flags |= RTCF_LOCAL;
2498 
2499 	do_cache = true;
2500 	if (type == RTN_BROADCAST) {
2501 		flags |= RTCF_BROADCAST | RTCF_LOCAL;
2502 		fi = NULL;
2503 	} else if (type == RTN_MULTICAST) {
2504 		flags |= RTCF_MULTICAST | RTCF_LOCAL;
2505 		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2506 				     fl4->flowi4_proto))
2507 			flags &= ~RTCF_LOCAL;
2508 		else
2509 			do_cache = false;
2510 		/* If a multicast route does not exist, use the
2511 		 * default one, but do not gateway in this case.
2512 		 * Yes, it is a hack.
2513 		 */
2514 		if (fi && res->prefixlen < 4)
2515 			fi = NULL;
2516 	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2517 		   (orig_oif != dev_out->ifindex)) {
2518 		/* For local routes that require a particular output interface
2519 		 * we do not want to cache the result.  Caching the result
2520 		 * causes incorrect behaviour when there are multiple source
2521 		 * addresses on the interface: if the intended recipient is
2522 		 * waiting on that interface for the packet, it will not be
2523 		 * received, because the packet will be delivered on the
2524 		 * loopback interface and the IP_PKTINFO ipi_ifindex will
2525 		 * be set to the loopback interface as well.
2526 		 */
2527 		do_cache = false;
2528 	}
2529 
2530 	fnhe = NULL;
2531 	do_cache &= fi != NULL;
2532 	if (fi) {
2533 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2534 		struct rtable __rcu **prth;
2535 
2536 		fnhe = find_exception(nhc, fl4->daddr);
2537 		if (!do_cache)
2538 			goto add;
2539 		if (fnhe) {
2540 			prth = &fnhe->fnhe_rth_output;
2541 		} else {
2542 			if (unlikely(fl4->flowi4_flags &
2543 				     FLOWI_FLAG_KNOWN_NH &&
2544 				     !(nhc->nhc_gw_family &&
2545 				       nhc->nhc_scope == RT_SCOPE_LINK))) {
2546 				do_cache = false;
2547 				goto add;
2548 			}
2549 			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2550 		}
2551 		rth = rcu_dereference(*prth);
2552 		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2553 			return rth;
2554 	}
2555 
2556 add:
2557 	rth = rt_dst_alloc(dev_out, flags, type,
2558 			   IN_DEV_ORCONF(in_dev, NOPOLICY),
2559 			   IN_DEV_ORCONF(in_dev, NOXFRM));
2560 	if (!rth)
2561 		return ERR_PTR(-ENOBUFS);
2562 
2563 	rth->rt_iif = orig_oif;
2564 
2565 	RT_CACHE_STAT_INC(out_slow_tot);
2566 
2567 	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2568 		if (flags & RTCF_LOCAL &&
2569 		    !(dev_out->flags & IFF_LOOPBACK)) {
2570 			rth->dst.output = ip_mc_output;
2571 			RT_CACHE_STAT_INC(out_slow_mc);
2572 		}
2573 #ifdef CONFIG_IP_MROUTE
2574 		if (type == RTN_MULTICAST) {
2575 			if (IN_DEV_MFORWARD(in_dev) &&
2576 			    !ipv4_is_local_multicast(fl4->daddr)) {
2577 				rth->dst.input = ip_mr_input;
2578 				rth->dst.output = ip_mc_output;
2579 			}
2580 		}
2581 #endif
2582 	}
2583 
2584 	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2585 	lwtunnel_set_redirect(&rth->dst);
2586 
2587 	return rth;
2588 }
2589 
2590 /*
2591  * Major route resolver routine.
2592  */
2593 
2594 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2595 					const struct sk_buff *skb)
2596 {
2597 	__u8 tos = RT_FL_TOS(fl4);
2598 	struct fib_result res = {
2599 		.type		= RTN_UNSPEC,
2600 		.fi		= NULL,
2601 		.table		= NULL,
2602 		.tclassid	= 0,
2603 	};
2604 	struct rtable *rth;
2605 
2606 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
2607 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2608 	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2609 			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2610 
2611 	rcu_read_lock();
2612 	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2613 	rcu_read_unlock();
2614 
2615 	return rth;
2616 }
2617 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
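
/* In-kernel usage sketch: most callers reach this resolver through the
 * ip_route_output_key() or ip_route_output_flow() wrappers, e.g.
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= daddr,
 *		.flowi4_proto	= IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...transmit via rt->dst, then release it with ip_rt_put(rt)...
 */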
2618 
2619 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2620 					    struct fib_result *res,
2621 					    const struct sk_buff *skb)
2622 {
2623 	struct net_device *dev_out = NULL;
2624 	int orig_oif = fl4->flowi4_oif;
2625 	unsigned int flags = 0;
2626 	struct rtable *rth;
2627 	int err;
2628 
2629 	if (fl4->saddr) {
2630 		if (ipv4_is_multicast(fl4->saddr) ||
2631 		    ipv4_is_lbcast(fl4->saddr) ||
2632 		    ipv4_is_zeronet(fl4->saddr)) {
2633 			rth = ERR_PTR(-EINVAL);
2634 			goto out;
2635 		}
2636 
2637 		rth = ERR_PTR(-ENETUNREACH);
2638 
2639 		/* I removed check for oif == dev_out->oif here.
2640 		 * It was wrong for two reasons:
2641 		 * 1. ip_dev_find(net, saddr) can return the wrong iface if
2642 		 *    saddr is assigned to multiple interfaces.
2643 		 * 2. Moreover, we are allowed to send packets with the saddr
2644 		 *    of another iface. --ANK
2645 		 */
2646 
2647 		if (fl4->flowi4_oif == 0 &&
2648 		    (ipv4_is_multicast(fl4->daddr) ||
2649 		     ipv4_is_lbcast(fl4->daddr))) {
2650 			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2651 			dev_out = __ip_dev_find(net, fl4->saddr, false);
2652 			if (!dev_out)
2653 				goto out;
2654 
2655 			/* Special hack: the user can direct multicasts
2656 			 * and limited broadcast via the necessary interface
2657 			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2658 			 * This hack is not just for fun; it allows
2659 			 * vic, vat and friends to work.
2660 			 * They bind the socket to loopback, set the ttl to zero
2661 			 * and expect that it will work.
2662 			 * From the viewpoint of the routing cache they are broken,
2663 			 * because we are not allowed to build a multicast path
2664 			 * with a loopback source addr (look, the routing cache
2665 			 * cannot know that the ttl is zero, so the packet
2666 			 * will not leave this host and the route is valid).
2667 			 * Luckily, this hack is a good workaround.
2668 			 */
2669 
2670 			fl4->flowi4_oif = dev_out->ifindex;
2671 			goto make_route;
2672 		}
2673 
2674 		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2675 			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2676 			if (!__ip_dev_find(net, fl4->saddr, false))
2677 				goto out;
2678 		}
2679 	}
2680 
2681 
2682 	if (fl4->flowi4_oif) {
2683 		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2684 		rth = ERR_PTR(-ENODEV);
2685 		if (!dev_out)
2686 			goto out;
2687 
2688 		/* RACE: Check return value of inet_select_addr instead. */
2689 		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2690 			rth = ERR_PTR(-ENETUNREACH);
2691 			goto out;
2692 		}
2693 		if (ipv4_is_local_multicast(fl4->daddr) ||
2694 		    ipv4_is_lbcast(fl4->daddr) ||
2695 		    fl4->flowi4_proto == IPPROTO_IGMP) {
2696 			if (!fl4->saddr)
2697 				fl4->saddr = inet_select_addr(dev_out, 0,
2698 							      RT_SCOPE_LINK);
2699 			goto make_route;
2700 		}
2701 		if (!fl4->saddr) {
2702 			if (ipv4_is_multicast(fl4->daddr))
2703 				fl4->saddr = inet_select_addr(dev_out, 0,
2704 							      fl4->flowi4_scope);
2705 			else if (!fl4->daddr)
2706 				fl4->saddr = inet_select_addr(dev_out, 0,
2707 							      RT_SCOPE_HOST);
2708 		}
2709 	}
2710 
2711 	if (!fl4->daddr) {
2712 		fl4->daddr = fl4->saddr;
2713 		if (!fl4->daddr)
2714 			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2715 		dev_out = net->loopback_dev;
2716 		fl4->flowi4_oif = LOOPBACK_IFINDEX;
2717 		res->type = RTN_LOCAL;
2718 		flags |= RTCF_LOCAL;
2719 		goto make_route;
2720 	}
2721 
2722 	err = fib_lookup(net, fl4, res, 0);
2723 	if (err) {
2724 		res->fi = NULL;
2725 		res->table = NULL;
2726 		if (fl4->flowi4_oif &&
2727 		    (ipv4_is_multicast(fl4->daddr) ||
2728 		    !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2729 			/* Apparently, the routing tables are wrong. Assume
2730 			 * that the destination is on-link.
2731 			 *
2732 			 * WHY? DW.
2733 			 * Because we are allowed to send to an iface
2734 			 * even if it has NO routes and NO assigned
2735 			 * addresses. When oif is specified, the routing
2736 			 * tables are looked up with only one purpose:
2737 			 * to catch whether the destination is gatewayed
2738 			 * rather than direct. Moreover, if MSG_DONTROUTE
2739 			 * is set, we send the packet, ignoring both the
2740 			 * routing tables and the ifaddr state. --ANK
2741 			 *
2742 			 * We could do this even if oif is
2743 			 * unknown (IPv6 likely would), but
2744 			 * we do not.
2745 			 */
2746 
2747 			if (fl4->saddr == 0)
2748 				fl4->saddr = inet_select_addr(dev_out, 0,
2749 							      RT_SCOPE_LINK);
2750 			res->type = RTN_UNICAST;
2751 			goto make_route;
2752 		}
2753 		rth = ERR_PTR(err);
2754 		goto out;
2755 	}
2756 
2757 	if (res->type == RTN_LOCAL) {
2758 		if (!fl4->saddr) {
2759 			if (res->fi->fib_prefsrc)
2760 				fl4->saddr = res->fi->fib_prefsrc;
2761 			else
2762 				fl4->saddr = fl4->daddr;
2763 		}
2764 
2765 		/* L3 master device is the loopback for that domain */
2766 		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2767 			net->loopback_dev;
2768 
2769 		/* make sure orig_oif points to fib result device even
2770 		 * though packet rx/tx happens over loopback or l3mdev
2771 		 */
2772 		orig_oif = FIB_RES_OIF(*res);
2773 
2774 		fl4->flowi4_oif = dev_out->ifindex;
2775 		flags |= RTCF_LOCAL;
2776 		goto make_route;
2777 	}
2778 
2779 	fib_select_path(net, res, fl4, skb);
2780 
2781 	dev_out = FIB_RES_DEV(*res);
2782 
2783 make_route:
2784 	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2785 
2786 out:
2787 	return rth;
2788 }
2789 
2790 static struct dst_ops ipv4_dst_blackhole_ops = {
2791 	.family			= AF_INET,
2792 	.default_advmss		= ipv4_default_advmss,
2793 	.neigh_lookup		= ipv4_neigh_lookup,
2794 	.check			= dst_blackhole_check,
2795 	.cow_metrics		= dst_blackhole_cow_metrics,
2796 	.update_pmtu		= dst_blackhole_update_pmtu,
2797 	.redirect		= dst_blackhole_redirect,
2798 	.mtu			= dst_blackhole_mtu,
2799 };
2800 
2801 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2802 {
2803 	struct rtable *ort = (struct rtable *) dst_orig;
2804 	struct rtable *rt;
2805 
2806 	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2807 	if (rt) {
2808 		struct dst_entry *new = &rt->dst;
2809 
2810 		new->__use = 1;
2811 		new->input = dst_discard;
2812 		new->output = dst_discard_out;
2813 
2814 		new->dev = net->loopback_dev;
2815 		if (new->dev)
2816 			dev_hold(new->dev);
2817 
2818 		rt->rt_is_input = ort->rt_is_input;
2819 		rt->rt_iif = ort->rt_iif;
2820 		rt->rt_pmtu = ort->rt_pmtu;
2821 		rt->rt_mtu_locked = ort->rt_mtu_locked;
2822 
2823 		rt->rt_genid = rt_genid_ipv4(net);
2824 		rt->rt_flags = ort->rt_flags;
2825 		rt->rt_type = ort->rt_type;
2826 		rt->rt_uses_gateway = ort->rt_uses_gateway;
2827 		rt->rt_gw_family = ort->rt_gw_family;
2828 		if (rt->rt_gw_family == AF_INET)
2829 			rt->rt_gw4 = ort->rt_gw4;
2830 		else if (rt->rt_gw_family == AF_INET6)
2831 			rt->rt_gw6 = ort->rt_gw6;
2832 
2833 		INIT_LIST_HEAD(&rt->rt_uncached);
2834 	}
2835 
2836 	dst_release(dst_orig);
2837 
2838 	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2839 }
2840 
2841 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2842 				    const struct sock *sk)
2843 {
2844 	struct rtable *rt = __ip_route_output_key(net, flp4);
2845 
2846 	if (IS_ERR(rt))
2847 		return rt;
2848 
2849 	if (flp4->flowi4_proto) {
2850 		flp4->flowi4_oif = rt->dst.dev->ifindex;
2851 		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2852 							flowi4_to_flowi(flp4),
2853 							sk, 0);
2854 	}
2855 
2856 	return rt;
2857 }
2858 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2859 
2860 struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
2861 				      struct net_device *dev,
2862 				      struct net *net, __be32 *saddr,
2863 				      const struct ip_tunnel_info *info,
2864 				      u8 protocol, bool use_cache)
2865 {
2866 #ifdef CONFIG_DST_CACHE
2867 	struct dst_cache *dst_cache;
2868 #endif
2869 	struct rtable *rt = NULL;
2870 	struct flowi4 fl4;
2871 	__u8 tos;
2872 
2873 #ifdef CONFIG_DST_CACHE
2874 	dst_cache = (struct dst_cache *)&info->dst_cache;
2875 	if (use_cache) {
2876 		rt = dst_cache_get_ip4(dst_cache, saddr);
2877 		if (rt)
2878 			return rt;
2879 	}
2880 #endif
2881 	memset(&fl4, 0, sizeof(fl4));
2882 	fl4.flowi4_mark = skb->mark;
2883 	fl4.flowi4_proto = protocol;
2884 	fl4.daddr = info->key.u.ipv4.dst;
2885 	fl4.saddr = info->key.u.ipv4.src;
2886 	tos = info->key.tos;
2887 	fl4.flowi4_tos = RT_TOS(tos);
2888 
2889 	rt = ip_route_output_key(net, &fl4);
2890 	if (IS_ERR(rt)) {
2891 		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
2892 		return ERR_PTR(-ENETUNREACH);
2893 	}
2894 	if (rt->dst.dev == dev) { /* is this necessary? */
2895 		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
2896 		ip_rt_put(rt);
2897 		return ERR_PTR(-ELOOP);
2898 	}
2899 #ifdef CONFIG_DST_CACHE
2900 	if (use_cache)
2901 		dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
2902 #endif
2903 	*saddr = fl4.saddr;
2904 	return rt;
2905 }
2906 EXPORT_SYMBOL_GPL(ip_route_output_tunnel);
2907 
2908 /* called with rcu_read_lock held */
2909 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2910 			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2911 			struct sk_buff *skb, u32 portid, u32 seq,
2912 			unsigned int flags)
2913 {
2914 	struct rtmsg *r;
2915 	struct nlmsghdr *nlh;
2916 	unsigned long expires = 0;
2917 	u32 error;
2918 	u32 metrics[RTAX_MAX];
2919 
2920 	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2921 	if (!nlh)
2922 		return -EMSGSIZE;
2923 
2924 	r = nlmsg_data(nlh);
2925 	r->rtm_family	 = AF_INET;
2926 	r->rtm_dst_len	= 32;
2927 	r->rtm_src_len	= 0;
2928 	r->rtm_tos	= fl4 ? fl4->flowi4_tos : 0;
2929 	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
2930 	if (nla_put_u32(skb, RTA_TABLE, table_id))
2931 		goto nla_put_failure;
2932 	r->rtm_type	= rt->rt_type;
2933 	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2934 	r->rtm_protocol = RTPROT_UNSPEC;
2935 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2936 	if (rt->rt_flags & RTCF_NOTIFY)
2937 		r->rtm_flags |= RTM_F_NOTIFY;
2938 	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2939 		r->rtm_flags |= RTCF_DOREDIRECT;
2940 
2941 	if (nla_put_in_addr(skb, RTA_DST, dst))
2942 		goto nla_put_failure;
2943 	if (src) {
2944 		r->rtm_src_len = 32;
2945 		if (nla_put_in_addr(skb, RTA_SRC, src))
2946 			goto nla_put_failure;
2947 	}
2948 	if (rt->dst.dev &&
2949 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2950 		goto nla_put_failure;
2951 	if (rt->dst.lwtstate &&
2952 	    lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2953 		goto nla_put_failure;
2954 #ifdef CONFIG_IP_ROUTE_CLASSID
2955 	if (rt->dst.tclassid &&
2956 	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2957 		goto nla_put_failure;
2958 #endif
2959 	if (fl4 && !rt_is_input_route(rt) &&
2960 	    fl4->saddr != src) {
2961 		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2962 			goto nla_put_failure;
2963 	}
2964 	if (rt->rt_uses_gateway) {
2965 		if (rt->rt_gw_family == AF_INET &&
2966 		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2967 			goto nla_put_failure;
2968 		} else if (rt->rt_gw_family == AF_INET6) {
2969 			int alen = sizeof(struct in6_addr);
2970 			struct nlattr *nla;
2971 			struct rtvia *via;
2972 
2973 			nla = nla_reserve(skb, RTA_VIA, alen + 2);
2974 			if (!nla)
2975 				goto nla_put_failure;
2976 
2977 			via = nla_data(nla);
2978 			via->rtvia_family = AF_INET6;
2979 			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
2980 		}
2981 	}
2982 
2983 	expires = rt->dst.expires;
2984 	if (expires) {
2985 		unsigned long now = jiffies;
2986 
2987 		if (time_before(now, expires))
2988 			expires -= now;
2989 		else
2990 			expires = 0;
2991 	}
2992 
2993 	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2994 	if (rt->rt_pmtu && expires)
2995 		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2996 	if (rt->rt_mtu_locked && expires)
2997 		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2998 	if (rtnetlink_put_metrics(skb, metrics) < 0)
2999 		goto nla_put_failure;
3000 
3001 	if (fl4) {
3002 		if (fl4->flowi4_mark &&
3003 		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
3004 			goto nla_put_failure;
3005 
3006 		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
3007 		    nla_put_u32(skb, RTA_UID,
3008 				from_kuid_munged(current_user_ns(),
3009 						 fl4->flowi4_uid)))
3010 			goto nla_put_failure;
3011 
3012 		if (rt_is_input_route(rt)) {
3013 #ifdef CONFIG_IP_MROUTE
3014 			if (ipv4_is_multicast(dst) &&
3015 			    !ipv4_is_local_multicast(dst) &&
3016 			    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
3017 				int err = ipmr_get_route(net, skb,
3018 							 fl4->saddr, fl4->daddr,
3019 							 r, portid);
3020 
3021 				if (err <= 0) {
3022 					if (err == 0)
3023 						return 0;
3024 					goto nla_put_failure;
3025 				}
3026 			} else
3027 #endif
3028 				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
3029 					goto nla_put_failure;
3030 		}
3031 	}
3032 
3033 	error = rt->dst.error;
3034 
3035 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3036 		goto nla_put_failure;
3037 
3038 	nlmsg_end(skb, nlh);
3039 	return 0;
3040 
3041 nla_put_failure:
3042 	nlmsg_cancel(skb, nlh);
3043 	return -EMSGSIZE;
3044 }
3045 
3046 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
3047 			    struct netlink_callback *cb, u32 table_id,
3048 			    struct fnhe_hash_bucket *bucket, int genid,
3049 			    int *fa_index, int fa_start, unsigned int flags)
3050 {
3051 	int i;
3052 
3053 	for (i = 0; i < FNHE_HASH_SIZE; i++) {
3054 		struct fib_nh_exception *fnhe;
3055 
3056 		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
3057 		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
3058 			struct rtable *rt;
3059 			int err;
3060 
3061 			if (*fa_index < fa_start)
3062 				goto next;
3063 
3064 			if (fnhe->fnhe_genid != genid)
3065 				goto next;
3066 
3067 			if (fnhe->fnhe_expires &&
3068 			    time_after(jiffies, fnhe->fnhe_expires))
3069 				goto next;
3070 
3071 			rt = rcu_dereference(fnhe->fnhe_rth_input);
3072 			if (!rt)
3073 				rt = rcu_dereference(fnhe->fnhe_rth_output);
3074 			if (!rt)
3075 				goto next;
3076 
3077 			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3078 					   table_id, NULL, skb,
3079 					   NETLINK_CB(cb->skb).portid,
3080 					   cb->nlh->nlmsg_seq, flags);
3081 			if (err)
3082 				return err;
3083 next:
3084 			(*fa_index)++;
3085 		}
3086 	}
3087 
3088 	return 0;
3089 }
3090 
3091 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
3092 		       u32 table_id, struct fib_info *fi,
3093 		       int *fa_index, int fa_start, unsigned int flags)
3094 {
3095 	struct net *net = sock_net(cb->skb->sk);
3096 	int nhsel, genid = fnhe_genid(net);
3097 
3098 	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
3099 		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
3100 		struct fnhe_hash_bucket *bucket;
3101 		int err;
3102 
3103 		if (nhc->nhc_flags & RTNH_F_DEAD)
3104 			continue;
3105 
3106 		rcu_read_lock();
3107 		bucket = rcu_dereference(nhc->nhc_exceptions);
3108 		err = 0;
3109 		if (bucket)
3110 			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
3111 					       genid, fa_index, fa_start,
3112 					       flags);
3113 		rcu_read_unlock();
3114 		if (err)
3115 			return err;
3116 	}
3117 
3118 	return 0;
3119 }
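
/* The exceptions walked above (PMTU, redirect) are what a cloned-route
 * dump shows from userspace, e.g. after an ICMP Fragmentation Needed:
 *
 *	ip route show cache
 */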
3120 
3121 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
3122 						   u8 ip_proto, __be16 sport,
3123 						   __be16 dport)
3124 {
3125 	struct sk_buff *skb;
3126 	struct iphdr *iph;
3127 
3128 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3129 	if (!skb)
3130 		return NULL;
3131 
3132 	/* Reserve room for dummy headers; this skb can pass
3133 	 * through a good chunk of the routing engine.
3134 	 */
3135 	skb_reset_mac_header(skb);
3136 	skb_reset_network_header(skb);
3137 	skb->protocol = htons(ETH_P_IP);
3138 	iph = skb_put(skb, sizeof(struct iphdr));
3139 	iph->protocol = ip_proto;
3140 	iph->saddr = src;
3141 	iph->daddr = dst;
3142 	iph->version = 0x4;
3143 	iph->frag_off = 0;
3144 	iph->ihl = 0x5;
3145 	skb_set_transport_header(skb, skb->len);
3146 
3147 	switch (iph->protocol) {
3148 	case IPPROTO_UDP: {
3149 		struct udphdr *udph;
3150 
3151 		udph = skb_put_zero(skb, sizeof(struct udphdr));
3152 		udph->source = sport;
3153 		udph->dest = dport;
3154 		udph->len = sizeof(struct udphdr);
3155 		udph->check = 0;
3156 		break;
3157 	}
3158 	case IPPROTO_TCP: {
3159 		struct tcphdr *tcph;
3160 
3161 		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3162 		tcph->source	= sport;
3163 		tcph->dest	= dport;
3164 		tcph->doff	= sizeof(struct tcphdr) / 4;
3165 		tcph->rst = 1;
3166 		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3167 					    src, dst, 0);
3168 		break;
3169 	}
3170 	case IPPROTO_ICMP: {
3171 		struct icmphdr *icmph;
3172 
3173 		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3174 		icmph->type = ICMP_ECHO;
3175 		icmph->code = 0;
3176 	}
3177 	}
3178 
3179 	return skb;
3180 }
3181 
3182 static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3183 				       const struct nlmsghdr *nlh,
3184 				       struct nlattr **tb,
3185 				       struct netlink_ext_ack *extack)
3186 {
3187 	struct rtmsg *rtm;
3188 	int i, err;
3189 
3190 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3191 		NL_SET_ERR_MSG(extack,
3192 			       "ipv4: Invalid header for route get request");
3193 		return -EINVAL;
3194 	}
3195 
3196 	if (!netlink_strict_get_check(skb))
3197 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3198 					      rtm_ipv4_policy, extack);
3199 
3200 	rtm = nlmsg_data(nlh);
3201 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3202 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3203 	    rtm->rtm_table || rtm->rtm_protocol ||
3204 	    rtm->rtm_scope || rtm->rtm_type) {
3205 		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3206 		return -EINVAL;
3207 	}
3208 
3209 	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3210 			       RTM_F_LOOKUP_TABLE |
3211 			       RTM_F_FIB_MATCH)) {
3212 		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3213 		return -EINVAL;
3214 	}
3215 
3216 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3217 					    rtm_ipv4_policy, extack);
3218 	if (err)
3219 		return err;
3220 
3221 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3222 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3223 		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3224 		return -EINVAL;
3225 	}
3226 
3227 	for (i = 0; i <= RTA_MAX; i++) {
3228 		if (!tb[i])
3229 			continue;
3230 
3231 		switch (i) {
3232 		case RTA_IIF:
3233 		case RTA_OIF:
3234 		case RTA_SRC:
3235 		case RTA_DST:
3236 		case RTA_IP_PROTO:
3237 		case RTA_SPORT:
3238 		case RTA_DPORT:
3239 		case RTA_MARK:
3240 		case RTA_UID:
3241 			break;
3242 		default:
3243 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3244 			return -EINVAL;
3245 		}
3246 	}
3247 
3248 	return 0;
3249 }
3250 
3251 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3252 			     struct netlink_ext_ack *extack)
3253 {
3254 	struct net *net = sock_net(in_skb->sk);
3255 	struct nlattr *tb[RTA_MAX+1];
3256 	u32 table_id = RT_TABLE_MAIN;
3257 	__be16 sport = 0, dport = 0;
3258 	struct fib_result res = {};
3259 	u8 ip_proto = IPPROTO_UDP;
3260 	struct rtable *rt = NULL;
3261 	struct sk_buff *skb;
3262 	struct rtmsg *rtm;
3263 	struct flowi4 fl4 = {};
3264 	__be32 dst = 0;
3265 	__be32 src = 0;
3266 	kuid_t uid;
3267 	u32 iif;
3268 	int err;
3269 	int mark;
3270 
3271 	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3272 	if (err < 0)
3273 		return err;
3274 
3275 	rtm = nlmsg_data(nlh);
3276 	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3277 	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3278 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3279 	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3280 	if (tb[RTA_UID])
3281 		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3282 	else
3283 		uid = (iif ? INVALID_UID : current_uid());
3284 
3285 	if (tb[RTA_IP_PROTO]) {
3286 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3287 						  &ip_proto, AF_INET, extack);
3288 		if (err)
3289 			return err;
3290 	}
3291 
3292 	if (tb[RTA_SPORT])
3293 		sport = nla_get_be16(tb[RTA_SPORT]);
3294 
3295 	if (tb[RTA_DPORT])
3296 		dport = nla_get_be16(tb[RTA_DPORT]);
3297 
3298 	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3299 	if (!skb)
3300 		return -ENOBUFS;
3301 
3302 	fl4.daddr = dst;
3303 	fl4.saddr = src;
3304 	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
3305 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3306 	fl4.flowi4_mark = mark;
3307 	fl4.flowi4_uid = uid;
3308 	if (sport)
3309 		fl4.fl4_sport = sport;
3310 	if (dport)
3311 		fl4.fl4_dport = dport;
3312 	fl4.flowi4_proto = ip_proto;
3313 
3314 	rcu_read_lock();
3315 
3316 	if (iif) {
3317 		struct net_device *dev;
3318 
3319 		dev = dev_get_by_index_rcu(net, iif);
3320 		if (!dev) {
3321 			err = -ENODEV;
3322 			goto errout_rcu;
3323 		}
3324 
3325 		fl4.flowi4_iif = iif; /* for rt_fill_info */
3326 		skb->dev	= dev;
3327 		skb->mark	= mark;
3328 		err = ip_route_input_rcu(skb, dst, src,
3329 					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
3330 					 &res);
3331 
3332 		rt = skb_rtable(skb);
3333 		if (err == 0 && rt->dst.error)
3334 			err = -rt->dst.error;
3335 	} else {
3336 		fl4.flowi4_iif = LOOPBACK_IFINDEX;
3337 		skb->dev = net->loopback_dev;
3338 		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3339 		err = 0;
3340 		if (IS_ERR(rt))
3341 			err = PTR_ERR(rt);
3342 		else
3343 			skb_dst_set(skb, &rt->dst);
3344 	}
3345 
3346 	if (err)
3347 		goto errout_rcu;
3348 
3349 	if (rtm->rtm_flags & RTM_F_NOTIFY)
3350 		rt->rt_flags |= RTCF_NOTIFY;
3351 
3352 	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3353 		table_id = res.table ? res.table->tb_id : 0;
3354 
3355 	/* reset skb for netlink reply msg */
3356 	skb_trim(skb, 0);
3357 	skb_reset_network_header(skb);
3358 	skb_reset_transport_header(skb);
3359 	skb_reset_mac_header(skb);
3360 
3361 	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3362 		struct fib_rt_info fri;
3363 
3364 		if (!res.fi) {
3365 			err = fib_props[res.type].error;
3366 			if (!err)
3367 				err = -EHOSTUNREACH;
3368 			goto errout_rcu;
3369 		}
3370 		fri.fi = res.fi;
3371 		fri.tb_id = table_id;
3372 		fri.dst = res.prefix;
3373 		fri.dst_len = res.prefixlen;
3374 		fri.tos = fl4.flowi4_tos;
3375 		fri.type = rt->rt_type;
3376 		fri.offload = 0;
3377 		fri.trap = 0;
3378 		fri.offload_failed = 0;
3379 		if (res.fa_head) {
3380 			struct fib_alias *fa;
3381 
3382 			hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3383 				u8 slen = 32 - fri.dst_len;
3384 
3385 				if (fa->fa_slen == slen &&
3386 				    fa->tb_id == fri.tb_id &&
3387 				    fa->fa_tos == fri.tos &&
3388 				    fa->fa_info == res.fi &&
3389 				    fa->fa_type == fri.type) {
3390 					fri.offload = fa->offload;
3391 					fri.trap = fa->trap;
3392 					break;
3393 				}
3394 			}
3395 		}
3396 		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3397 				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3398 	} else {
3399 		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3400 				   NETLINK_CB(in_skb).portid,
3401 				   nlh->nlmsg_seq, 0);
3402 	}
3403 	if (err < 0)
3404 		goto errout_rcu;
3405 
3406 	rcu_read_unlock();
3407 
3408 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3409 
3410 errout_free:
3411 	return err;
3412 errout_rcu:
3413 	rcu_read_unlock();
3414 	kfree_skb(skb);
3415 	goto errout_free;
3416 }
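
/* Userspace view (illustrative): this handler answers RTM_GETROUTE
 * requests such as
 *
 *	ip route get 192.0.2.1 from 198.51.100.7 iif eth0
 *	ip route get fibmatch 192.0.2.1		(the RTM_F_FIB_MATCH path)
 *
 * with the optional mark/uid/ipproto/sport/dport attributes feeding the
 * dummy skb built by inet_rtm_getroute_build_skb().
 */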
3417 
3418 void ip_rt_multicast_event(struct in_device *in_dev)
3419 {
3420 	rt_cache_flush(dev_net(in_dev->dev));
3421 }
3422 
3423 #ifdef CONFIG_SYSCTL
3424 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
3425 static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
3426 static int ip_rt_gc_elasticity __read_mostly	= 8;
3427 static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
3428 
3429 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3430 		void *buffer, size_t *lenp, loff_t *ppos)
3431 {
3432 	struct net *net = (struct net *)__ctl->extra1;
3433 
3434 	if (write) {
3435 		rt_cache_flush(net);
3436 		fnhe_genid_bump(net);
3437 		return 0;
3438 	}
3439 
3440 	return -EINVAL;
3441 }
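
/* Writing any value to the per-netns flush file drops cached routes and
 * exceptions by bumping the generation counters, e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 */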
3442 
3443 static struct ctl_table ipv4_route_table[] = {
3444 	{
3445 		.procname	= "gc_thresh",
3446 		.data		= &ipv4_dst_ops.gc_thresh,
3447 		.maxlen		= sizeof(int),
3448 		.mode		= 0644,
3449 		.proc_handler	= proc_dointvec,
3450 	},
3451 	{
3452 		.procname	= "max_size",
3453 		.data		= &ip_rt_max_size,
3454 		.maxlen		= sizeof(int),
3455 		.mode		= 0644,
3456 		.proc_handler	= proc_dointvec,
3457 	},
3458 	{
3459 		/* Deprecated. Use gc_min_interval_ms */
3460 
3461 		.procname	= "gc_min_interval",
3462 		.data		= &ip_rt_gc_min_interval,
3463 		.maxlen		= sizeof(int),
3464 		.mode		= 0644,
3465 		.proc_handler	= proc_dointvec_jiffies,
3466 	},
3467 	{
3468 		.procname	= "gc_min_interval_ms",
3469 		.data		= &ip_rt_gc_min_interval,
3470 		.maxlen		= sizeof(int),
3471 		.mode		= 0644,
3472 		.proc_handler	= proc_dointvec_ms_jiffies,
3473 	},
3474 	{
3475 		.procname	= "gc_timeout",
3476 		.data		= &ip_rt_gc_timeout,
3477 		.maxlen		= sizeof(int),
3478 		.mode		= 0644,
3479 		.proc_handler	= proc_dointvec_jiffies,
3480 	},
3481 	{
3482 		.procname	= "gc_interval",
3483 		.data		= &ip_rt_gc_interval,
3484 		.maxlen		= sizeof(int),
3485 		.mode		= 0644,
3486 		.proc_handler	= proc_dointvec_jiffies,
3487 	},
3488 	{
3489 		.procname	= "redirect_load",
3490 		.data		= &ip_rt_redirect_load,
3491 		.maxlen		= sizeof(int),
3492 		.mode		= 0644,
3493 		.proc_handler	= proc_dointvec,
3494 	},
3495 	{
3496 		.procname	= "redirect_number",
3497 		.data		= &ip_rt_redirect_number,
3498 		.maxlen		= sizeof(int),
3499 		.mode		= 0644,
3500 		.proc_handler	= proc_dointvec,
3501 	},
3502 	{
3503 		.procname	= "redirect_silence",
3504 		.data		= &ip_rt_redirect_silence,
3505 		.maxlen		= sizeof(int),
3506 		.mode		= 0644,
3507 		.proc_handler	= proc_dointvec,
3508 	},
3509 	{
3510 		.procname	= "error_cost",
3511 		.data		= &ip_rt_error_cost,
3512 		.maxlen		= sizeof(int),
3513 		.mode		= 0644,
3514 		.proc_handler	= proc_dointvec,
3515 	},
3516 	{
3517 		.procname	= "error_burst",
3518 		.data		= &ip_rt_error_burst,
3519 		.maxlen		= sizeof(int),
3520 		.mode		= 0644,
3521 		.proc_handler	= proc_dointvec,
3522 	},
3523 	{
3524 		.procname	= "gc_elasticity",
3525 		.data		= &ip_rt_gc_elasticity,
3526 		.maxlen		= sizeof(int),
3527 		.mode		= 0644,
3528 		.proc_handler	= proc_dointvec,
3529 	},
3530 	{
3531 		.procname	= "mtu_expires",
3532 		.data		= &ip_rt_mtu_expires,
3533 		.maxlen		= sizeof(int),
3534 		.mode		= 0644,
3535 		.proc_handler	= proc_dointvec_jiffies,
3536 	},
3537 	{
3538 		.procname	= "min_pmtu",
3539 		.data		= &ip_rt_min_pmtu,
3540 		.maxlen		= sizeof(int),
3541 		.mode		= 0644,
3542 		.proc_handler	= proc_dointvec_minmax,
3543 		.extra1		= &ip_min_valid_pmtu,
3544 	},
3545 	{
3546 		.procname	= "min_adv_mss",
3547 		.data		= &ip_rt_min_advmss,
3548 		.maxlen		= sizeof(int),
3549 		.mode		= 0644,
3550 		.proc_handler	= proc_dointvec,
3551 	},
3552 	{ }
3553 };
3554 
3555 static const char ipv4_route_flush_procname[] = "flush";
3556 
3557 static struct ctl_table ipv4_route_flush_table[] = {
3558 	{
3559 		.procname	= ipv4_route_flush_procname,
3560 		.maxlen		= sizeof(int),
3561 		.mode		= 0200,
3562 		.proc_handler	= ipv4_sysctl_rtcache_flush,
3563 	},
3564 	{ },
3565 };
3566 
3567 static __net_init int sysctl_route_net_init(struct net *net)
3568 {
3569 	struct ctl_table *tbl;
3570 
3571 	tbl = ipv4_route_flush_table;
3572 	if (!net_eq(net, &init_net)) {
3573 		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3574 		if (!tbl)
3575 			goto err_dup;
3576 
3577 		/* Don't export non-whitelisted sysctls to unprivileged users */
3578 		if (net->user_ns != &init_user_ns) {
3579 			if (tbl[0].procname != ipv4_route_flush_procname)
3580 				tbl[0].procname = NULL;
3581 		}
3582 	}
3583 	tbl[0].extra1 = net;
3584 
3585 	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3586 	if (!net->ipv4.route_hdr)
3587 		goto err_reg;
3588 	return 0;
3589 
3590 err_reg:
3591 	if (tbl != ipv4_route_flush_table)
3592 		kfree(tbl);
3593 err_dup:
3594 	return -ENOMEM;
3595 }
3596 
3597 static __net_exit void sysctl_route_net_exit(struct net *net)
3598 {
3599 	struct ctl_table *tbl;
3600 
3601 	tbl = net->ipv4.route_hdr->ctl_table_arg;
3602 	unregister_net_sysctl_table(net->ipv4.route_hdr);
3603 	BUG_ON(tbl == ipv4_route_flush_table);
3604 	kfree(tbl);
3605 }
3606 
3607 static __net_initdata struct pernet_operations sysctl_route_ops = {
3608 	.init = sysctl_route_net_init,
3609 	.exit = sysctl_route_net_exit,
3610 };
3611 #endif
3612 
3613 static __net_init int rt_genid_init(struct net *net)
3614 {
3615 	atomic_set(&net->ipv4.rt_genid, 0);
3616 	atomic_set(&net->fnhe_genid, 0);
3617 	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
3618 	return 0;
3619 }
3620 
3621 static __net_initdata struct pernet_operations rt_genid_ops = {
3622 	.init = rt_genid_init,
3623 };
3624 
3625 static int __net_init ipv4_inetpeer_init(struct net *net)
3626 {
3627 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3628 
3629 	if (!bp)
3630 		return -ENOMEM;
3631 	inet_peer_base_init(bp);
3632 	net->ipv4.peers = bp;
3633 	return 0;
3634 }
3635 
3636 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3637 {
3638 	struct inet_peer_base *bp = net->ipv4.peers;
3639 
3640 	net->ipv4.peers = NULL;
3641 	inetpeer_invalidate_tree(bp);
3642 	kfree(bp);
3643 }
3644 
3645 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3646 	.init	=	ipv4_inetpeer_init,
3647 	.exit	=	ipv4_inetpeer_exit,
3648 };
3649 
3650 #ifdef CONFIG_IP_ROUTE_CLASSID
3651 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3652 #endif /* CONFIG_IP_ROUTE_CLASSID */
3653 
3654 int __init ip_rt_init(void)
3655 {
3656 	void *idents_hash;
3657 	int cpu;
3658 
3659 	/* For modern hosts, this will use 2 MB of memory */
3660 	idents_hash = alloc_large_system_hash("IP idents",
3661 					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
3662 					      0,
3663 					      16, /* one bucket per 64 KB */
3664 					      HASH_ZERO,
3665 					      NULL,
3666 					      &ip_idents_mask,
3667 					      2048,
3668 					      256*1024);
3669 
3670 	ip_idents = idents_hash;
3671 
3672 	prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
3673 
3674 	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
3675 
3676 	for_each_possible_cpu(cpu) {
3677 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3678 
3679 		INIT_LIST_HEAD(&ul->head);
3680 		spin_lock_init(&ul->lock);
3681 	}
3682 #ifdef CONFIG_IP_ROUTE_CLASSID
3683 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3684 	if (!ip_rt_acct)
3685 		panic("IP: failed to allocate ip_rt_acct\n");
3686 #endif
3687 
3688 	ipv4_dst_ops.kmem_cachep =
3689 		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3690 				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3691 
3692 	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3693 
3694 	if (dst_entries_init(&ipv4_dst_ops) < 0)
3695 		panic("IP: failed to allocate ipv4_dst_ops counter\n");
3696 
3697 	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3698 		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3699 
3700 	ipv4_dst_ops.gc_thresh = ~0;
3701 	ip_rt_max_size = INT_MAX;
3702 
3703 	devinet_init();
3704 	ip_fib_init();
3705 
3706 	if (ip_rt_proc_init())
3707 		pr_err("Unable to create route proc files\n");
3708 #ifdef CONFIG_XFRM
3709 	xfrm_init();
3710 	xfrm4_init();
3711 #endif
3712 	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3713 		      RTNL_FLAG_DOIT_UNLOCKED);
3714 
3715 #ifdef CONFIG_SYSCTL
3716 	register_pernet_subsys(&sysctl_route_ops);
3717 #endif
3718 	register_pernet_subsys(&rt_genid_ops);
3719 	register_pernet_subsys(&ipv4_inetpeer_ops);
3720 	return 0;
3721 }
3722 
3723 #ifdef CONFIG_SYSCTL
3724 /*
3725  * We really need to sanitize the damn ipv4 init order, then all
3726  * this nonsense will go away.
3727  */
3728 void __init ip_static_sysctl_init(void)
3729 {
3730 	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3731 }
3732 #endif
3733