xref: /openbmc/linux/net/ipv4/icmp.c (revision da2ef666)
1 /*
2  *	NET3:	Implementation of the ICMP protocol layer.
3  *
4  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  *	Some of the function names and the icmp unreach table for this
12  *	module were derived from [icmp.c 1.0.11 06/02/93] by
13  *	Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
14  *	Other than that this module is a complete rewrite.
15  *
16  *	Fixes:
17  *	Clemens Fruhwirth	:	introduce global icmp rate limiting
18  *					with icmp type masking ability instead
19  *					of broken per type icmp timeouts.
20  *		Mike Shaver	:	RFC1122 checks.
21  *		Alan Cox	:	Multicast ping reply as self.
22  *		Alan Cox	:	Fix atomicity lockup in ip_build_xmit
23  *					call.
24  *		Alan Cox	:	Added 216,128 byte paths to the MTU
25  *					code.
26  *		Martin Mares	:	RFC1812 checks.
27  *		Martin Mares	:	Can be configured to follow redirects
28  *					if acting as a router _without_ a
29  *					routing protocol (RFC 1812).
30  *		Martin Mares	:	Echo requests may be configured to
31  *					be ignored (RFC 1812).
32  *		Martin Mares	:	Limitation of ICMP error message
33  *					transmit rate (RFC 1812).
34  *		Martin Mares	:	TOS and Precedence set correctly
35  *					(RFC 1812).
36  *		Martin Mares	:	Now copying as much data from the
37  *					original packet as we can without
38  *					exceeding 576 bytes (RFC 1812).
39  *	Willy Konynenberg	:	Transparent proxying support.
40  *		Keith Owens	:	RFC1191 correction for 4.2BSD based
41  *					path MTU bug.
42  *		Thomas Quinot	:	ICMP Dest Unreach codes up to 15 are
43  *					valid (RFC 1812).
44  *		Andi Kleen	:	Check all packet lengths properly
45  *					and moved all kfree_skb() up to
46  *					icmp_rcv.
47  *		Andi Kleen	:	Move the rate limit bookkeeping
48  *					into the dest entry and use a token
49  *					bucket filter (thanks to ANK). Make
50  *					the rates sysctl configurable.
51  *		Yu Tianli	:	Fixed two ugly bugs in icmp_send
52  *					- IP option length was accounted wrongly
53  *					- ICMP header length was not accounted
54  *					  at all.
55  *	Tristan Greaves	:	Added sysctl option to ignore bogus
56  *					broadcast responses from broken routers.
57  *
58  * To Fix:
59  *
60  *	- Should use skb_pull() instead of all the manual checking.
61  *	  This would also greatly simplify some upper layer error handlers. --AK
62  *
63  */
64 
65 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
66 
67 #include <linux/module.h>
68 #include <linux/types.h>
69 #include <linux/jiffies.h>
70 #include <linux/kernel.h>
71 #include <linux/fcntl.h>
72 #include <linux/socket.h>
73 #include <linux/in.h>
74 #include <linux/inet.h>
75 #include <linux/inetdevice.h>
76 #include <linux/netdevice.h>
77 #include <linux/string.h>
78 #include <linux/netfilter_ipv4.h>
79 #include <linux/slab.h>
80 #include <net/snmp.h>
81 #include <net/ip.h>
82 #include <net/route.h>
83 #include <net/protocol.h>
84 #include <net/icmp.h>
85 #include <net/tcp.h>
86 #include <net/udp.h>
87 #include <net/raw.h>
88 #include <net/ping.h>
89 #include <linux/skbuff.h>
90 #include <net/sock.h>
91 #include <linux/errno.h>
92 #include <linux/timer.h>
93 #include <linux/init.h>
94 #include <linux/uaccess.h>
95 #include <net/checksum.h>
96 #include <net/xfrm.h>
97 #include <net/inet_common.h>
98 #include <net/ip_fib.h>
99 #include <net/l3mdev.h>
100 
101 /*
102  *	Build xmit assembly blocks
103  */
104 
105 struct icmp_bxm {
106 	struct sk_buff *skb;	/* packet whose payload we quote in the reply */
107 	int offset;		/* offset of the quoted data within skb */
108 	int data_len;		/* number of bytes of skb data to quote */
109 
110 	struct {
111 		struct icmphdr icmph;	/* ICMP header to emit */
112 		__be32	       times[3];	/* originate/receive/transmit (RFC 792) */
113 	} data;
114 	int head_len;		/* sizeof(icmph); +12 for timestamp replies */
115 	struct ip_options_data replyopts;	/* IP options echoed from the offender */
116 };
117 
118 /* An array of errno for error messages from dest unreach. */
119 /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
120 
121 const struct icmp_err icmp_err_convert[] = {
122 	{
123 		.errno = ENETUNREACH,	/* ICMP_NET_UNREACH */
124 		.fatal = 0,
125 	},
126 	{
127 		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNREACH */
128 		.fatal = 0,
129 	},
130 	{
131 		.errno = ENOPROTOOPT	/* ICMP_PROT_UNREACH */,
132 		.fatal = 1,
133 	},
134 	{
135 		.errno = ECONNREFUSED,	/* ICMP_PORT_UNREACH */
136 		.fatal = 1,
137 	},
138 	{
139 		.errno = EMSGSIZE,	/* ICMP_FRAG_NEEDED */
140 		.fatal = 0,
141 	},
142 	{
143 		.errno = EOPNOTSUPP,	/* ICMP_SR_FAILED */
144 		.fatal = 0,
145 	},
146 	{
147 		.errno = ENETUNREACH,	/* ICMP_NET_UNKNOWN */
148 		.fatal = 1,
149 	},
150 	{
151 		.errno = EHOSTDOWN,	/* ICMP_HOST_UNKNOWN */
152 		.fatal = 1,
153 	},
154 	{
155 		.errno = ENONET,	/* ICMP_HOST_ISOLATED */
156 		.fatal = 1,
157 	},
158 	{
159 		.errno = ENETUNREACH,	/* ICMP_NET_ANO	*/
160 		.fatal = 1,
161 	},
162 	{
163 		.errno = EHOSTUNREACH,	/* ICMP_HOST_ANO */
164 		.fatal = 1,
165 	},
166 	{
167 		.errno = ENETUNREACH,	/* ICMP_NET_UNR_TOS */
168 		.fatal = 0,
169 	},
170 	{
171 		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNR_TOS */
172 		.fatal = 0,
173 	},
174 	{
175 		.errno = EHOSTUNREACH,	/* ICMP_PKT_FILTERED */
176 		.fatal = 1,
177 	},
178 	{
179 		.errno = EHOSTUNREACH,	/* ICMP_PREC_VIOLATION */
180 		.fatal = 1,
181 	},
182 	{
183 		.errno = EHOSTUNREACH,	/* ICMP_PREC_CUTOFF */
184 		.fatal = 1,
185 	},
186 };
187 EXPORT_SYMBOL(icmp_err_convert);
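
/*
 * Illustrative use (a sketch, not code from this file): a transport
 * error handler such as udp_err() maps an incoming dest-unreach code
 * to an errno through this table, e.g. for ICMP_PORT_UNREACH:
 *
 *	err = icmp_err_convert[code].errno;	(ECONNREFUSED)
 *	harderr = icmp_err_convert[code].fatal;	(treat as a hard error)
 */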
188 
189 /*
190  *	ICMP control array. This specifies what to do with each ICMP.
191  */
192 
193 struct icmp_control {
194 	bool (*handler)(struct sk_buff *skb);
195 	short   error;		/* This ICMP is classed as an error message */
196 };
197 
198 static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
199 
200 /*
201  *	The ICMP socket(s). This is the most convenient way to flow control
202  *	our ICMP output as well as maintain a clean interface throughout
203  *	all layers. All Socketless IP sends will soon be gone.
204  *
205  *	On SMP we have one ICMP socket per-cpu.
206  */
207 static struct sock *icmp_sk(struct net *net)
208 {
209 	return *this_cpu_ptr(net->ipv4.icmp_sk);
210 }
211 
212 /* Called with BH disabled */
213 static inline struct sock *icmp_xmit_lock(struct net *net)
214 {
215 	struct sock *sk;
216 
217 	sk = icmp_sk(net);
218 
219 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
220 		/* This can happen if the output path signals a
221 		 * dst_link_failure() for an outgoing ICMP packet.
222 		 */
223 		return NULL;
224 	}
225 	return sk;
226 }
227 
228 static inline void icmp_xmit_unlock(struct sock *sk)
229 {
230 	spin_unlock(&sk->sk_lock.slock);
231 }
232 
233 int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
234 int sysctl_icmp_msgs_burst __read_mostly = 50;
235 
236 static struct {
237 	spinlock_t	lock;
238 	u32		credit;
239 	u32		stamp;
240 } icmp_global = {
241 	.lock		= __SPIN_LOCK_UNLOCKED(icmp_global.lock),
242 };
243 
244 /**
245  * icmp_global_allow - Are we allowed to send one more ICMP message?
246  *
247  * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
248  * Returns false if we reached the limit and cannot send another packet.
249  * Note: called with BH disabled
250  */
251 bool icmp_global_allow(void)
252 {
253 	u32 credit, delta, incr = 0, now = (u32)jiffies;
254 	bool rc = false;
255 
256 	/* Check if token bucket is empty and cannot be refilled
257 	 * without taking the spinlock.
258 	 */
259 	if (!icmp_global.credit) {
260 		delta = min_t(u32, now - icmp_global.stamp, HZ);
261 		if (delta < HZ / 50)
262 			return false;
263 	}
264 
265 	spin_lock(&icmp_global.lock);
266 	delta = min_t(u32, now - icmp_global.stamp, HZ);
267 	if (delta >= HZ / 50) {
268 		incr = sysctl_icmp_msgs_per_sec * delta / HZ;
269 		if (incr)
270 			icmp_global.stamp = now;
271 	}
272 	credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
273 	if (credit) {
274 		credit--;
275 		rc = true;
276 	}
277 	icmp_global.credit = credit;
278 	spin_unlock(&icmp_global.lock);
279 	return rc;
280 }
281 EXPORT_SYMBOL(icmp_global_allow);
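
/*
 * Worked example (a sketch assuming HZ=1000 and the defaults above,
 * sysctl_icmp_msgs_per_sec == 1000 and sysctl_icmp_msgs_burst == 50):
 * the bucket refills at most once per HZ/50 == 20ms window; a 20ms
 * delta adds incr = 1000 * 20 / 1000 = 20 tokens, and the credit is
 * always clamped to the burst size, so even a long idle period never
 * buys more than 50 back-to-back ICMP messages.
 */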
282 
283 static bool icmpv4_mask_allow(struct net *net, int type, int code)
284 {
285 	if (type > NR_ICMP_TYPES)
286 		return true;
287 
288 	/* Don't limit PMTU discovery. */
289 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
290 		return true;
291 
292 	/* Limit if icmp type is enabled in ratemask. */
293 	if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask))
294 		return true;
295 
296 	return false;
297 }
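
/*
 * For example, with the default ratemask 0x1818 an ICMP_ECHOREPLY
 * (type 0, bit clear) makes icmpv4_mask_allow() return true and skip
 * rate limiting entirely, while an ICMP_DEST_UNREACH (type 3, bit
 * set) returns false and stays subject to the limits below.
 */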
298 
299 static bool icmpv4_global_allow(struct net *net, int type, int code)
300 {
301 	if (icmpv4_mask_allow(net, type, code))
302 		return true;
303 
304 	if (icmp_global_allow())
305 		return true;
306 
307 	return false;
308 }
309 
310 /*
311  *	Send an ICMP frame.
312  */
313 
314 static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
315 			       struct flowi4 *fl4, int type, int code)
316 {
317 	struct dst_entry *dst = &rt->dst;
318 	struct inet_peer *peer;
319 	bool rc = true;
320 	int vif;
321 
322 	if (icmpv4_mask_allow(net, type, code))
323 		goto out;
324 
325 	/* No rate limit on loopback */
326 	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
327 		goto out;
328 
329 	vif = l3mdev_master_ifindex(dst->dev);
330 	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
331 	rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
332 	if (peer)
333 		inet_putpeer(peer);
334 out:
335 	return rc;
336 }
337 
338 /*
339  *	Maintain the counters used in the SNMP statistics for outgoing ICMP
340  */
341 void icmp_out_count(struct net *net, unsigned char type)
342 {
343 	ICMPMSGOUT_INC_STATS(net, type);
344 	ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
345 }
346 
347 /*
348  *	Checksum each fragment, and on the first include the headers and final
349  *	checksum.
350  */
351 static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
352 			  struct sk_buff *skb)
353 {
354 	struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
355 	__wsum csum;
356 
357 	csum = skb_copy_and_csum_bits(icmp_param->skb,
358 				      icmp_param->offset + offset,
359 				      to, len, 0);
360 
361 	skb->csum = csum_block_add(skb->csum, csum, odd);
362 	if (icmp_pointers[icmp_param->data.icmph.type].error)
363 		nf_ct_attach(skb, icmp_param->skb);
364 	return 0;
365 }
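
/*
 * A sketch of why the above is safe: each fragment's partial one's
 * complement sum is merged with csum_block_add(), which compensates
 * for fragments starting at an odd offset, so the single fold done
 * later in icmp_push_reply() matches a checksum over the whole
 * message.
 */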
366 
367 static void icmp_push_reply(struct icmp_bxm *icmp_param,
368 			    struct flowi4 *fl4,
369 			    struct ipcm_cookie *ipc, struct rtable **rt)
370 {
371 	struct sock *sk;
372 	struct sk_buff *skb;
373 
374 	sk = icmp_sk(dev_net((*rt)->dst.dev));
375 	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
376 			   icmp_param->data_len+icmp_param->head_len,
377 			   icmp_param->head_len,
378 			   ipc, rt, MSG_DONTWAIT) < 0) {
379 		__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
380 		ip_flush_pending_frames(sk);
381 	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
382 		struct icmphdr *icmph = icmp_hdr(skb);
383 		__wsum csum = 0;
384 		struct sk_buff *skb1;
385 
386 		skb_queue_walk(&sk->sk_write_queue, skb1) {
387 			csum = csum_add(csum, skb1->csum);
388 		}
389 		csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
390 						 (char *)icmph,
391 						 icmp_param->head_len, csum);
392 		icmph->checksum = csum_fold(csum);
393 		skb->ip_summed = CHECKSUM_NONE;
394 		ip_push_pending_frames(sk, fl4);
395 	}
396 }
397 
398 /*
399  *	Driving logic for building and sending ICMP messages.
400  */
401 
402 static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
403 {
404 	struct ipcm_cookie ipc;
405 	struct rtable *rt = skb_rtable(skb);
406 	struct net *net = dev_net(rt->dst.dev);
407 	struct flowi4 fl4;
408 	struct sock *sk;
409 	struct inet_sock *inet;
410 	__be32 daddr, saddr;
411 	u32 mark = IP4_REPLY_MARK(net, skb->mark);
412 	int type = icmp_param->data.icmph.type;
413 	int code = icmp_param->data.icmph.code;
414 
415 	if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
416 		return;
417 
418 	/* Needed by both icmp_global_allow and icmp_xmit_lock */
419 	local_bh_disable();
420 
421 	/* global icmp_msgs_per_sec */
422 	if (!icmpv4_global_allow(net, type, code))
423 		goto out_bh_enable;
424 
425 	sk = icmp_xmit_lock(net);
426 	if (!sk)
427 		goto out_bh_enable;
428 	inet = inet_sk(sk);
429 
430 	icmp_param->data.icmph.checksum = 0;
431 
432 	ipcm_init(&ipc);
433 	inet->tos = ip_hdr(skb)->tos;
434 	sk->sk_mark = mark;
435 	daddr = ipc.addr = ip_hdr(skb)->saddr;
436 	saddr = fib_compute_spec_dst(skb);
437 
438 	if (icmp_param->replyopts.opt.opt.optlen) {
439 		ipc.opt = &icmp_param->replyopts.opt;
440 		if (ipc.opt->opt.srr)
441 			daddr = icmp_param->replyopts.opt.opt.faddr;
442 	}
443 	memset(&fl4, 0, sizeof(fl4));
444 	fl4.daddr = daddr;
445 	fl4.saddr = saddr;
446 	fl4.flowi4_mark = mark;
447 	fl4.flowi4_uid = sock_net_uid(net, NULL);
448 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
449 	fl4.flowi4_proto = IPPROTO_ICMP;
450 	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
451 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
452 	rt = ip_route_output_key(net, &fl4);
453 	if (IS_ERR(rt))
454 		goto out_unlock;
455 	if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
456 		icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
457 	ip_rt_put(rt);
458 out_unlock:
459 	icmp_xmit_unlock(sk);
460 out_bh_enable:
461 	local_bh_enable();
462 }
463 
464 static struct rtable *icmp_route_lookup(struct net *net,
465 					struct flowi4 *fl4,
466 					struct sk_buff *skb_in,
467 					const struct iphdr *iph,
468 					__be32 saddr, u8 tos, u32 mark,
469 					int type, int code,
470 					struct icmp_bxm *param)
471 {
472 	struct rtable *rt, *rt2;
473 	struct flowi4 fl4_dec;
474 	int err;
475 
476 	memset(fl4, 0, sizeof(*fl4));
477 	fl4->daddr = (param->replyopts.opt.opt.srr ?
478 		      param->replyopts.opt.opt.faddr : iph->saddr);
479 	fl4->saddr = saddr;
480 	fl4->flowi4_mark = mark;
481 	fl4->flowi4_uid = sock_net_uid(net, NULL);
482 	fl4->flowi4_tos = RT_TOS(tos);
483 	fl4->flowi4_proto = IPPROTO_ICMP;
484 	fl4->fl4_icmp_type = type;
485 	fl4->fl4_icmp_code = code;
486 	fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
487 
488 	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
489 	rt = ip_route_output_key_hash(net, fl4, skb_in);
490 	if (IS_ERR(rt))
491 		return rt;
492 
493 	/* No need to clone since we're just using its address. */
494 	rt2 = rt;
495 
496 	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
497 					   flowi4_to_flowi(fl4), NULL, 0);
498 	if (!IS_ERR(rt)) {
499 		if (rt != rt2)
500 			return rt;
501 	} else if (PTR_ERR(rt) == -EPERM) {
502 		rt = NULL;
503 	} else
504 		return rt;
505 
506 	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
507 	if (err)
508 		goto relookup_failed;
509 
510 	if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
511 				     fl4_dec.saddr) == RTN_LOCAL) {
512 		rt2 = __ip_route_output_key(net, &fl4_dec);
513 		if (IS_ERR(rt2))
514 			err = PTR_ERR(rt2);
515 	} else {
516 		struct flowi4 fl4_2 = {};
517 		unsigned long orefdst;
518 
519 		fl4_2.daddr = fl4_dec.saddr;
520 		rt2 = ip_route_output_key(net, &fl4_2);
521 		if (IS_ERR(rt2)) {
522 			err = PTR_ERR(rt2);
523 			goto relookup_failed;
524 		}
525 		/* Ugh! */
526 		orefdst = skb_in->_skb_refdst; /* save old refdst */
527 		skb_dst_set(skb_in, NULL);
528 		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
529 				     RT_TOS(tos), rt2->dst.dev);
530 
531 		dst_release(&rt2->dst);
532 		rt2 = skb_rtable(skb_in);
533 		skb_in->_skb_refdst = orefdst; /* restore old refdst */
534 	}
535 
536 	if (err)
537 		goto relookup_failed;
538 
539 	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
540 					    flowi4_to_flowi(&fl4_dec), NULL,
541 					    XFRM_LOOKUP_ICMP);
542 	if (!IS_ERR(rt2)) {
543 		dst_release(&rt->dst);
544 		memcpy(fl4, &fl4_dec, sizeof(*fl4));
545 		rt = rt2;
546 	} else if (PTR_ERR(rt2) == -EPERM) {
547 		if (rt)
548 			dst_release(&rt->dst);
549 		return rt2;
550 	} else {
551 		err = PTR_ERR(rt2);
552 		goto relookup_failed;
553 	}
554 	return rt;
555 
556 relookup_failed:
557 	if (rt)
558 		return rt;
559 	return ERR_PTR(err);
560 }
561 
562 /*
563  *	Send an ICMP message in response to a situation
564  *
565  *	RFC 1122: 3.2.2	MUST send at least the IP header and 8 bytes of header.
566  *		  MAY send more (we do).
567  *			MUST NOT change this header information.
568  *			MUST NOT reply to a multicast/broadcast IP address.
569  *			MUST NOT reply to a multicast/broadcast MAC address.
570  *			MUST reply to only the first fragment.
571  */
572 
573 void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
574 {
575 	struct iphdr *iph;
576 	int room;
577 	struct icmp_bxm icmp_param;
578 	struct rtable *rt = skb_rtable(skb_in);
579 	struct ipcm_cookie ipc;
580 	struct flowi4 fl4;
581 	__be32 saddr;
582 	u8  tos;
583 	u32 mark;
584 	struct net *net;
585 	struct sock *sk;
586 
587 	if (!rt)
588 		goto out;
589 	net = dev_net(rt->dst.dev);
590 
591 	/*
592 	 *	Find the original header. It is expected to be valid, but
593 	 *	check anyway: icmp_send is sometimes called from the most
594 	 *	obscure devices.
595 	 */
596 	iph = ip_hdr(skb_in);
597 
598 	if ((u8 *)iph < skb_in->head ||
599 	    (skb_network_header(skb_in) + sizeof(*iph)) >
600 	    skb_tail_pointer(skb_in))
601 		goto out;
602 
603 	/*
604 	 *	No replies to physical multicast/broadcast
605 	 */
606 	if (skb_in->pkt_type != PACKET_HOST)
607 		goto out;
608 
609 	/*
610 	 *	Now check at the protocol level
611 	 */
612 	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
613 		goto out;
614 
615 	/*
616 	 *	Only reply to fragment 0. We byte re-order the constant
617 	 *	mask for efficiency.
618 	 */
619 	if (iph->frag_off & htons(IP_OFFSET))
620 		goto out;
621 
622 	/*
623 	 *	If we send an ICMP error to an ICMP error, a mess would result.
624 	 */
625 	if (icmp_pointers[type].error) {
626 		/*
627 		 *	We are an error, check if we are replying to an
628 		 *	ICMP error
629 		 */
630 		if (iph->protocol == IPPROTO_ICMP) {
631 			u8 _inner_type, *itp;
632 
633 			itp = skb_header_pointer(skb_in,
634 						 skb_network_header(skb_in) +
635 						 (iph->ihl << 2) +
636 						 offsetof(struct icmphdr,
637 							  type) -
638 						 skb_in->data,
639 						 sizeof(_inner_type),
640 						 &_inner_type);
641 			if (!itp)
642 				goto out;
643 
644 			/*
645 			 *	Assume any unknown ICMP type is an error. This
646 			 *	isn't specified by the RFC, but think about it..
647 			 */
648 			if (*itp > NR_ICMP_TYPES ||
649 			    icmp_pointers[*itp].error)
650 				goto out;
651 		}
652 	}
653 
654 	/* Needed by both icmp_global_allow and icmp_xmit_lock */
655 	local_bh_disable();
656 
657 	/* Check the global sysctl_icmp_msgs_per_sec ratelimit, unless the
658 	 * incoming dev is loopback.  Even if the outgoing dev is not
659 	 * loopback, the per-peer ratelimit still applies (icmpv4_xrlim_allow).
660 	 */
661 	if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
662 	      !icmpv4_global_allow(net, type, code))
663 		goto out_bh_enable;
664 
665 	sk = icmp_xmit_lock(net);
666 	if (!sk)
667 		goto out_bh_enable;
668 
669 	/*
670 	 *	Construct source address and options.
671 	 */
672 
673 	saddr = iph->daddr;
674 	if (!(rt->rt_flags & RTCF_LOCAL)) {
675 		struct net_device *dev = NULL;
676 
677 		rcu_read_lock();
678 		if (rt_is_input_route(rt) &&
679 		    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
680 			dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
681 
682 		if (dev)
683 			saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
684 		else
685 			saddr = 0;
686 		rcu_read_unlock();
687 	}
688 
689 	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
690 					   IPTOS_PREC_INTERNETCONTROL) :
691 					  iph->tos;
692 	mark = IP4_REPLY_MARK(net, skb_in->mark);
693 
694 	if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
695 		goto out_unlock;
696 
697 
698 	/*
699 	 *	Prepare data for ICMP header.
700 	 */
701 
702 	icmp_param.data.icmph.type	 = type;
703 	icmp_param.data.icmph.code	 = code;
704 	icmp_param.data.icmph.un.gateway = info;
705 	icmp_param.data.icmph.checksum	 = 0;
706 	icmp_param.skb	  = skb_in;
707 	icmp_param.offset = skb_network_offset(skb_in);
708 	inet_sk(sk)->tos = tos;
709 	sk->sk_mark = mark;
710 	ipcm_init(&ipc);
711 	ipc.addr = iph->saddr;
712 	ipc.opt = &icmp_param.replyopts.opt;
713 
714 	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
715 			       type, code, &icmp_param);
716 	if (IS_ERR(rt))
717 		goto out_unlock;
718 
719 	/* peer icmp_ratelimit */
720 	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
721 		goto ende;
722 
723 	/* RFC says return as much as we can without exceeding 576 bytes. */
724 
725 	room = dst_mtu(&rt->dst);
726 	if (room > 576)
727 		room = 576;
728 	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
729 	room -= sizeof(struct icmphdr);
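	/* e.g. with a 1500 byte MTU and no IP options this leaves
	 * min(1500, 576) - 20 - 8 = 548 bytes of the offending packet
	 */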
730 
731 	icmp_param.data_len = skb_in->len - icmp_param.offset;
732 	if (icmp_param.data_len > room)
733 		icmp_param.data_len = room;
734 	icmp_param.head_len = sizeof(struct icmphdr);
735 
736 	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
737 ende:
738 	ip_rt_put(rt);
739 out_unlock:
740 	icmp_xmit_unlock(sk);
741 out_bh_enable:
742 	local_bh_enable();
743 out:;
744 }
745 EXPORT_SYMBOL(icmp_send);
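
/*
 * Typical caller sketch (the pattern used by forwarding paths, not
 * code from this file): when a packet with DF set exceeds the egress
 * MTU, the forwarder reports back with
 *
 *	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 *
 * and icmp_send() handles routing, rate limiting and quoting of the
 * offending header.
 */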
746 
747 
748 static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
749 {
750 	const struct iphdr *iph = (const struct iphdr *) skb->data;
751 	const struct net_protocol *ipprot;
752 	int protocol = iph->protocol;
753 
754 	/* Pull in the full IP header plus 8 bytes of protocol data, so
755 	 * the protocol handlers do not have to repeat this check.
756 	 */
757 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
758 		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
759 		return;
760 	}
761 
762 	raw_icmp_error(skb, protocol, info);
763 
764 	ipprot = rcu_dereference(inet_protos[protocol]);
765 	if (ipprot && ipprot->err_handler)
766 		ipprot->err_handler(skb, info);
767 }
768 
769 static bool icmp_tag_validation(int proto)
770 {
771 	bool ok;
772 
773 	rcu_read_lock();
774 	ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation;
775 	rcu_read_unlock();
776 	return ok;
777 }
778 
779 /*
780  *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and
781  *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_SOURCE_QUENCH, and
782  */
783 
784 static bool icmp_unreach(struct sk_buff *skb)
785 {
786 	const struct iphdr *iph;
787 	struct icmphdr *icmph;
788 	struct net *net;
789 	u32 info = 0;
790 
791 	net = dev_net(skb_dst(skb)->dev);
792 
793 	/*
794 	 *	Incomplete header?
795 	 *	Only checks for the IP header; upper levels should do
796 	 *	additional checks for longer headers.
797 	 */
798 
799 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
800 		goto out_err;
801 
802 	icmph = icmp_hdr(skb);
803 	iph   = (const struct iphdr *)skb->data;
804 
805 	if (iph->ihl < 5) /* Mangled header, drop. */
806 		goto out_err;
807 
808 	switch (icmph->type) {
809 	case ICMP_DEST_UNREACH:
810 		switch (icmph->code & 15) {
811 		case ICMP_NET_UNREACH:
812 		case ICMP_HOST_UNREACH:
813 		case ICMP_PROT_UNREACH:
814 		case ICMP_PORT_UNREACH:
815 			break;
816 		case ICMP_FRAG_NEEDED:
817 			/* for documentation of the ip_no_pmtu_disc
818 			 * values please see
819 			 * Documentation/networking/ip-sysctl.txt
820 			 */
821 			switch (net->ipv4.sysctl_ip_no_pmtu_disc) {
822 			default:
823 				net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
824 						    &iph->daddr);
825 				break;
826 			case 2:
827 				goto out;
828 			case 3:
829 				if (!icmp_tag_validation(iph->protocol))
830 					goto out;
831 				/* fall through */
832 			case 0:
833 				info = ntohs(icmph->un.frag.mtu);
834 			}
835 			break;
836 		case ICMP_SR_FAILED:
837 			net_dbg_ratelimited("%pI4: Source Route Failed\n",
838 					    &iph->daddr);
839 			break;
840 		default:
841 			break;
842 		}
843 		if (icmph->code > NR_ICMP_UNREACH)
844 			goto out;
845 		break;
846 	case ICMP_PARAMETERPROB:
847 		info = ntohl(icmph->un.gateway) >> 24;
848 		break;
849 	case ICMP_TIME_EXCEEDED:
850 		__ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
851 		if (icmph->code == ICMP_EXC_FRAGTIME)
852 			goto out;
853 		break;
854 	}
855 
856 	/*
857 	 *	Throw it at our lower layers
858 	 *
859 	 *	RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
860 	 *		  header.
861 	 *	RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
862 	 *		  transport layer.
863 	 *	RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
864 	 *		  transport layer.
865 	 */
866 
867 	/*
868 	 *	Check the other end isn't violating RFC 1122. Some routers send
869 	 *	bogus responses to broadcast frames. If you see this message
870 	 *	first check your netmask matches at both ends, if it does then
871 	 *	get the other vendor to fix their kit.
872 	 */
873 
874 	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
875 	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
876 		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
877 				     &ip_hdr(skb)->saddr,
878 				     icmph->type, icmph->code,
879 				     &iph->daddr, skb->dev->name);
880 		goto out;
881 	}
882 
883 	icmp_socket_deliver(skb, info);
884 
885 out:
886 	return true;
887 out_err:
888 	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
889 	return false;
890 }
891 
892 
893 /*
894  *	Handle ICMP_REDIRECT.
895  */
896 
897 static bool icmp_redirect(struct sk_buff *skb)
898 {
899 	if (skb->len < sizeof(struct iphdr)) {
900 		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
901 		return false;
902 	}
903 
904 	if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
905 		/* there ought to be a stat */
906 		return false;
907 	}
908 
909 	icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
910 	return true;
911 }
912 
913 /*
914  *	Handle ICMP_ECHO ("ping") requests.
915  *
916  *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
917  *		  requests.
918  *	RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
919  *		  included in the reply.
920  *	RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
921  *		  echo requests, MUST have default=NOT.
922  *	See also WRT handling of options once they are done and working.
923  */
924 
925 static bool icmp_echo(struct sk_buff *skb)
926 {
927 	struct net *net;
928 
929 	net = dev_net(skb_dst(skb)->dev);
930 	if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
931 		struct icmp_bxm icmp_param;
932 
933 		icmp_param.data.icmph	   = *icmp_hdr(skb);
934 		icmp_param.data.icmph.type = ICMP_ECHOREPLY;
935 		icmp_param.skb		   = skb;
936 		icmp_param.offset	   = 0;
937 		icmp_param.data_len	   = skb->len;
938 		icmp_param.head_len	   = sizeof(struct icmphdr);
939 		icmp_reply(&icmp_param, skb);
940 	}
941 	/* should there be an ICMP stat for ignored echoes? */
942 	return true;
943 }
944 
945 /*
946  *	Handle ICMP Timestamp requests.
947  *	RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
948  *		  SHOULD be in the kernel for minimum random latency.
949  *		  MUST be accurate to a few minutes.
950  *		  MUST be updated at least at 15Hz.
951  */
952 static bool icmp_timestamp(struct sk_buff *skb)
953 {
954 	struct icmp_bxm icmp_param;
955 	/*
956 	 *	Too short.
957 	 */
958 	if (skb->len < 4)
959 		goto out_err;
960 
961 	/*
962 	 *	Fill in the current time as ms since midnight UT:
963 	 */
964 	icmp_param.data.times[1] = inet_current_timestamp();
965 	icmp_param.data.times[2] = icmp_param.data.times[1];
966 
967 	BUG_ON(skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4));
968 
969 	icmp_param.data.icmph	   = *icmp_hdr(skb);
970 	icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
971 	icmp_param.data.icmph.code = 0;
972 	icmp_param.skb		   = skb;
973 	icmp_param.offset	   = 0;
974 	icmp_param.data_len	   = 0;
975 	icmp_param.head_len	   = sizeof(struct icmphdr) + 12; /* 3 timestamps */
976 	icmp_reply(&icmp_param, skb);
977 	return true;
978 
979 out_err:
980 	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
981 	return false;
982 }
983 
984 static bool icmp_discard(struct sk_buff *skb)
985 {
986 	/* pretend it was a success */
987 	return true;
988 }
989 
990 /*
991  *	Deal with incoming ICMP packets.
992  */
993 int icmp_rcv(struct sk_buff *skb)
994 {
995 	struct icmphdr *icmph;
996 	struct rtable *rt = skb_rtable(skb);
997 	struct net *net = dev_net(rt->dst.dev);
998 	bool success;
999 
1000 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1001 		struct sec_path *sp = skb_sec_path(skb);
1002 		int nh;
1003 
1004 		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
1005 				 XFRM_STATE_ICMP))
1006 			goto drop;
1007 
1008 		if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
1009 			goto drop;
1010 
1011 		nh = skb_network_offset(skb);
1012 		skb_set_network_header(skb, sizeof(*icmph));
1013 
1014 		if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
1015 			goto drop;
1016 
1017 		skb_set_network_header(skb, nh);
1018 	}
1019 
1020 	__ICMP_INC_STATS(net, ICMP_MIB_INMSGS);
1021 
1022 	if (skb_checksum_simple_validate(skb))
1023 		goto csum_error;
1024 
1025 	if (!pskb_pull(skb, sizeof(*icmph)))
1026 		goto error;
1027 
1028 	icmph = icmp_hdr(skb);
1029 
1030 	ICMPMSGIN_INC_STATS(net, icmph->type);
1031 	/*
1032 	 *	18 is the highest 'known' ICMP type. Anything else is a mystery
1033 	 *
1034 	 *	RFC 1122: 3.2.2  Unknown ICMP message types MUST be silently
1035 	 *		  discarded.
1036 	 */
1037 	if (icmph->type > NR_ICMP_TYPES)
1038 		goto error;
1039 
1040 
1041 	/*
1042 	 *	Parse the ICMP message
1043 	 */
1044 
1045 	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
1046 		/*
1047 		 *	RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
1048 		 *	  silently ignored (we let the user decide with a sysctl).
1049 		 *	RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
1050 		 *	  discarded if to broadcast/multicast.
1051 		 */
1052 		if ((icmph->type == ICMP_ECHO ||
1053 		     icmph->type == ICMP_TIMESTAMP) &&
1054 		    net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
1055 			goto error;
1056 		}
1057 		if (icmph->type != ICMP_ECHO &&
1058 		    icmph->type != ICMP_TIMESTAMP &&
1059 		    icmph->type != ICMP_ADDRESS &&
1060 		    icmph->type != ICMP_ADDRESSREPLY) {
1061 			goto error;
1062 		}
1063 	}
1064 
1065 	success = icmp_pointers[icmph->type].handler(skb);
1066 
1067 	if (success)  {
1068 		consume_skb(skb);
1069 		return NET_RX_SUCCESS;
1070 	}
1071 
1072 drop:
1073 	kfree_skb(skb);
1074 	return NET_RX_DROP;
1075 csum_error:
1076 	__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
1077 error:
1078 	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
1079 	goto drop;
1080 }
1081 
1082 void icmp_err(struct sk_buff *skb, u32 info)
1083 {
1084 	struct iphdr *iph = (struct iphdr *)skb->data;
1085 	int offset = iph->ihl<<2;
1086 	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
1087 	int type = icmp_hdr(skb)->type;
1088 	int code = icmp_hdr(skb)->code;
1089 	struct net *net = dev_net(skb->dev);
1090 
1091 	/*
1092 	 * Use ping_err to handle all ICMP errors except those
1093 	 * triggered by an ICMP_ECHOREPLY, which the kernel itself sends.
1094 	 */
1095 	if (icmph->type != ICMP_ECHOREPLY) {
1096 		ping_err(skb, offset, info);
1097 		return;
1098 	}
1099 
1100 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
1101 		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0);
1102 	else if (type == ICMP_REDIRECT)
1103 		ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0);
1104 }
1105 
1106 /*
1107  *	This table is the definition of how we handle ICMP.
1108  */
1109 static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
1110 	[ICMP_ECHOREPLY] = {
1111 		.handler = ping_rcv,
1112 	},
1113 	[1] = {
1114 		.handler = icmp_discard,
1115 		.error = 1,
1116 	},
1117 	[2] = {
1118 		.handler = icmp_discard,
1119 		.error = 1,
1120 	},
1121 	[ICMP_DEST_UNREACH] = {
1122 		.handler = icmp_unreach,
1123 		.error = 1,
1124 	},
1125 	[ICMP_SOURCE_QUENCH] = {
1126 		.handler = icmp_unreach,
1127 		.error = 1,
1128 	},
1129 	[ICMP_REDIRECT] = {
1130 		.handler = icmp_redirect,
1131 		.error = 1,
1132 	},
1133 	[6] = {
1134 		.handler = icmp_discard,
1135 		.error = 1,
1136 	},
1137 	[7] = {
1138 		.handler = icmp_discard,
1139 		.error = 1,
1140 	},
1141 	[ICMP_ECHO] = {
1142 		.handler = icmp_echo,
1143 	},
1144 	[9] = {
1145 		.handler = icmp_discard,
1146 		.error = 1,
1147 	},
1148 	[10] = {
1149 		.handler = icmp_discard,
1150 		.error = 1,
1151 	},
1152 	[ICMP_TIME_EXCEEDED] = {
1153 		.handler = icmp_unreach,
1154 		.error = 1,
1155 	},
1156 	[ICMP_PARAMETERPROB] = {
1157 		.handler = icmp_unreach,
1158 		.error = 1,
1159 	},
1160 	[ICMP_TIMESTAMP] = {
1161 		.handler = icmp_timestamp,
1162 	},
1163 	[ICMP_TIMESTAMPREPLY] = {
1164 		.handler = icmp_discard,
1165 	},
1166 	[ICMP_INFO_REQUEST] = {
1167 		.handler = icmp_discard,
1168 	},
1169 	[ICMP_INFO_REPLY] = {
1170 		.handler = icmp_discard,
1171 	},
1172 	[ICMP_ADDRESS] = {
1173 		.handler = icmp_discard,
1174 	},
1175 	[ICMP_ADDRESSREPLY] = {
1176 		.handler = icmp_discard,
1177 	},
1178 };
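
/*
 * The numeric slots above ([1], [2], [6], [7], [9] and [10]) are
 * ICMP types this layer does not implement (9 and 10, router
 * advertisement/solicitation, for instance, are handled in
 * userspace), hence icmp_discard.
 */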
1179 
1180 static void __net_exit icmp_sk_exit(struct net *net)
1181 {
1182 	int i;
1183 
1184 	for_each_possible_cpu(i)
1185 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
1186 	free_percpu(net->ipv4.icmp_sk);
1187 	net->ipv4.icmp_sk = NULL;
1188 }
1189 
1190 static int __net_init icmp_sk_init(struct net *net)
1191 {
1192 	int i, err;
1193 
1194 	net->ipv4.icmp_sk = alloc_percpu(struct sock *);
1195 	if (!net->ipv4.icmp_sk)
1196 		return -ENOMEM;
1197 
1198 	for_each_possible_cpu(i) {
1199 		struct sock *sk;
1200 
1201 		err = inet_ctl_sock_create(&sk, PF_INET,
1202 					   SOCK_RAW, IPPROTO_ICMP, net);
1203 		if (err < 0)
1204 			goto fail;
1205 
1206 		*per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;
1207 
1208 		/* Enough space for 2 64K ICMP packets, including
1209 		 * sk_buff/skb_shared_info struct overhead.
1210 		 */
1211 		sk->sk_sndbuf =	2 * SKB_TRUESIZE(64 * 1024);
1212 
1213 		/*
1214 		 * Speed up sock_wfree()
1215 		 */
1216 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1217 		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
1218 	}
1219 
1220 	/* Control parameters for ECHO replies. */
1221 	net->ipv4.sysctl_icmp_echo_ignore_all = 0;
1222 	net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;
1223 
1224 	/* Control parameter - ignore bogus broadcast responses? */
1225 	net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;
1226 
1227 	/*
1228 	 * 	Configurable global rate limit.
1229 	 *
1230 	 *	ratelimit defines tokens/packet consumed for the dst->rate_token
1231 	 *	bucket; ratemask defines which ICMP types are rate limited
1232 	 *	by setting the corresponding bit.
1233 	 *
1234 	 *	default:
1235 	 *	dest unreachable (3), source quench (4),
1236 	 *	time exceeded (11), parameter problem (12)
1237 	 */
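	/* i.e. 0x1818 == (1 << 3) | (1 << 4) | (1 << 11) | (1 << 12) */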
1238 
1239 	net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
1240 	net->ipv4.sysctl_icmp_ratemask = 0x1818;
1241 	net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
1242 
1243 	return 0;
1244 
1245 fail:
1246 	for_each_possible_cpu(i)
1247 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
1248 	free_percpu(net->ipv4.icmp_sk);
1249 	return err;
1250 }
1251 
1252 static struct pernet_operations __net_initdata icmp_sk_ops = {
1253        .init = icmp_sk_init,
1254        .exit = icmp_sk_exit,
1255 };
1256 
1257 int __init icmp_init(void)
1258 {
1259 	return register_pernet_subsys(&icmp_sk_ops);
1260 }
1261