// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3:	Implementation of the ICMP protocol layer.
 *
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *	Some of the function names and the icmp unreach table for this
 *	module were derived from [icmp.c 1.0.11 06/02/93] by
 *	Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
 *	Other than that this module is a complete rewrite.
 *
 *	Fixes:
 *	Clemens Fruhwirth	:	introduce global icmp rate limiting
 *					with icmp type masking ability instead
 *					of broken per type icmp timeouts.
 *	Mike Shaver		:	RFC1122 checks.
 *	Alan Cox		:	Multicast ping reply as self.
 *	Alan Cox		:	Fix atomicity lockup in ip_build_xmit
 *					call.
 *	Alan Cox		:	Added 216,128 byte paths to the MTU
 *					code.
 *	Martin Mares		:	RFC1812 checks.
 *	Martin Mares		:	Can be configured to follow redirects
 *					if acting as a router _without_ a
 *					routing protocol (RFC 1812).
 *	Martin Mares		:	Echo requests may be configured to
 *					be ignored (RFC 1812).
 *	Martin Mares		:	Limitation of ICMP error message
 *					transmit rate (RFC 1812).
 *	Martin Mares		:	TOS and Precedence set correctly
 *					(RFC 1812).
 *	Martin Mares		:	Now copying as much data from the
 *					original packet as we can without
 *					exceeding 576 bytes (RFC 1812).
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Keith Owens		:	RFC1191 correction for 4.2BSD based
 *					path MTU bug.
 *	Thomas Quinot		:	ICMP Dest Unreach codes up to 15 are
 *					valid (RFC 1812).
 *	Andi Kleen		:	Check all packet lengths properly
 *					and moved all kfree_skb() up to
 *					icmp_rcv.
 *	Andi Kleen		:	Move the rate limit bookkeeping
 *					into the dest entry and use a token
 *					bucket filter (thanks to ANK). Make
 *					the rates sysctl configurable.
 *	Yu Tianli		:	Fixed two ugly bugs in icmp_send
 *					- IP option length was accounted wrongly
 *					- ICMP header length was not accounted
 *					at all.
 *	Tristan Greaves		:	Added sysctl option to ignore bogus
 *					broadcast responses from broken routers.
 *
 * To Fix:
 *
 *	- Should use skb_pull() instead of all the manual checking.
 *	  This would also greatly simplify some upper layer error handlers. --AK
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>

/*
 *	Build xmit assembly blocks
 */

struct icmp_bxm {
	struct sk_buff *skb;		/* packet being replied to / reported */
	int offset;			/* offset of the payload within skb */
	int data_len;			/* number of payload bytes to copy */

	struct {
		struct icmphdr icmph;	/* ICMP header of the reply */
		__be32 times[3];	/* timestamps for ICMP_TIMESTAMPREPLY */
	} data;
	int head_len;			/* bytes of 'data' used as header */
	struct ip_options_data replyopts;	/* echoed IP options */
};

/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errors'. */

const struct icmp_err icmp_err_convert[] = {
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNREACH */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNREACH */
		.fatal = 0,
	},
	{
		.errno = ENOPROTOOPT,	/* ICMP_PROT_UNREACH */
		.fatal = 1,
	},
	{
		.errno = ECONNREFUSED,	/* ICMP_PORT_UNREACH */
		.fatal = 1,
	},
	{
		.errno = EMSGSIZE,	/* ICMP_FRAG_NEEDED */
		.fatal = 0,
	},
	{
		.errno = EOPNOTSUPP,	/* ICMP_SR_FAILED */
		.fatal = 0,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = EHOSTDOWN,	/* ICMP_HOST_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = ENONET,	/* ICMP_HOST_ISOLATED */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_ANO */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_ANO */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PKT_FILTERED */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_VIOLATION */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_CUTOFF */
		.fatal = 1,
	},
};
EXPORT_SYMBOL(icmp_err_convert);
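
/*
 * Typical use by a transport protocol's error handler (a minimal sketch;
 * the surrounding bookkeeping varies per protocol):
 *
 *	if (code <= NR_ICMP_UNREACH) {
 *		int err = icmp_err_convert[code].errno;
 *
 *		sk->sk_err = err;
 *		if (icmp_err_convert[code].fatal)
 *			sk_error_report(sk);
 *	}
 */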

/*
 *	ICMP control array. This specifies what to do with each ICMP.
 */

struct icmp_control {
	enum skb_drop_reason (*handler)(struct sk_buff *skb);
	short	error;		/* This ICMP is classed as an error message */
};

static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];

static DEFINE_PER_CPU(struct sock *, ipv4_icmp_sk);

/* Called with BH disabled */
static inline struct sock *icmp_xmit_lock(struct net *net)
{
	struct sock *sk;

	sk = this_cpu_read(ipv4_icmp_sk);

	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path signals a
		 * dst_link_failure() for an outgoing ICMP packet.
		 */
		return NULL;
	}
	sock_net_set(sk, net);
	return sk;
}

static inline void icmp_xmit_unlock(struct sock *sk)
{
	sock_net_set(sk, &init_net);
	spin_unlock(&sk->sk_lock.slock);
}

int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
int sysctl_icmp_msgs_burst __read_mostly = 50;

static struct {
	atomic_t	credit;
	u32		stamp;
} icmp_global;

/**
 * icmp_global_allow - Are we allowed to send one more ICMP message?
 *
 * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
 * Returns false if we reached the limit and cannot send another packet.
 * Works in tandem with icmp_global_consume().
 */
bool icmp_global_allow(void)
{
	u32 delta, now, oldstamp;
	int incr, new, old;

	/* Note: many cpus could find this condition true.
	 * Then later icmp_global_consume() could consume more credits,
	 * this is an acceptable race.
	 */
	if (atomic_read(&icmp_global.credit) > 0)
		return true;

	now = jiffies;
	oldstamp = READ_ONCE(icmp_global.stamp);
	delta = min_t(u32, now - oldstamp, HZ);
	if (delta < HZ / 50)
		return false;

	incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;
	if (!incr)
		return false;

	if (cmpxchg(&icmp_global.stamp, oldstamp, now) == oldstamp) {
		old = atomic_read(&icmp_global.credit);
		do {
			new = min(old + incr, READ_ONCE(sysctl_icmp_msgs_burst));
		} while (!atomic_try_cmpxchg(&icmp_global.credit, &old, new));
	}
	return true;
}
EXPORT_SYMBOL(icmp_global_allow);
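
/*
 * Worked example of the refill above, assuming HZ = 1000 and the default
 * sysctls (msgs_per_sec = 1000, msgs_burst = 50): after a 30 jiffy quiet
 * period, delta = 30 and incr = 1000 * 30 / 1000 = 30 credits are added,
 * capped at the burst of 50. A gap shorter than HZ / 50 (20 jiffies here)
 * refills nothing at all.
 */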

void icmp_global_consume(void)
{
	int credits = get_random_u32_below(3);

	/* Note: this might make icmp_global.credit negative. */
	if (credits)
		atomic_sub(credits, &icmp_global.credit);
}
EXPORT_SYMBOL(icmp_global_consume);

static bool icmpv4_mask_allow(struct net *net, int type, int code)
{
	if (type > NR_ICMP_TYPES)
		return true;

	/* Don't limit PMTU discovery. */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		return true;

	/* Limit if icmp type is enabled in ratemask. */
	if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask)))
		return true;

	return false;
}
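
/*
 * With the default ratemask of 0x1818 == (1 << ICMP_DEST_UNREACH) |
 * (1 << ICMP_SOURCE_QUENCH) | (1 << ICMP_TIME_EXCEEDED) |
 * (1 << ICMP_PARAMETERPROB), only types 3, 4, 11 and 12 are subject to
 * rate limiting; every other type passes icmpv4_mask_allow() freely.
 */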

static bool icmpv4_global_allow(struct net *net, int type, int code,
				bool *apply_ratelimit)
{
	if (icmpv4_mask_allow(net, type, code))
		return true;

	if (icmp_global_allow()) {
		*apply_ratelimit = true;
		return true;
	}
	__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
	return false;
}

/*
 *	Send an ICMP frame.
 */

static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
			       struct flowi4 *fl4, int type, int code,
			       bool apply_ratelimit)
{
	struct dst_entry *dst = &rt->dst;
	struct inet_peer *peer;
	bool rc = true;
	int vif;

	if (!apply_ratelimit)
		return true;

	/* No rate limit on loopback */
	if (dst->dev && (dst->dev->flags & IFF_LOOPBACK))
		goto out;

	vif = l3mdev_master_ifindex(dst->dev);
	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
	rc = inet_peer_xrlim_allow(peer,
				   READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
	if (peer)
		inet_putpeer(peer);
out:
	if (!rc)
		__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
	else
		icmp_global_consume();
	return rc;
}
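
/*
 * With the default sysctl_icmp_ratelimit of 1 * HZ, the per-destination
 * token bucket consulted by inet_peer_xrlim_allow() admits roughly one
 * ICMP packet per second (plus a small burst) to any single peer.
 */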

/*
 *	Maintain the counters used in the SNMP statistics for outgoing ICMP
 */
void icmp_out_count(struct net *net, unsigned char type)
{
	ICMPMSGOUT_INC_STATS(net, type);
	ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}

/*
 *	Checksum each fragment, and on the first include the headers and final
 *	checksum.
 */
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
			  struct sk_buff *skb)
{
	struct icmp_bxm *icmp_param = from;
	__wsum csum;

	csum = skb_copy_and_csum_bits(icmp_param->skb,
				      icmp_param->offset + offset,
				      to, len);

	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (icmp_pointers[icmp_param->data.icmph.type].error)
		nf_ct_attach(skb, icmp_param->skb);
	return 0;
}

static void icmp_push_reply(struct sock *sk,
			    struct icmp_bxm *icmp_param,
			    struct flowi4 *fl4,
			    struct ipcm_cookie *ipc, struct rtable **rt)
{
	struct sk_buff *skb;

	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
			   icmp_param->data_len + icmp_param->head_len,
			   icmp_param->head_len,
			   ipc, rt, MSG_DONTWAIT) < 0) {
		__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
		ip_flush_pending_frames(sk);
	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		struct icmphdr *icmph = icmp_hdr(skb);
		__wsum csum;
		struct sk_buff *skb1;

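		/* Sum the prepared ICMP header while copying it into place,
		 * then fold in the checksum of every queued fragment before
		 * pushing the frames out.
		 */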
		csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
						 (char *)icmph,
						 icmp_param->head_len);
		skb_queue_walk(&sk->sk_write_queue, skb1) {
			csum = csum_add(csum, skb1->csum);
		}
		icmph->checksum = csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, fl4);
	}
}

/*
 *	Driving logic for building and sending ICMP messages.
 */

static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
	struct ipcm_cookie ipc;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);
	bool apply_ratelimit = false;
	struct flowi4 fl4;
	struct sock *sk;
	struct inet_sock *inet;
	__be32 daddr, saddr;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);
	int type = icmp_param->data.icmph.type;
	int code = icmp_param->data.icmph.code;

	if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
		return;

	/* Needed by both icmpv4_global_allow and icmp_xmit_lock */
	local_bh_disable();

	/* Is the global icmp_msgs_per_sec budget exhausted? */
	if (!icmpv4_global_allow(net, type, code, &apply_ratelimit))
		goto out_bh_enable;

	sk = icmp_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;
	inet = inet_sk(sk);

	icmp_param->data.icmph.checksum = 0;

	ipcm_init(&ipc);
	inet->tos = ip_hdr(skb)->tos;
	ipc.sockc.mark = mark;
	daddr = ipc.addr = ip_hdr(skb)->saddr;
	saddr = fib_compute_spec_dst(skb);

	if (icmp_param->replyopts.opt.opt.optlen) {
		ipc.opt = &icmp_param->replyopts.opt;
		if (ipc.opt->opt.srr)
			daddr = icmp_param->replyopts.opt.opt.faddr;
	}
	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = sock_net_uid(net, NULL);
	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
	fl4.flowi4_proto = IPPROTO_ICMP;
	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		goto out_unlock;
	if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
		icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt);
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
}

/*
 * The device used for looking up the routing table when sending an ICMP
 * error is preferably the source device, whenever it is set; this should
 * ensure the ICMP error can be sent back to the source host. Otherwise
 * the device of the destination route is used, else the main routing
 * table (index 0).
 */
static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
{
	struct net_device *route_lookup_dev = NULL;

	if (skb->dev)
		route_lookup_dev = skb->dev;
	else if (skb_dst(skb))
		route_lookup_dev = skb_dst(skb)->dev;
	return route_lookup_dev;
}

static struct rtable *icmp_route_lookup(struct net *net,
					struct flowi4 *fl4,
					struct sk_buff *skb_in,
					const struct iphdr *iph,
					__be32 saddr, u8 tos, u32 mark,
					int type, int code,
					struct icmp_bxm *param)
{
	struct net_device *route_lookup_dev;
	struct rtable *rt, *rt2;
	struct flowi4 fl4_dec;
	int err;

	memset(fl4, 0, sizeof(*fl4));
	fl4->daddr = (param->replyopts.opt.opt.srr ?
		      param->replyopts.opt.opt.faddr : iph->saddr);
	fl4->saddr = saddr;
	fl4->flowi4_mark = mark;
	fl4->flowi4_uid = sock_net_uid(net, NULL);
	fl4->flowi4_tos = RT_TOS(tos);
	fl4->flowi4_proto = IPPROTO_ICMP;
	fl4->fl4_icmp_type = type;
	fl4->fl4_icmp_code = code;
	route_lookup_dev = icmp_get_route_lookup_dev(skb_in);
	fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev);

	security_skb_classify_flow(skb_in, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_key_hash(net, fl4, skb_in);
	if (IS_ERR(rt))
		return rt;

	/* No need to clone since we're just using its address. */
	rt2 = rt;

	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
					   flowi4_to_flowi(fl4), NULL, 0);
	if (!IS_ERR(rt)) {
		if (rt != rt2)
			return rt;
	} else if (PTR_ERR(rt) == -EPERM) {
		rt = NULL;
	} else
		return rt;

	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
	if (err)
		goto relookup_failed;

	if (inet_addr_type_dev_table(net, route_lookup_dev,
				     fl4_dec.saddr) == RTN_LOCAL) {
		rt2 = __ip_route_output_key(net, &fl4_dec);
		if (IS_ERR(rt2))
			err = PTR_ERR(rt2);
	} else {
		struct flowi4 fl4_2 = {};
		unsigned long orefdst;

		fl4_2.daddr = fl4_dec.saddr;
		rt2 = ip_route_output_key(net, &fl4_2);
		if (IS_ERR(rt2)) {
			err = PTR_ERR(rt2);
			goto relookup_failed;
		}
		/* Ugh! */
		orefdst = skb_in->_skb_refdst; /* save old refdst */
		skb_dst_set(skb_in, NULL);
		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
				     RT_TOS(tos), rt2->dst.dev);

		dst_release(&rt2->dst);
		rt2 = skb_rtable(skb_in);
		skb_in->_skb_refdst = orefdst; /* restore old refdst */
	}

	if (err)
		goto relookup_failed;

	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
					    flowi4_to_flowi(&fl4_dec), NULL,
					    XFRM_LOOKUP_ICMP);
	if (!IS_ERR(rt2)) {
		dst_release(&rt->dst);
		memcpy(fl4, &fl4_dec, sizeof(*fl4));
		rt = rt2;
	} else if (PTR_ERR(rt2) == -EPERM) {
		if (rt)
			dst_release(&rt->dst);
		return rt2;
	} else {
		err = PTR_ERR(rt2);
		goto relookup_failed;
	}
	return rt;

relookup_failed:
	if (rt)
		return rt;
	return ERR_PTR(err);
}

/*
 *	Send an ICMP message in response to a situation
 *
 *	RFC 1122: 3.2.2	MUST send at least the IP header and 8 bytes of header.
 *		  MAY send more (we do).
 *		  MUST NOT change this header information.
 *		  MUST NOT reply to a multicast/broadcast IP address.
 *		  MUST NOT reply to a multicast/broadcast MAC address.
 *		  MUST reply to only the first fragment.
 */

void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
		 const struct ip_options *opt)
{
	struct iphdr *iph;
	int room;
	struct icmp_bxm icmp_param;
	struct rtable *rt = skb_rtable(skb_in);
	bool apply_ratelimit = false;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	__be32 saddr;
	u8 tos;
	u32 mark;
	struct net *net;
	struct sock *sk;

	if (!rt)
		goto out;

	if (rt->dst.dev)
		net = dev_net(rt->dst.dev);
	else if (skb_in->dev)
		net = dev_net(skb_in->dev);
	else
		goto out;

	/*
	 *	Find the original header. It is expected to be valid, of course.
	 *	Check this, icmp_send is called from the most obscure devices
	 *	sometimes.
	 */
	iph = ip_hdr(skb_in);

	if ((u8 *)iph < skb_in->head ||
	    (skb_network_header(skb_in) + sizeof(*iph)) >
	    skb_tail_pointer(skb_in))
		goto out;

	/*
	 *	No replies to physical multicast/broadcast
	 */
	if (skb_in->pkt_type != PACKET_HOST)
		goto out;

	/*
	 *	Now check at the protocol level
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto out;

	/*
	 *	Only reply to fragment 0. We byte re-order the constant
	 *	mask for efficiency.
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		goto out;

	/*
	 *	If we send an ICMP error to an ICMP error a mess would result..
	 */
	if (icmp_pointers[type].error) {
		/*
		 *	We are an error, check if we are replying to an
		 *	ICMP error
		 */
		if (iph->protocol == IPPROTO_ICMP) {
			u8 _inner_type, *itp;

			itp = skb_header_pointer(skb_in,
						 skb_network_header(skb_in) +
						 (iph->ihl << 2) +
						 offsetof(struct icmphdr,
							  type) -
						 skb_in->data,
						 sizeof(_inner_type),
						 &_inner_type);
			if (!itp)
				goto out;

			/*
			 *	Assume any unknown ICMP type is an error. This
			 *	isn't specified by the RFC, but think about it..
			 */
			if (*itp > NR_ICMP_TYPES ||
			    icmp_pointers[*itp].error)
				goto out;
		}
	}

	/* Needed by both icmpv4_global_allow and icmp_xmit_lock */
	local_bh_disable();

	/* Check the global sysctl_icmp_msgs_per_sec ratelimit, unless the
	 * incoming dev is loopback. If the outgoing dev changes to be
	 * non-loopback, the peer ratelimit still applies (in
	 * icmpv4_xrlim_allow).
	 */
	if (!(skb_in->dev && (skb_in->dev->flags & IFF_LOOPBACK)) &&
	    !icmpv4_global_allow(net, type, code, &apply_ratelimit))
		goto out_bh_enable;

	sk = icmp_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;

	/*
	 *	Construct source address and options.
	 */

	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL)) {
		struct net_device *dev = NULL;

		rcu_read_lock();
		if (rt_is_input_route(rt) &&
		    READ_ONCE(net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr))
			dev = dev_get_by_index_rcu(net, inet_iif(skb_in));

		if (dev)
			saddr = inet_select_addr(dev, iph->saddr,
						 RT_SCOPE_LINK);
		else
			saddr = 0;
		rcu_read_unlock();
	}

	tos = icmp_pointers[type].error ? (RT_TOS(iph->tos) |
					   IPTOS_PREC_INTERNETCONTROL) :
					   iph->tos;
	mark = IP4_REPLY_MARK(net, skb_in->mark);

	if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
		goto out_unlock;


	/*
	 *	Prepare data for ICMP header.
	 */

	icmp_param.data.icmph.type = type;
	icmp_param.data.icmph.code = code;
	icmp_param.data.icmph.un.gateway = info;
	icmp_param.data.icmph.checksum = 0;
	icmp_param.skb = skb_in;
	icmp_param.offset = skb_network_offset(skb_in);
	inet_sk(sk)->tos = tos;
	ipcm_init(&ipc);
	ipc.addr = iph->saddr;
	ipc.opt = &icmp_param.replyopts.opt;
	ipc.sockc.mark = mark;

	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
			       type, code, &icmp_param);
	if (IS_ERR(rt))
		goto out_unlock;

	/* peer icmp_ratelimit */
	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
		goto ende;

	/* RFC says return as much as we can without exceeding 576 bytes. */

	room = dst_mtu(&rt->dst);
	if (room > 576)
		room = 576;
	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
	room -= sizeof(struct icmphdr);
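	/* e.g. with a 1500 byte MTU and no IP options this leaves
	 * 576 - 20 - 8 = 548 bytes of the offending datagram.
	 */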
	/* Guard against tiny mtu. We need to include at least one
	 * IP network header for this message to make any sense.
	 */
	if (room <= (int)sizeof(struct iphdr))
		goto ende;

	icmp_param.data_len = skb_in->len - icmp_param.offset;
	if (icmp_param.data_len > room)
		icmp_param.data_len = room;
	icmp_param.head_len = sizeof(struct icmphdr);

	/* if we don't have a source address at this point, fall back to the
	 * dummy address instead of sending out a packet with a source address
	 * of 0.0.0.0
	 */
	if (!fl4.saddr)
		fl4.saddr = htonl(INADDR_DUMMY);

	icmp_push_reply(sk, &icmp_param, &fl4, &ipc, &rt);
ende:
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
out:;
}
EXPORT_SYMBOL(__icmp_send);
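
/*
 * Example caller (a minimal sketch): a transport protocol that finds no
 * socket bound to the destination port typically rejects the packet with
 *
 *	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 *
 * icmp_send() being the wrapper around __icmp_send() that supplies the IP
 * options already recorded in IPCB(skb)->opt.
 */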

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_conntrack.h>
void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
	struct sk_buff *cloned_skb = NULL;
	struct ip_options opts = { 0 };
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	__be32 orig_ip;

	ct = nf_ct_get(skb_in, &ctinfo);
	if (!ct || !(ct->status & IPS_SRC_NAT)) {
		__icmp_send(skb_in, type, code, info, &opts);
		return;
	}

	if (skb_shared(skb_in))
		skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);

	if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
	    (skb_network_header(skb_in) + sizeof(struct iphdr)) >
	    skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
	    skb_network_offset(skb_in) + sizeof(struct iphdr))))
		goto out;

	orig_ip = ip_hdr(skb_in)->saddr;
	ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
	__icmp_send(skb_in, type, code, info, &opts);
	ip_hdr(skb_in)->saddr = orig_ip;
out:
	consume_skb(cloned_skb);
}
EXPORT_SYMBOL(icmp_ndo_send);
#endif

static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const struct net_protocol *ipprot;
	int protocol = iph->protocol;

	/* Check in the full IP header plus 8 bytes of protocol to
	 * avoid additional coding at protocol handlers.
	 */
	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
		return;
	}

	raw_icmp_error(skb, protocol, info);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, info);
}

static bool icmp_tag_validation(int proto)
{
	bool ok;

	rcu_read_lock();
	ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation;
	rcu_read_unlock();
	return ok;
}

/*
 *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and
 *	ICMP_PARAMETERPROB.
 */

static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	const struct iphdr *iph;
	struct icmphdr *icmph;
	struct net *net;
	u32 info = 0;

	net = dev_net(skb_dst(skb)->dev);

	/*
	 *	Incomplete header?
	 *	Only checks for the IP header, there should be an
	 *	additional check for longer headers in upper levels.
	 */

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out_err;

	icmph = icmp_hdr(skb);
	iph = (const struct iphdr *)skb->data;

	if (iph->ihl < 5) { /* Mangled header, drop. */
		reason = SKB_DROP_REASON_IP_INHDR;
		goto out_err;
	}

	switch (icmph->type) {
	case ICMP_DEST_UNREACH:
		switch (icmph->code & 15) {
		case ICMP_NET_UNREACH:
		case ICMP_HOST_UNREACH:
		case ICMP_PROT_UNREACH:
		case ICMP_PORT_UNREACH:
			break;
		case ICMP_FRAG_NEEDED:
			/* for documentation of the ip_no_pmtu_disc
			 * values please see
			 * Documentation/networking/ip-sysctl.rst
			 */
			switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) {
			default:
				net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
						    &iph->daddr);
				break;
			case 2:
				goto out;
			case 3:
				if (!icmp_tag_validation(iph->protocol))
					goto out;
				fallthrough;
			case 0:
				info = ntohs(icmph->un.frag.mtu);
			}
			break;
		case ICMP_SR_FAILED:
			net_dbg_ratelimited("%pI4: Source Route Failed\n",
					    &iph->daddr);
			break;
		default:
			break;
		}
		if (icmph->code > NR_ICMP_UNREACH)
			goto out;
		break;
	case ICMP_PARAMETERPROB:
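		/* RFC 792: the problem pointer lives in the top byte of
		 * the un.gateway word, hence the shift.
		 */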
		info = ntohl(icmph->un.gateway) >> 24;
		break;
	case ICMP_TIME_EXCEEDED:
		__ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
		if (icmph->code == ICMP_EXC_FRAGTIME)
			goto out;
		break;
	}

	/*
	 *	Throw it at our lower layers
	 *
	 *	RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
	 *		  header.
	 *	RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
	 *		  transport layer.
	 *	RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
	 *		  transport layer.
	 */

	/*
	 *	Check the other end isn't violating RFC 1122. Some routers send
	 *	bogus responses to broadcast frames. If you see this message
	 *	first check your netmask matches at both ends, if it does then
	 *	get the other vendor to fix their kit.
	 */

	if (!READ_ONCE(net->ipv4.sysctl_icmp_ignore_bogus_error_responses) &&
	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
				     &ip_hdr(skb)->saddr,
				     icmph->type, icmph->code,
				     &iph->daddr, skb->dev->name);
		goto out;
	}

	icmp_socket_deliver(skb, info);

out:
	return reason;
out_err:
	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
	return reason ?: SKB_DROP_REASON_NOT_SPECIFIED;
}


/*
 *	Handle ICMP_REDIRECT.
 */

static enum skb_drop_reason icmp_redirect(struct sk_buff *skb)
{
	if (skb->len < sizeof(struct iphdr)) {
		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
		return SKB_DROP_REASON_PKT_TOO_SMALL;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
		/* there ought to be a stat */
		return SKB_DROP_REASON_NOMEM;
	}

	icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
	return SKB_NOT_DROPPED_YET;
}

/*
 *	Handle ICMP_ECHO ("ping") and ICMP_EXT_ECHO ("PROBE") requests.
 *
 *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
 *		  requests.
 *	RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
 *		  included in the reply.
 *	RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
 *		  echo requests, MUST have default=NOT.
 *	RFC 8335: 8 MUST have a config option to enable/disable ICMP
 *		  Extended Echo Functionality, MUST be disabled by default
 *	See also WRT handling of options once they are done and working.
 */

static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
{
	struct icmp_bxm icmp_param;
	struct net *net;

	net = dev_net(skb_dst(skb)->dev);
	/* should there be an ICMP stat for ignored echos? */
	if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
		return SKB_NOT_DROPPED_YET;

	icmp_param.data.icmph = *icmp_hdr(skb);
	icmp_param.skb = skb;
	icmp_param.offset = 0;
	icmp_param.data_len = skb->len;
	icmp_param.head_len = sizeof(struct icmphdr);

	if (icmp_param.data.icmph.type == ICMP_ECHO)
		icmp_param.data.icmph.type = ICMP_ECHOREPLY;
	else if (!icmp_build_probe(skb, &icmp_param.data.icmph))
		return SKB_NOT_DROPPED_YET;

	icmp_reply(&icmp_param, skb);
	return SKB_NOT_DROPPED_YET;
}

/* Helper for icmp_echo and icmpv6_echo_reply.
 * Searches for net_device that matches PROBE interface identifier
 * and builds PROBE reply message in icmphdr.
 *
 * Returns false if PROBE responses are disabled via sysctl
 */

bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
{
	struct icmp_ext_hdr *ext_hdr, _ext_hdr;
	struct icmp_ext_echo_iio *iio, _iio;
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *in6_dev;
	struct in_device *in_dev;
	struct net_device *dev;
	char buff[IFNAMSIZ];
	u16 ident_len;
	u8 status;

	if (!READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))
		return false;

	/* We currently only support probing interfaces on the proxy node
	 * Check to ensure L-bit is set
	 */
	if (!(ntohs(icmphdr->un.echo.sequence) & 1))
		return false;
	/* Clear status bits in reply message */
	icmphdr->un.echo.sequence &= htons(0xFF00);
	if (icmphdr->type == ICMP_EXT_ECHO)
		icmphdr->type = ICMP_EXT_ECHOREPLY;
	else
		icmphdr->type = ICMPV6_EXT_ECHO_REPLY;
	ext_hdr = skb_header_pointer(skb, 0, sizeof(_ext_hdr), &_ext_hdr);
	/* Size of iio is class_type dependent.
	 * Only check header here and assign length based on ctype in the switch statement
	 */
	iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio);
	if (!ext_hdr || !iio)
		goto send_mal_query;
	if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) ||
	    ntohs(iio->extobj_hdr.length) > sizeof(_iio))
		goto send_mal_query;
	ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr);
	iio = skb_header_pointer(skb, sizeof(_ext_hdr),
				 sizeof(iio->extobj_hdr) + ident_len, &_iio);
	if (!iio)
		goto send_mal_query;

	status = 0;
	dev = NULL;
	switch (iio->extobj_hdr.class_type) {
	case ICMP_EXT_ECHO_CTYPE_NAME:
		if (ident_len >= IFNAMSIZ)
			goto send_mal_query;
		memset(buff, 0, sizeof(buff));
		memcpy(buff, &iio->ident.name, ident_len);
		dev = dev_get_by_name(net, buff);
		break;
	case ICMP_EXT_ECHO_CTYPE_INDEX:
		if (ident_len != sizeof(iio->ident.ifindex))
			goto send_mal_query;
		dev = dev_get_by_index(net, ntohl(iio->ident.ifindex));
		break;
	case ICMP_EXT_ECHO_CTYPE_ADDR:
		if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) ||
		    ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
				 iio->ident.addr.ctype3_hdr.addrlen)
			goto send_mal_query;
		switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) {
		case ICMP_AFI_IP:
			if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in_addr))
				goto send_mal_query;
			dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr);
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case ICMP_AFI_IP6:
			if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr))
				goto send_mal_query;
			dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev);
			dev_hold(dev);
			break;
#endif
		default:
			goto send_mal_query;
		}
		break;
	default:
		goto send_mal_query;
	}
	if (!dev) {
		icmphdr->code = ICMP_EXT_CODE_NO_IF;
		return true;
	}
	/* Fill bits in reply message */
	if (dev->flags & IFF_UP)
		status |= ICMP_EXT_ECHOREPLY_ACTIVE;

	in_dev = __in_dev_get_rcu(dev);
	if (in_dev && rcu_access_pointer(in_dev->ifa_list))
		status |= ICMP_EXT_ECHOREPLY_IPV4;

	in6_dev = __in6_dev_get(dev);
	if (in6_dev && !list_empty(&in6_dev->addr_list))
		status |= ICMP_EXT_ECHOREPLY_IPV6;

	dev_put(dev);
	icmphdr->un.echo.sequence |= htons(status);
	return true;
send_mal_query:
	icmphdr->code = ICMP_EXT_CODE_MAL_QUERY;
	return true;
}
EXPORT_SYMBOL_GPL(icmp_build_probe);

/*
 *	Handle ICMP Timestamp requests.
 *	RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
 *		  SHOULD be in the kernel for minimum random latency.
 *		  MUST be accurate to a few minutes.
 *		  MUST be updated at least at 15Hz.
 */
static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
{
	struct icmp_bxm icmp_param;
	/*
	 *	Too short.
	 */
	if (skb->len < 4)
		goto out_err;

	/*
	 *	Fill in the current time as ms since midnight UT:
	 */
	icmp_param.data.times[1] = inet_current_timestamp();
	icmp_param.data.times[2] = icmp_param.data.times[1];

	BUG_ON(skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4));

	icmp_param.data.icmph = *icmp_hdr(skb);
	icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
	icmp_param.data.icmph.code = 0;
	icmp_param.skb = skb;
	icmp_param.offset = 0;
	icmp_param.data_len = 0;
	icmp_param.head_len = sizeof(struct icmphdr) + 12;
	icmp_reply(&icmp_param, skb);
	return SKB_NOT_DROPPED_YET;

out_err:
	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
	return SKB_DROP_REASON_PKT_TOO_SMALL;
}

static enum skb_drop_reason icmp_discard(struct sk_buff *skb)
{
	/* pretend it was a success */
	return SKB_NOT_DROPPED_YET;
}

/*
 *	Deal with incoming ICMP packets.
 */
int icmp_rcv(struct sk_buff *skb)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct icmphdr *icmph;

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
				 XFRM_STATE_ICMP)) {
			reason = SKB_DROP_REASON_XFRM_POLICY;
			goto drop;
		}

		if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
			goto drop;

		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*icmph));

		if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN,
						skb)) {
			reason = SKB_DROP_REASON_XFRM_POLICY;
			goto drop;
		}

		skb_set_network_header(skb, nh);
	}

	__ICMP_INC_STATS(net, ICMP_MIB_INMSGS);

	if (skb_checksum_simple_validate(skb))
		goto csum_error;

	if (!pskb_pull(skb, sizeof(*icmph)))
		goto error;

	icmph = icmp_hdr(skb);

	ICMPMSGIN_INC_STATS(net, icmph->type);

	/* Check for ICMP Extended Echo (PROBE) messages */
	if (icmph->type == ICMP_EXT_ECHO) {
		/* We can't use icmp_pointers[].handler() because it is an array of
		 * size NR_ICMP_TYPES + 1 (19 elements) and PROBE has type 42.
		 */
		reason = icmp_echo(skb);
		goto reason_check;
	}

	if (icmph->type == ICMP_EXT_ECHOREPLY) {
		reason = ping_rcv(skb);
		goto reason_check;
	}

	/*
	 *	18 is the highest 'known' ICMP type. Anything else is a mystery.
	 *
	 *	RFC 1122: 3.2.2 Unknown ICMP message types MUST be silently
	 *		  discarded.
	 */
	if (icmph->type > NR_ICMP_TYPES) {
		reason = SKB_DROP_REASON_UNHANDLED_PROTO;
		goto error;
	}

	/*
	 *	Parse the ICMP message
	 */

	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		/*
		 *	RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
		 *	  silently ignored (we let user decide with a sysctl).
		 *	RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
		 *	  discarded if to broadcast/multicast.
		 */
		if ((icmph->type == ICMP_ECHO ||
		     icmph->type == ICMP_TIMESTAMP) &&
		    READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_broadcasts)) {
			reason = SKB_DROP_REASON_INVALID_PROTO;
			goto error;
		}
		if (icmph->type != ICMP_ECHO &&
		    icmph->type != ICMP_TIMESTAMP &&
		    icmph->type != ICMP_ADDRESS &&
		    icmph->type != ICMP_ADDRESSREPLY) {
			reason = SKB_DROP_REASON_INVALID_PROTO;
			goto error;
		}
	}

	reason = icmp_pointers[icmph->type].handler(skb);
reason_check:
	if (!reason) {
		consume_skb(skb);
		return NET_RX_SUCCESS;
	}

drop:
	kfree_skb_reason(skb, reason);
	return NET_RX_DROP;
csum_error:
	reason = SKB_DROP_REASON_ICMP_CSUM;
	__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
error:
	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
	goto drop;
}

static bool ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off)
{
	struct icmp_extobj_hdr *objh, _objh;
	struct icmp_ext_hdr *exth, _exth;
	u16 olen;

	exth = skb_header_pointer(skb, off, sizeof(_exth), &_exth);
	if (!exth)
		return false;
	if (exth->version != 2)
		return true;

	if (exth->checksum &&
	    csum_fold(skb_checksum(skb, off, skb->len - off, 0)))
		return false;

	off += sizeof(_exth);
	while (off < skb->len) {
		objh = skb_header_pointer(skb, off, sizeof(_objh), &_objh);
		if (!objh)
			return false;

		olen = ntohs(objh->length);
		if (olen < sizeof(_objh))
			return false;

		off += olen;
		if (off > skb->len)
			return false;
	}

	return true;
}

void ip_icmp_error_rfc4884(const struct sk_buff *skb,
			   struct sock_ee_data_rfc4884 *out,
			   int thlen, int off)
{
	int hlen;

	/* original datagram headers: end of icmph to payload (skb->data) */
	hlen = -skb_transport_offset(skb) - thlen;

	/* per rfc 4884: minimal datagram length of 128 bytes */
	if (off < 128 || off < hlen)
		return;

	/* kernel has stripped headers: return payload offset in bytes */
	off -= hlen;
	if (off + sizeof(struct icmp_ext_hdr) > skb->len)
		return;

	out->len = off;

	if (!ip_icmp_error_rfc4884_validate(skb, off))
		out->flags |= SO_EE_RFC4884_FLAG_INVALID;
}
EXPORT_SYMBOL_GPL(ip_icmp_error_rfc4884);

int icmp_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	int offset = iph->ihl << 2;
	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
	int type = icmp_hdr(skb)->type;
	int code = icmp_hdr(skb)->code;
	struct net *net = dev_net(skb->dev);

	/*
	 * Use ping_err to handle all icmp errors except those
	 * triggered by an ICMP_ECHOREPLY sent from the kernel.
	 */
	if (icmph->type != ICMP_ECHOREPLY) {
		ping_err(skb, offset, info);
		return 0;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
	else if (type == ICMP_REDIRECT)
		ipv4_redirect(skb, net, 0, IPPROTO_ICMP);

	return 0;
}

/*
 *	This table is the definition of how we handle ICMP.
 */
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
	[ICMP_ECHOREPLY] = {
		.handler = ping_rcv,
	},
	[1] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[2] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_DEST_UNREACH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_SOURCE_QUENCH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_REDIRECT] = {
		.handler = icmp_redirect,
		.error = 1,
	},
	[6] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[7] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_ECHO] = {
		.handler = icmp_echo,
	},
	[9] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[10] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_TIME_EXCEEDED] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_PARAMETERPROB] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_TIMESTAMP] = {
		.handler = icmp_timestamp,
	},
	[ICMP_TIMESTAMPREPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REQUEST] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESS] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESSREPLY] = {
		.handler = icmp_discard,
	},
};

static int __net_init icmp_sk_init(struct net *net)
{
	/* Control parameters for ECHO replies. */
	net->ipv4.sysctl_icmp_echo_ignore_all = 0;
	net->ipv4.sysctl_icmp_echo_enable_probe = 0;
	net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;

	/* Control parameter - ignore bogus broadcast responses? */
	net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;

	/*
	 *	Configurable global rate limit.
	 *
	 *	icmp_ratelimit defines tokens/packet consumed for the
	 *	dst->rate_token bucket; icmp_ratemask defines which icmp
	 *	types are rate-limited by setting their bit position.
	 *
	 *	default:
	 *	dest unreachable (3), source quench (4),
	 *	time exceeded (11), parameter problem (12)
	 */

	net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
	net->ipv4.sysctl_icmp_ratemask = 0x1818;
	net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;

	return 0;
}

static struct pernet_operations __net_initdata icmp_sk_ops = {
	.init = icmp_sk_init,
};

int __init icmp_init(void)
{
	int err, i;

	for_each_possible_cpu(i) {
		struct sock *sk;

		err = inet_ctl_sock_create(&sk, PF_INET,
					   SOCK_RAW, IPPROTO_ICMP, &init_net);
		if (err < 0)
			return err;

		per_cpu(ipv4_icmp_sk, i) = sk;

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff/skb_shared_info struct overhead.
		 */
		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);

		/*
		 * Speedup sock_wfree()
		 */
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
	}
	return register_pernet_subsys(&icmp_sk_ops);
}