xref: /openbmc/linux/net/ipv6/icmp.c (revision bbe735e4)
1 /*
2  *	Internet Control Message Protocol (ICMPv6)
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	$Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
9  *
10  *	Based on net/ipv4/icmp.c
11  *
12  *	RFC 1885
13  *
14  *	This program is free software; you can redistribute it and/or
15  *      modify it under the terms of the GNU General Public License
16  *      as published by the Free Software Foundation; either version
17  *      2 of the License, or (at your option) any later version.
18  */
19 
20 /*
21  *	Changes:
22  *
23  *	Andi Kleen		:	exception handling
24  *	Andi Kleen		:	add rate limits; never reply to an icmp error;
25  *					add more length checks and other fixes.
26  *	yoshfuji		:	ensure to send parameter problem for
27  *					fragments.
28  *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
29  *	Randy Dunlap and
30  *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
31  *	Kazunori MIYAZAWA @USAGI:       change output process to use ip6_append_data
32  */
33 
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
38 #include <linux/in.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
45 
46 #ifdef CONFIG_SYSCTL
47 #include <linux/sysctl.h>
48 #endif
49 
50 #include <linux/inet.h>
51 #include <linux/netdevice.h>
52 #include <linux/icmpv6.h>
53 
54 #include <net/ip.h>
55 #include <net/sock.h>
56 
57 #include <net/ipv6.h>
58 #include <net/ip6_checksum.h>
59 #include <net/protocol.h>
60 #include <net/raw.h>
61 #include <net/rawv6.h>
62 #include <net/transp_v6.h>
63 #include <net/ip6_route.h>
64 #include <net/addrconf.h>
65 #include <net/icmp.h>
66 
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
69 
70 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71 EXPORT_SYMBOL(icmpv6_statistics);
72 
73 /*
74  *	The ICMP socket(s). This is the most convenient way to flow control
75  *	our ICMP output as well as maintain a clean interface throughout
76  *	all layers. All Socketless IP sends will soon be gone.
77  *
78  *	On SMP we have one ICMP socket per-cpu.
79  */
80 static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
81 #define icmpv6_socket	__get_cpu_var(__icmpv6_socket)
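/*
 * icmpv6_socket resolves to the current CPU's private raw socket; it is
 * only dereferenced between icmpv6_xmit_lock() and icmpv6_xmit_unlock(),
 * which keep bottom halves disabled so we cannot migrate CPUs while a
 * reply is being built.
 */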
82 
83 static int icmpv6_rcv(struct sk_buff **pskb);
84 
85 static struct inet6_protocol icmpv6_protocol = {
86 	.handler	=	icmpv6_rcv,
87 	.flags		=	INET6_PROTO_FINAL,
88 };
89 
90 static __inline__ int icmpv6_xmit_lock(void)
91 {
92 	local_bh_disable();
93 
94 	if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
95 		/* This can happen if the output path (e.g. SIT or
96 		 * ip6ip6 tunnel) signals dst_link_failure() for an
97 		 * outgoing ICMP6 packet.
98 		 */
99 		local_bh_enable();
100 		return 1;
101 	}
102 	return 0;
103 }
104 
105 static __inline__ void icmpv6_xmit_unlock(void)
106 {
107 	spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
108 }
109 
110 /*
111  * Slightly more convenient version of icmpv6_send.
112  */
113 void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
114 {
115 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
116 	kfree_skb(skb);
117 }
118 
119 /*
120  * Figure out whether we may reply to this packet with an icmp error.
121  *
122  * We do not reply if:
123  *	- it was an icmp error message.
124  *	- it is truncated, so that we cannot tell whether the protocol
125  *	  is ICMPV6 (i.e. it ends in the middle of some exthdr)
126  *
127  *	--ANK (980726)
128  */
129 
130 static int is_ineligible(struct sk_buff *skb)
131 {
132 	int ptr = (u8*)(skb->nh.ipv6h+1) - skb->data;
133 	int len = skb->len - ptr;
134 	__u8 nexthdr = skb->nh.ipv6h->nexthdr;
135 
136 	if (len < 0)
137 		return 1;
138 
139 	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
140 	if (ptr < 0)
141 		return 0;
142 	if (nexthdr == IPPROTO_ICMPV6) {
143 		u8 _type, *tp;
144 		tp = skb_header_pointer(skb,
145 			ptr+offsetof(struct icmp6hdr, icmp6_type),
146 			sizeof(_type), &_type);
147 		if (tp == NULL ||
148 		    !(*tp & ICMPV6_INFOMSG_MASK))
149 			return 1;
150 	}
151 	return 0;
152 }
153 
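/*
 * Minimum interval between ICMPv6 error messages, in jiffies; exported
 * below as the "ratelimit" sysctl.
 */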
154 static int sysctl_icmpv6_time __read_mostly = 1*HZ;
155 
156 /*
157  * Check the ICMP output rate limit
158  */
159 static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
160 				     struct flowi *fl)
161 {
162 	struct dst_entry *dst;
163 	int res = 0;
164 
165 	/* Informational messages are not limited. */
166 	if (type & ICMPV6_INFOMSG_MASK)
167 		return 1;
168 
169 	/* Do not limit pmtu discovery, it would break it. */
170 	if (type == ICMPV6_PKT_TOOBIG)
171 		return 1;
172 
173 	/*
174 	 * Look up the output route.
175 	 * XXX: perhaps the expire for routing entries cloned by
176 	 * this lookup should be more aggressive (not longer than timeout).
177 	 */
178 	dst = ip6_route_output(sk, fl);
179 	if (dst->error) {
180 		IP6_INC_STATS(ip6_dst_idev(dst),
181 			      IPSTATS_MIB_OUTNOROUTES);
182 	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
183 		res = 1;
184 	} else {
185 		struct rt6_info *rt = (struct rt6_info *)dst;
186 		int tmo = sysctl_icmpv6_time;
187 
188 		/* Give more bandwidth to wider prefixes. */
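		/* e.g. a /64 prefix: (128 - 64) >> 5 == 2, so the allowed
		 * interval shrinks to sysctl_icmpv6_time / 4; a default
		 * route (/0) shrinks it to one sixteenth.
		 */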
189 		if (rt->rt6i_dst.plen < 128)
190 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
191 
192 		res = xrlim_allow(dst, tmo);
193 	}
194 	dst_release(dst);
195 	return res;
196 }
197 
198 /*
199  *	An inline helper for the "simple" if statement below:
200  *	checks whether a parameter problem report is caused by an
201  *	unrecognized IPv6 option that has the Option Type
202  *	highest-order two bits set to 10
203  */
204 
205 static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
206 {
207 	u8 _optval, *op;
208 
209 	offset += skb_network_offset(skb);
210 	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
211 	if (op == NULL)
212 		return 1;
213 	return (*op & 0xC0) == 0x80;
214 }
215 
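/*
 * Fill in the ICMPv6 header of the message queued by ip6_append_data(),
 * fold the per-fragment checksums together with the pseudo-header
 * checksum, and hand the queue over to ip6_push_pending_frames().
 */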
216 static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
217 {
218 	struct sk_buff *skb;
219 	struct icmp6hdr *icmp6h;
220 	int err = 0;
221 
222 	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
223 		goto out;
224 
225 	icmp6h = (struct icmp6hdr*) skb->h.raw;
226 	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
227 	icmp6h->icmp6_cksum = 0;
228 
229 	if (skb_queue_len(&sk->sk_write_queue) == 1) {
230 		skb->csum = csum_partial((char *)icmp6h,
231 					sizeof(struct icmp6hdr), skb->csum);
232 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
233 						      &fl->fl6_dst,
234 						      len, fl->proto,
235 						      skb->csum);
236 	} else {
237 		__wsum tmp_csum = 0;
238 
239 		skb_queue_walk(&sk->sk_write_queue, skb) {
240 			tmp_csum = csum_add(tmp_csum, skb->csum);
241 		}
242 
243 		tmp_csum = csum_partial((char *)icmp6h,
244 					sizeof(struct icmp6hdr), tmp_csum);
245 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
246 						      &fl->fl6_dst,
247 						      len, fl->proto,
248 						      tmp_csum);
249 	}
250 	ip6_push_pending_frames(sk);
251 out:
252 	return err;
253 }
254 
255 struct icmpv6_msg {
256 	struct sk_buff	*skb;
257 	int		offset;
258 	uint8_t		type;
259 };
260 
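/*
 * ip6_append_data() callback: copy a chunk of the offending packet into
 * the new frame while accumulating the checksum.  For error messages
 * (anything but informational types) also attach the original skb's
 * conntrack entry to the reply.
 */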
261 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
262 {
263 	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
264 	struct sk_buff *org_skb = msg->skb;
265 	__wsum csum = 0;
266 
267 	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
268 				      to, len, csum);
269 	skb->csum = csum_block_add(skb->csum, csum, odd);
270 	if (!(msg->type & ICMPV6_INFOMSG_MASK))
271 		nf_ct_attach(skb, org_skb);
272 	return 0;
273 }
274 
275 #ifdef CONFIG_IPV6_MIP6
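/*
 * Mobile IPv6: if the offending packet carried a Home Address
 * destination option, swap the care-of address in the IPv6 source field
 * with the home address from that option, so the error is generated
 * against (and routed back to) the mobile node's home address.
 */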
276 static void mip6_addr_swap(struct sk_buff *skb)
277 {
278 	struct ipv6hdr *iph = skb->nh.ipv6h;
279 	struct inet6_skb_parm *opt = IP6CB(skb);
280 	struct ipv6_destopt_hao *hao;
281 	struct in6_addr tmp;
282 	int off;
283 
284 	if (opt->dsthao) {
285 		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
286 		if (likely(off >= 0)) {
287 			hao = (struct ipv6_destopt_hao *)(skb->nh.raw + off);
288 			ipv6_addr_copy(&tmp, &iph->saddr);
289 			ipv6_addr_copy(&iph->saddr, &hao->addr);
290 			ipv6_addr_copy(&hao->addr, &tmp);
291 		}
292 	}
293 }
294 #else
295 static inline void mip6_addr_swap(struct sk_buff *skb) {}
296 #endif
297 
298 /*
299  *	Send an ICMP message in response to a packet in error
300  */
301 void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
302 		 struct net_device *dev)
303 {
304 	struct inet6_dev *idev = NULL;
305 	struct ipv6hdr *hdr = skb->nh.ipv6h;
306 	struct sock *sk;
307 	struct ipv6_pinfo *np;
308 	struct in6_addr *saddr = NULL;
309 	struct dst_entry *dst;
310 	struct icmp6hdr tmp_hdr;
311 	struct flowi fl;
312 	struct icmpv6_msg msg;
313 	int iif = 0;
314 	int addr_type = 0;
315 	int len;
316 	int hlimit, tclass;
317 	int err = 0;
318 
319 	if ((u8*)hdr < skb->head || (u8*)(hdr+1) > skb->tail)
320 		return;
321 
322 	/*
323 	 *	Make sure we respect the rules
324 	 *	i.e. RFC 1885 2.4(e)
325 	 *	Rule (e.1) is enforced by not using icmpv6_send
326 	 *	in any code that processes icmp errors.
327 	 */
328 	addr_type = ipv6_addr_type(&hdr->daddr);
329 
330 	if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
331 		saddr = &hdr->daddr;
332 
333 	/*
334 	 *	Dest addr check
335 	 */
336 
337 	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
338 		if (type != ICMPV6_PKT_TOOBIG &&
339 		    !(type == ICMPV6_PARAMPROB &&
340 		      code == ICMPV6_UNK_OPTION &&
341 		      (opt_unrec(skb, info))))
342 			return;
343 
344 		saddr = NULL;
345 	}
346 
347 	addr_type = ipv6_addr_type(&hdr->saddr);
348 
349 	/*
350 	 *	Source addr check
351 	 */
352 
353 	if (addr_type & IPV6_ADDR_LINKLOCAL)
354 		iif = skb->dev->ifindex;
355 
356 	/*
357 	 *	Must not send error if the source does not uniquely
358 	 *	identify a single node (RFC2463 Section 2.4).
359 	 *	We check unspecified / multicast addresses here,
360 	 *	and anycast addresses will be checked later.
361 	 */
362 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
363 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
364 		return;
365 	}
366 
367 	/*
368 	 *	Never answer an ICMP error message.
369 	 */
370 	if (is_ineligible(skb)) {
371 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
372 		return;
373 	}
374 
375 	mip6_addr_swap(skb);
376 
377 	memset(&fl, 0, sizeof(fl));
378 	fl.proto = IPPROTO_ICMPV6;
379 	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
380 	if (saddr)
381 		ipv6_addr_copy(&fl.fl6_src, saddr);
382 	fl.oif = iif;
383 	fl.fl_icmp_type = type;
384 	fl.fl_icmp_code = code;
385 	security_skb_classify_flow(skb, &fl);
386 
387 	if (icmpv6_xmit_lock())
388 		return;
389 
390 	sk = icmpv6_socket->sk;
391 	np = inet6_sk(sk);
392 
393 	if (!icmpv6_xrlim_allow(sk, type, &fl))
394 		goto out;
395 
396 	tmp_hdr.icmp6_type = type;
397 	tmp_hdr.icmp6_code = code;
398 	tmp_hdr.icmp6_cksum = 0;
399 	tmp_hdr.icmp6_pointer = htonl(info);
400 
401 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
402 		fl.oif = np->mcast_oif;
403 
404 	err = ip6_dst_lookup(sk, &dst, &fl);
405 	if (err)
406 		goto out;
407 
408 	/*
409 	 * We won't send an icmp error if the destination is a
410 	 * known anycast address.
411 	 */
412 	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
413 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
414 		goto out_dst_release;
415 	}
416 
417 	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
418 		goto out;
419 
420 	if (ipv6_addr_is_multicast(&fl.fl6_dst))
421 		hlimit = np->mcast_hops;
422 	else
423 		hlimit = np->hop_limit;
424 	if (hlimit < 0)
425 		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
426 	if (hlimit < 0)
427 		hlimit = ipv6_get_hoplimit(dst->dev);
428 
429 	tclass = np->tclass;
430 	if (tclass < 0)
431 		tclass = 0;
432 
433 	msg.skb = skb;
434 	msg.offset = skb_network_offset(skb);
435 	msg.type = type;
436 
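	/* Quote as much of the offending packet as fits: the reply,
	 * including its IPv6 and ICMPv6 headers, must not exceed the
	 * minimum IPv6 MTU of 1280 bytes (IPV6_MIN_MTU).
	 */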
437 	len = skb->len - msg.offset;
438 	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
439 	if (len < 0) {
440 		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
441 		goto out_dst_release;
442 	}
443 
444 	idev = in6_dev_get(skb->dev);
445 
446 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
447 			      len + sizeof(struct icmp6hdr),
448 			      sizeof(struct icmp6hdr),
449 			      hlimit, tclass, NULL, &fl, (struct rt6_info*)dst,
450 			      MSG_DONTWAIT);
451 	if (err) {
452 		ip6_flush_pending_frames(sk);
453 		goto out_put;
454 	}
455 	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
456 
457 	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
458 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
459 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
460 
461 out_put:
462 	if (likely(idev != NULL))
463 		in6_dev_put(idev);
464 out_dst_release:
465 	dst_release(dst);
466 out:
467 	icmpv6_xmit_unlock();
468 }
469 
470 EXPORT_SYMBOL(icmpv6_send);
471 
472 static void icmpv6_echo_reply(struct sk_buff *skb)
473 {
474 	struct sock *sk;
475 	struct inet6_dev *idev;
476 	struct ipv6_pinfo *np;
477 	struct in6_addr *saddr = NULL;
478 	struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
479 	struct icmp6hdr tmp_hdr;
480 	struct flowi fl;
481 	struct icmpv6_msg msg;
482 	struct dst_entry *dst;
483 	int err = 0;
484 	int hlimit;
485 	int tclass;
486 
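	/* Reply from the address the echo request was sent to, unless
	 * that address does not uniquely identify this node (multicast
	 * or anycast); in that case leave the source unset and let
	 * routing choose it.
	 */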
487 	saddr = &skb->nh.ipv6h->daddr;
488 
489 	if (!ipv6_unicast_destination(skb))
490 		saddr = NULL;
491 
492 	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
493 	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
494 
495 	memset(&fl, 0, sizeof(fl));
496 	fl.proto = IPPROTO_ICMPV6;
497 	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
498 	if (saddr)
499 		ipv6_addr_copy(&fl.fl6_src, saddr);
500 	fl.oif = skb->dev->ifindex;
501 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
502 	security_skb_classify_flow(skb, &fl);
503 
504 	if (icmpv6_xmit_lock())
505 		return;
506 
507 	sk = icmpv6_socket->sk;
508 	np = inet6_sk(sk);
509 
510 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
511 		fl.oif = np->mcast_oif;
512 
513 	err = ip6_dst_lookup(sk, &dst, &fl);
514 	if (err)
515 		goto out;
516 	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
517 		goto out;
518 
519 	if (ipv6_addr_is_multicast(&fl.fl6_dst))
520 		hlimit = np->mcast_hops;
521 	else
522 		hlimit = np->hop_limit;
523 	if (hlimit < 0)
524 		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
525 	if (hlimit < 0)
526 		hlimit = ipv6_get_hoplimit(dst->dev);
527 
528 	tclass = np->tclass;
529 	if (tclass < 0)
530 		tclass = 0;
531 
532 	idev = in6_dev_get(skb->dev);
533 
534 	msg.skb = skb;
535 	msg.offset = 0;
536 	msg.type = ICMPV6_ECHO_REPLY;
537 
538 	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
539 				sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
540 				(struct rt6_info*)dst, MSG_DONTWAIT);
541 
542 	if (err) {
543 		ip6_flush_pending_frames(sk);
544 		goto out_put;
545 	}
546 	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
547 
548 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
549 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
550 
551 out_put:
552 	if (likely(idev != NULL))
553 		in6_dev_put(idev);
554 	dst_release(dst);
555 out:
556 	icmpv6_xmit_unlock();
557 }
558 
559 static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
560 {
561 	struct in6_addr *saddr, *daddr;
562 	struct inet6_protocol *ipprot;
563 	struct sock *sk;
564 	int inner_offset;
565 	int hash;
566 	u8 nexthdr;
567 
568 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
569 		return;
570 
571 	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
572 	if (ipv6_ext_hdr(nexthdr)) {
573 		/* now skip over extension headers */
574 		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
575 		if (inner_offset < 0)
576 			return;
577 	} else {
578 		inner_offset = sizeof(struct ipv6hdr);
579 	}
580 
581 	/* Check that the header, including 8 bytes of the inner protocol header, is present. */
582 	if (!pskb_may_pull(skb, inner_offset+8))
583 		return;
584 
585 	saddr = &skb->nh.ipv6h->saddr;
586 	daddr = &skb->nh.ipv6h->daddr;
587 
588 	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
589 	   Without this we will not be able, e.g., to do source-routed
590 	   pmtu discovery.
591 	   Corresponding argument (opt) to notifiers is already added.
592 	   --ANK (980726)
593 	 */
594 
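	/* Hand the error first to the upper-layer protocol handler
	 * registered for nexthdr, then to every matching raw socket.
	 */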
595 	hash = nexthdr & (MAX_INET_PROTOS - 1);
596 
597 	rcu_read_lock();
598 	ipprot = rcu_dereference(inet6_protos[hash]);
599 	if (ipprot && ipprot->err_handler)
600 		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
601 	rcu_read_unlock();
602 
603 	read_lock(&raw_v6_lock);
604 	if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
605 		while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
606 					    IP6CB(skb)->iif))) {
607 			rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
608 			sk = sk_next(sk);
609 		}
610 	}
611 	read_unlock(&raw_v6_lock);
612 }
613 
614 /*
615  *	Handle icmp messages
616  */
617 
618 static int icmpv6_rcv(struct sk_buff **pskb)
619 {
620 	struct sk_buff *skb = *pskb;
621 	struct net_device *dev = skb->dev;
622 	struct inet6_dev *idev = __in6_dev_get(dev);
623 	struct in6_addr *saddr, *daddr;
624 	struct ipv6hdr *orig_hdr;
625 	struct icmp6hdr *hdr;
626 	int type;
627 
628 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
629 
630 	saddr = &skb->nh.ipv6h->saddr;
631 	daddr = &skb->nh.ipv6h->daddr;
632 
633 	/* Perform checksum. */
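	/* CHECKSUM_COMPLETE: the driver supplied a full packet checksum
	 * in skb->csum, so only the pseudo-header is folded in here.
	 * CHECKSUM_NONE (or a failed hardware checksum) is verified in
	 * software via __skb_checksum_complete(); CHECKSUM_UNNECESSARY
	 * is trusted and skips the switch entirely.
	 */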
634 	switch (skb->ip_summed) {
635 	case CHECKSUM_COMPLETE:
636 		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
637 				     skb->csum))
638 			break;
639 		/* fall through */
640 	case CHECKSUM_NONE:
641 		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
642 					     IPPROTO_ICMPV6, 0));
643 		if (__skb_checksum_complete(skb)) {
644 			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
645 				       NIP6(*saddr), NIP6(*daddr));
646 			goto discard_it;
647 		}
648 	}
649 
650 	if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
651 		goto discard_it;
652 
653 	hdr = (struct icmp6hdr *) skb->h.raw;
654 
655 	type = hdr->icmp6_type;
656 
657 	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
658 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
659 	else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
660 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
661 
662 	switch (type) {
663 	case ICMPV6_ECHO_REQUEST:
664 		icmpv6_echo_reply(skb);
665 		break;
666 
667 	case ICMPV6_ECHO_REPLY:
668 		/* we couldn't care less */
669 		break;
670 
671 	case ICMPV6_PKT_TOOBIG:
672 		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
673 		   standard destination cache. It seems only an "advanced"
674 		   destination cache would solve this problem.
675 		   --ANK (980726)
676 		 */
677 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
678 			goto discard_it;
679 		hdr = (struct icmp6hdr *) skb->h.raw;
680 		orig_hdr = (struct ipv6hdr *) (hdr + 1);
681 		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
682 				   ntohl(hdr->icmp6_mtu));
683 
684 		/*
685 		 *	Drop through to notify
686 		 */
687 
688 	case ICMPV6_DEST_UNREACH:
689 	case ICMPV6_TIME_EXCEED:
690 	case ICMPV6_PARAMPROB:
691 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
692 		break;
693 
694 	case NDISC_ROUTER_SOLICITATION:
695 	case NDISC_ROUTER_ADVERTISEMENT:
696 	case NDISC_NEIGHBOUR_SOLICITATION:
697 	case NDISC_NEIGHBOUR_ADVERTISEMENT:
698 	case NDISC_REDIRECT:
699 		ndisc_rcv(skb);
700 		break;
701 
702 	case ICMPV6_MGM_QUERY:
703 		igmp6_event_query(skb);
704 		break;
705 
706 	case ICMPV6_MGM_REPORT:
707 		igmp6_event_report(skb);
708 		break;
709 
710 	case ICMPV6_MGM_REDUCTION:
711 	case ICMPV6_NI_QUERY:
712 	case ICMPV6_NI_REPLY:
713 	case ICMPV6_MLD2_REPORT:
714 	case ICMPV6_DHAAD_REQUEST:
715 	case ICMPV6_DHAAD_REPLY:
716 	case ICMPV6_MOBILE_PREFIX_SOL:
717 	case ICMPV6_MOBILE_PREFIX_ADV:
718 		break;
719 
720 	default:
721 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
722 
723 		/* informational */
724 		if (type & ICMPV6_INFOMSG_MASK)
725 			break;
726 
727 		/*
728 		 * error of unknown type.
729 		 * must pass to upper level
730 		 */
731 
732 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
733 	}
734 	kfree_skb(skb);
735 	return 0;
736 
737 discard_it:
738 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
739 	kfree_skb(skb);
740 	return 0;
741 }
742 
743 /*
744  * Special lock-class for __icmpv6_socket:
745  */
746 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
747 
748 int __init icmpv6_init(struct net_proto_family *ops)
749 {
750 	struct sock *sk;
751 	int err, i, j;
752 
753 	for_each_possible_cpu(i) {
754 		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
755 				       &per_cpu(__icmpv6_socket, i));
756 		if (err < 0) {
757 			printk(KERN_ERR
758 			       "Failed to initialize the ICMP6 control socket "
759 			       "(err %d).\n",
760 			       err);
761 			goto fail;
762 		}
763 
764 		sk = per_cpu(__icmpv6_socket, i)->sk;
765 		sk->sk_allocation = GFP_ATOMIC;
766 		/*
767 		 * Split off their lock-class, because sk->sk_dst_lock
768 		 * gets used from softirqs, which is safe for
769 		 * __icmpv6_socket (because those never get directly used
770 		 * via userspace syscalls), but unsafe for normal sockets.
771 		 */
772 		lockdep_set_class(&sk->sk_dst_lock,
773 				  &icmpv6_socket_sk_dst_lock_key);
774 
775 		/* Enough space for 2 64K ICMP packets, including
776 		 * sk_buff struct overhead.
777 		 */
778 		sk->sk_sndbuf =
779 			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
780 
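		/* This socket is for transmitting only; unhash it so it
		 * can never be matched by an incoming-packet lookup.
		 */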
781 		sk->sk_prot->unhash(sk);
782 	}
783 
784 
785 	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
786 		printk(KERN_ERR "Failed to register ICMP6 protocol\n");
787 		err = -EAGAIN;
788 		goto fail;
789 	}
790 
791 	return 0;
792 
793  fail:
794 	for (j = 0; j < i; j++) {
795 		if (!cpu_possible(j))
796 			continue;
797 		sock_release(per_cpu(__icmpv6_socket, j));
798 	}
799 
800 	return err;
801 }
802 
803 void icmpv6_cleanup(void)
804 {
805 	int i;
806 
807 	for_each_possible_cpu(i) {
808 		sock_release(per_cpu(__icmpv6_socket, i));
809 	}
810 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
811 }
812 
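/*
 * Errno mapping for ICMPV6_DEST_UNREACH, indexed directly by the ICMPv6
 * code field (0 = no route ... 4 = port unreachable).
 */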
813 static const struct icmp6_err {
814 	int err;
815 	int fatal;
816 } tab_unreach[] = {
817 	{	/* NOROUTE */
818 		.err	= ENETUNREACH,
819 		.fatal	= 0,
820 	},
821 	{	/* ADM_PROHIBITED */
822 		.err	= EACCES,
823 		.fatal	= 1,
824 	},
825 	{	/* Was NOT_NEIGHBOUR, now reserved */
826 		.err	= EHOSTUNREACH,
827 		.fatal	= 0,
828 	},
829 	{	/* ADDR_UNREACH	*/
830 		.err	= EHOSTUNREACH,
831 		.fatal	= 0,
832 	},
833 	{	/* PORT_UNREACH	*/
834 		.err	= ECONNREFUSED,
835 		.fatal	= 1,
836 	},
837 };
838 
839 int icmpv6_err_convert(int type, int code, int *err)
840 {
841 	int fatal = 0;
842 
843 	*err = EPROTO;
844 
845 	switch (type) {
846 	case ICMPV6_DEST_UNREACH:
847 		fatal = 1;
848 		if (code <= ICMPV6_PORT_UNREACH) {
849 			*err  = tab_unreach[code].err;
850 			fatal = tab_unreach[code].fatal;
851 		}
852 		break;
853 
854 	case ICMPV6_PKT_TOOBIG:
855 		*err = EMSGSIZE;
856 		break;
857 
858 	case ICMPV6_PARAMPROB:
859 		*err = EPROTO;
860 		fatal = 1;
861 		break;
862 
863 	case ICMPV6_TIME_EXCEED:
864 		*err = EHOSTUNREACH;
865 		break;
866 	}
867 
868 	return fatal;
869 }
870 
871 EXPORT_SYMBOL(icmpv6_err_convert);
872 
873 #ifdef CONFIG_SYSCTL
874 ctl_table ipv6_icmp_table[] = {
875 	{
876 		.ctl_name	= NET_IPV6_ICMP_RATELIMIT,
877 		.procname	= "ratelimit",
878 		.data		= &sysctl_icmpv6_time,
879 		.maxlen		= sizeof(int),
880 		.mode		= 0644,
881 		.proc_handler	= &proc_dointvec
882 	},
883 	{ .ctl_name = 0 },
884 };
885 #endif
886 
887