xref: /openbmc/linux/net/ipv6/icmp.c (revision 0660e03f)
1 /*
2  *	Internet Control Message Protocol (ICMPv6)
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	$Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
9  *
10  *	Based on net/ipv4/icmp.c
11  *
12  *	RFC 1885
13  *
14  *	This program is free software; you can redistribute it and/or
15  *      modify it under the terms of the GNU General Public License
16  *      as published by the Free Software Foundation; either version
17  *      2 of the License, or (at your option) any later version.
18  */
19 
20 /*
21  *	Changes:
22  *
23  *	Andi Kleen		:	exception handling
24  *	Andi Kleen			add rate limits. never reply to an icmp.
25  *					add more length checks and other fixes.
26  *	yoshfuji		:	ensure to send parameter problem for
27  *					fragments.
28  *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
29  *	Randy Dunlap and
30  *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
31  *	Kazunori MIYAZAWA @USAGI:       change output process to use ip6_append_data
32  */
33 
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
38 #include <linux/in.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
45 
46 #ifdef CONFIG_SYSCTL
47 #include <linux/sysctl.h>
48 #endif
49 
50 #include <linux/inet.h>
51 #include <linux/netdevice.h>
52 #include <linux/icmpv6.h>
53 
54 #include <net/ip.h>
55 #include <net/sock.h>
56 
57 #include <net/ipv6.h>
58 #include <net/ip6_checksum.h>
59 #include <net/protocol.h>
60 #include <net/raw.h>
61 #include <net/rawv6.h>
62 #include <net/transp_v6.h>
63 #include <net/ip6_route.h>
64 #include <net/addrconf.h>
65 #include <net/icmp.h>
66 
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
69 
70 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71 EXPORT_SYMBOL(icmpv6_statistics);
72 
73 /*
74  *	The ICMP socket(s). This is the most convenient way to flow-control
75  *	our ICMP output as well as to maintain a clean interface throughout
76  *	all layers. All socketless IP sends will soon be gone.
77  *
78  *	On SMP we have one ICMP socket per-cpu.
79  */
80 static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
81 #define icmpv6_socket	__get_cpu_var(__icmpv6_socket)
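/*
 * Note: icmpv6_socket expands to the socket of the CPU we are currently
 * running on, so it may only be used with bottom halves disabled;
 * icmpv6_xmit_lock() below takes care of that before any transmit.
 */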
82 
83 static int icmpv6_rcv(struct sk_buff **pskb);
84 
85 static struct inet6_protocol icmpv6_protocol = {
86 	.handler	=	icmpv6_rcv,
87 	.flags		=	INET6_PROTO_FINAL,
88 };
89 
90 static __inline__ int icmpv6_xmit_lock(void)
91 {
92 	local_bh_disable();
93 
94 	if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
95 		/* This can happen if the output path (e.g. a SIT or
96 		 * ip6ip6 tunnel) signals dst_link_failure() for an
97 		 * outgoing ICMP6 packet.
98 		 */
99 		local_bh_enable();
100 		return 1;
101 	}
102 	return 0;
103 }
104 
105 static __inline__ void icmpv6_xmit_unlock(void)
106 {
107 	spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
108 }
109 
110 /*
111  * Slightly more convenient version of icmpv6_send.
112  */
113 void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
114 {
115 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
116 	kfree_skb(skb);
117 }
118 
119 /*
120  * Figure out whether we may reply to this packet with an icmp error.
121  *
122  * We do not reply if:
123  *	- it was an icmp error message.
124  *	- it is truncated, so that it is not known whether the protocol is
125  *	  ICMPV6 (i.e. it ends in the middle of some exthdr)
126  *
127  *	--ANK (980726)
128  */
129 
130 static int is_ineligible(struct sk_buff *skb)
131 {
132 	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
133 	int len = skb->len - ptr;
134 	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
135 
136 	if (len < 0)
137 		return 1;
138 
139 	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
140 	if (ptr < 0)
141 		return 0;
142 	if (nexthdr == IPPROTO_ICMPV6) {
143 		u8 _type, *tp;
144 		tp = skb_header_pointer(skb,
145 			ptr+offsetof(struct icmp6hdr, icmp6_type),
146 			sizeof(_type), &_type);
147 		if (tp == NULL ||
148 		    !(*tp & ICMPV6_INFOMSG_MASK))
149 			return 1;
150 	}
151 	return 0;
152 }
153 
154 static int sysctl_icmpv6_time __read_mostly = 1*HZ;
155 
156 /*
157  * Check the ICMP output rate limit
158  */
159 static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
160 				     struct flowi *fl)
161 {
162 	struct dst_entry *dst;
163 	int res = 0;
164 
165 	/* Informational messages are not limited. */
166 	if (type & ICMPV6_INFOMSG_MASK)
167 		return 1;
168 
169 	/* Do not limit pmtu discovery, it would break it. */
170 	if (type == ICMPV6_PKT_TOOBIG)
171 		return 1;
172 
173 	/*
174 	 * Look up the output route.
175 	 * XXX: perhaps the expiry for routing entries cloned by
176 	 * this lookup should be more aggressive (no longer than the timeout).
177 	 */
178 	dst = ip6_route_output(sk, fl);
179 	if (dst->error) {
180 		IP6_INC_STATS(ip6_dst_idev(dst),
181 			      IPSTATS_MIB_OUTNOROUTES);
182 	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
183 		res = 1;
184 	} else {
185 		struct rt6_info *rt = (struct rt6_info *)dst;
186 		int tmo = sysctl_icmpv6_time;
187 
188 		/* Give more bandwidth to wider prefixes. */
189 		if (rt->rt6i_dst.plen < 128)
190 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
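		/*
		 * Worked example: a default route (plen 0) shifts tmo right
		 * by 4, i.e. divides the interval by 16; a /64 divides it by
		 * 4; prefixes longer than /96 keep the full interval.
		 */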
191 
192 		res = xrlim_allow(dst, tmo);
193 	}
194 	dst_release(dst);
195 	return res;
196 }
197 
198 /*
199  *	An inline helper for the "simple" if statement below:
200  *	checks whether a parameter problem report is caused by an
201  *	unrecognized IPv6 option that has the Option Type
202  *	highest-order two bits set to 10
203  */
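/*
 *	(RFC 2460, sec. 4.2: options of this kind require a Parameter Problem
 *	message even when the offending packet was sent to a multicast
 *	address, which is the one exception icmpv6_send() makes below for
 *	non-unicast destinations.)
 */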
204 
205 static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
206 {
207 	u8 _optval, *op;
208 
209 	offset += skb_network_offset(skb);
210 	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
211 	if (op == NULL)
212 		return 1;
213 	return (*op & 0xC0) == 0x80;
214 }
215 
216 static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
217 {
218 	struct sk_buff *skb;
219 	struct icmp6hdr *icmp6h;
220 	int err = 0;
221 
222 	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
223 		goto out;
224 
225 	icmp6h = (struct icmp6hdr*) skb->h.raw;
226 	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
227 	icmp6h->icmp6_cksum = 0;
228 
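	/*
	 * Fold the ICMPv6 header into the checksum: with a single skb queued
	 * its ->csum already covers the payload, so the header and the
	 * pseudo-header are added directly; with several fragments the
	 * per-skb checksums are summed first.
	 */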
229 	if (skb_queue_len(&sk->sk_write_queue) == 1) {
230 		skb->csum = csum_partial((char *)icmp6h,
231 					sizeof(struct icmp6hdr), skb->csum);
232 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
233 						      &fl->fl6_dst,
234 						      len, fl->proto,
235 						      skb->csum);
236 	} else {
237 		__wsum tmp_csum = 0;
238 
239 		skb_queue_walk(&sk->sk_write_queue, skb) {
240 			tmp_csum = csum_add(tmp_csum, skb->csum);
241 		}
242 
243 		tmp_csum = csum_partial((char *)icmp6h,
244 					sizeof(struct icmp6hdr), tmp_csum);
245 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
246 						      &fl->fl6_dst,
247 						      len, fl->proto,
248 						      tmp_csum);
249 	}
250 	ip6_push_pending_frames(sk);
251 out:
252 	return err;
253 }
254 
255 struct icmpv6_msg {
256 	struct sk_buff	*skb;
257 	int		offset;
258 	uint8_t		type;
259 };
260 
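/*
 * Fragment callback handed to ip6_append_data(): it copies successive
 * chunks of the offending packet into the reply skb while accumulating
 * the checksum, and (for error messages only) ties the reply to the
 * original packet's conntrack entry.
 */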
261 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
262 {
263 	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
264 	struct sk_buff *org_skb = msg->skb;
265 	__wsum csum = 0;
266 
267 	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
268 				      to, len, csum);
269 	skb->csum = csum_block_add(skb->csum, csum, odd);
270 	if (!(msg->type & ICMPV6_INFOMSG_MASK))
271 		nf_ct_attach(skb, org_skb);
272 	return 0;
273 }
274 
275 #ifdef CONFIG_IPV6_MIP6
276 static void mip6_addr_swap(struct sk_buff *skb)
277 {
278 	struct ipv6hdr *iph = ipv6_hdr(skb);
279 	struct inet6_skb_parm *opt = IP6CB(skb);
280 	struct ipv6_destopt_hao *hao;
281 	struct in6_addr tmp;
282 	int off;
283 
284 	if (opt->dsthao) {
285 		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
286 		if (likely(off >= 0)) {
287 			hao = (struct ipv6_destopt_hao *)
288 					(skb_network_header(skb) + off);
289 			ipv6_addr_copy(&tmp, &iph->saddr);
290 			ipv6_addr_copy(&iph->saddr, &hao->addr);
291 			ipv6_addr_copy(&hao->addr, &tmp);
292 		}
293 	}
294 }
295 #else
296 static inline void mip6_addr_swap(struct sk_buff *skb) {}
297 #endif
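/*
 * For Mobile IPv6 the offending packet may carry the sender's home address
 * in a Home Address destination option while its IPv6 source holds the
 * care-of address; mip6_addr_swap() exchanges the two before the error is
 * built, so the reply is routed back to the mobile node's home address
 * rather than its care-of address, which is how Mobile IPv6 expects such
 * errors to be handled.
 */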
298 
299 /*
300  *	Send an ICMP message in response to a packet in error
301  */
302 void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
303 		 struct net_device *dev)
304 {
305 	struct inet6_dev *idev = NULL;
306 	struct ipv6hdr *hdr = ipv6_hdr(skb);
307 	struct sock *sk;
308 	struct ipv6_pinfo *np;
309 	struct in6_addr *saddr = NULL;
310 	struct dst_entry *dst;
311 	struct icmp6hdr tmp_hdr;
312 	struct flowi fl;
313 	struct icmpv6_msg msg;
314 	int iif = 0;
315 	int addr_type = 0;
316 	int len;
317 	int hlimit, tclass;
318 	int err = 0;
319 
320 	if ((u8*)hdr < skb->head || (u8*)(hdr+1) > skb->tail)
321 		return;
322 
323 	/*
324 	 *	Make sure we respect the rules
325 	 *	i.e. RFC 1885 2.4(e)
326 	 *	Rule (e.1) is enforced by not using icmpv6_send
327 	 *	in any code that processes icmp errors.
328 	 */
329 	addr_type = ipv6_addr_type(&hdr->daddr);
330 
331 	if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
332 		saddr = &hdr->daddr;
333 
334 	/*
335 	 *	Dest addr check
336 	 */
337 
338 	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
339 		if (type != ICMPV6_PKT_TOOBIG &&
340 		    !(type == ICMPV6_PARAMPROB &&
341 		      code == ICMPV6_UNK_OPTION &&
342 		      (opt_unrec(skb, info))))
343 			return;
344 
345 		saddr = NULL;
346 	}
347 
348 	addr_type = ipv6_addr_type(&hdr->saddr);
349 
350 	/*
351 	 *	Source addr check
352 	 */
353 
354 	if (addr_type & IPV6_ADDR_LINKLOCAL)
355 		iif = skb->dev->ifindex;
356 
357 	/*
358 	 *	Must not send error if the source does not uniquely
359 	 *	identify a single node (RFC2463 Section 2.4).
360 	 *	We check unspecified / multicast addresses here,
361 	 *	and anycast addresses will be checked later.
362 	 */
363 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
364 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
365 		return;
366 	}
367 
368 	/*
369 	 *	Never answer an ICMP error packet.
370 	 */
371 	if (is_ineligible(skb)) {
372 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
373 		return;
374 	}
375 
376 	mip6_addr_swap(skb);
377 
378 	memset(&fl, 0, sizeof(fl));
379 	fl.proto = IPPROTO_ICMPV6;
380 	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
381 	if (saddr)
382 		ipv6_addr_copy(&fl.fl6_src, saddr);
383 	fl.oif = iif;
384 	fl.fl_icmp_type = type;
385 	fl.fl_icmp_code = code;
386 	security_skb_classify_flow(skb, &fl);
387 
388 	if (icmpv6_xmit_lock())
389 		return;
390 
391 	sk = icmpv6_socket->sk;
392 	np = inet6_sk(sk);
393 
394 	if (!icmpv6_xrlim_allow(sk, type, &fl))
395 		goto out;
396 
397 	tmp_hdr.icmp6_type = type;
398 	tmp_hdr.icmp6_code = code;
399 	tmp_hdr.icmp6_cksum = 0;
400 	tmp_hdr.icmp6_pointer = htonl(info);
401 
402 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
403 		fl.oif = np->mcast_oif;
404 
405 	err = ip6_dst_lookup(sk, &dst, &fl);
406 	if (err)
407 		goto out;
408 
409 	/*
410 	 * We won't send an icmp error if the destination is a known
411 	 * anycast address.
412 	 */
413 	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
414 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
415 		goto out_dst_release;
416 	}
417 
418 	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
419 		goto out;
420 
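	/*
	 * Pick the hop limit: the socket's multicast or unicast setting when
	 * one is configured, otherwise the route metric, and finally the
	 * per-device default.
	 */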
421 	if (ipv6_addr_is_multicast(&fl.fl6_dst))
422 		hlimit = np->mcast_hops;
423 	else
424 		hlimit = np->hop_limit;
425 	if (hlimit < 0)
426 		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
427 	if (hlimit < 0)
428 		hlimit = ipv6_get_hoplimit(dst->dev);
429 
430 	tclass = np->tclass;
431 	if (tclass < 0)
432 		tclass = 0;
433 
434 	msg.skb = skb;
435 	msg.offset = skb_network_offset(skb);
436 	msg.type = type;
437 
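	/*
	 * Clamp the quoted data so that the whole error message (IPv6 header,
	 * ICMPv6 header and as much of the offending packet as fits) never
	 * exceeds the IPv6 minimum MTU of 1280 bytes, as required for ICMPv6
	 * errors.
	 */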
438 	len = skb->len - msg.offset;
439 	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
440 	if (len < 0) {
441 		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
442 		goto out_dst_release;
443 	}
444 
445 	idev = in6_dev_get(skb->dev);
446 
447 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
448 			      len + sizeof(struct icmp6hdr),
449 			      sizeof(struct icmp6hdr),
450 			      hlimit, tclass, NULL, &fl, (struct rt6_info*)dst,
451 			      MSG_DONTWAIT);
452 	if (err) {
453 		ip6_flush_pending_frames(sk);
454 		goto out_put;
455 	}
456 	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
457 
458 	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
459 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
460 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
461 
462 out_put:
463 	if (likely(idev != NULL))
464 		in6_dev_put(idev);
465 out_dst_release:
466 	dst_release(dst);
467 out:
468 	icmpv6_xmit_unlock();
469 }
470 
471 EXPORT_SYMBOL(icmpv6_send);
472 
473 static void icmpv6_echo_reply(struct sk_buff *skb)
474 {
475 	struct sock *sk;
476 	struct inet6_dev *idev;
477 	struct ipv6_pinfo *np;
478 	struct in6_addr *saddr = NULL;
479 	struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
480 	struct icmp6hdr tmp_hdr;
481 	struct flowi fl;
482 	struct icmpv6_msg msg;
483 	struct dst_entry *dst;
484 	int err = 0;
485 	int hlimit;
486 	int tclass;
487 
488 	saddr = &ipv6_hdr(skb)->daddr;
489 
490 	if (!ipv6_unicast_destination(skb))
491 		saddr = NULL;
492 
493 	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
494 	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
495 
496 	memset(&fl, 0, sizeof(fl));
497 	fl.proto = IPPROTO_ICMPV6;
498 	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
499 	if (saddr)
500 		ipv6_addr_copy(&fl.fl6_src, saddr);
501 	fl.oif = skb->dev->ifindex;
502 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
503 	security_skb_classify_flow(skb, &fl);
504 
505 	if (icmpv6_xmit_lock())
506 		return;
507 
508 	sk = icmpv6_socket->sk;
509 	np = inet6_sk(sk);
510 
511 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
512 		fl.oif = np->mcast_oif;
513 
514 	err = ip6_dst_lookup(sk, &dst, &fl);
515 	if (err)
516 		goto out;
517 	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
518 		goto out;
519 
520 	if (ipv6_addr_is_multicast(&fl.fl6_dst))
521 		hlimit = np->mcast_hops;
522 	else
523 		hlimit = np->hop_limit;
524 	if (hlimit < 0)
525 		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
526 	if (hlimit < 0)
527 		hlimit = ipv6_get_hoplimit(dst->dev);
528 
529 	tclass = np->tclass;
530 	if (tclass < 0)
531 		tclass = 0;
532 
533 	idev = in6_dev_get(skb->dev);
534 
535 	msg.skb = skb;
536 	msg.offset = 0;
537 	msg.type = ICMPV6_ECHO_REPLY;
538 
539 	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
540 				sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
541 				(struct rt6_info*)dst, MSG_DONTWAIT);
542 
543 	if (err) {
544 		ip6_flush_pending_frames(sk);
545 		goto out_put;
546 	}
547 	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
548 
549 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
550 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
551 
552 out_put:
553 	if (likely(idev != NULL))
554 		in6_dev_put(idev);
555 	dst_release(dst);
556 out:
557 	icmpv6_xmit_unlock();
558 }
559 
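/*
 * Hand a received ICMPv6 error to the protocol it concerns: the registered
 * inet6 protocol's err_handler is called first, then every matching raw
 * socket is notified via rawv6_err().
 */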
560 static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
561 {
562 	struct in6_addr *saddr, *daddr;
563 	struct inet6_protocol *ipprot;
564 	struct sock *sk;
565 	int inner_offset;
566 	int hash;
567 	u8 nexthdr;
568 
569 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
570 		return;
571 
572 	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
573 	if (ipv6_ext_hdr(nexthdr)) {
574 		/* now skip over extension headers */
575 		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
576 		if (inner_offset<0)
577 			return;
578 	} else {
579 		inner_offset = sizeof(struct ipv6hdr);
580 	}
581 
582 	/* Check the header, including 8 bytes of the inner protocol header. */
583 	if (!pskb_may_pull(skb, inner_offset+8))
584 		return;
585 
586 	saddr = &ipv6_hdr(skb)->saddr;
587 	daddr = &ipv6_hdr(skb)->daddr;
588 
589 	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
590 	   Without this we will not be able, e.g., to do source-routed
591 	   pmtu discovery.
592 	   Corresponding argument (opt) to notifiers is already added.
593 	   --ANK (980726)
594 	 */
595 
596 	hash = nexthdr & (MAX_INET_PROTOS - 1);
597 
598 	rcu_read_lock();
599 	ipprot = rcu_dereference(inet6_protos[hash]);
600 	if (ipprot && ipprot->err_handler)
601 		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
602 	rcu_read_unlock();
603 
604 	read_lock(&raw_v6_lock);
605 	if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
606 		while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
607 					    IP6CB(skb)->iif))) {
608 			rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
609 			sk = sk_next(sk);
610 		}
611 	}
612 	read_unlock(&raw_v6_lock);
613 }
614 
615 /*
616  *	Handle icmp messages
617  */
618 
619 static int icmpv6_rcv(struct sk_buff **pskb)
620 {
621 	struct sk_buff *skb = *pskb;
622 	struct net_device *dev = skb->dev;
623 	struct inet6_dev *idev = __in6_dev_get(dev);
624 	struct in6_addr *saddr, *daddr;
625 	struct ipv6hdr *orig_hdr;
626 	struct icmp6hdr *hdr;
627 	int type;
628 
629 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
630 
631 	saddr = &ipv6_hdr(skb)->saddr;
632 	daddr = &ipv6_hdr(skb)->daddr;
633 
634 	/* Perform checksum. */
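	/*
	 * CHECKSUM_COMPLETE means the device already summed the payload, so
	 * it is only verified against the pseudo-header; with CHECKSUM_NONE
	 * (or if that verification fails) the sum is recomputed in software,
	 * and CHECKSUM_UNNECESSARY is trusted as-is.
	 */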
635 	switch (skb->ip_summed) {
636 	case CHECKSUM_COMPLETE:
637 		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
638 				     skb->csum))
639 			break;
640 		/* fall through */
641 	case CHECKSUM_NONE:
642 		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
643 					     IPPROTO_ICMPV6, 0));
644 		if (__skb_checksum_complete(skb)) {
645 			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
646 				       NIP6(*saddr), NIP6(*daddr));
647 			goto discard_it;
648 		}
649 	}
650 
651 	if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
652 		goto discard_it;
653 
654 	hdr = (struct icmp6hdr *) skb->h.raw;
655 
656 	type = hdr->icmp6_type;
657 
658 	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
659 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
660 	else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
661 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
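	/*
	 * These macros rely on the per-type MIB counters being laid out
	 * contiguously in type order, so the right counter is reached by
	 * offsetting from the first one of each group
	 * (ICMP6_MIB_INDESTUNREACHS for the error types, ICMP6_MIB_INECHOS
	 * for the informational/NDISC range).
	 */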
662 
663 	switch (type) {
664 	case ICMPV6_ECHO_REQUEST:
665 		icmpv6_echo_reply(skb);
666 		break;
667 
668 	case ICMPV6_ECHO_REPLY:
669 		/* we couldn't care less */
670 		break;
671 
672 	case ICMPV6_PKT_TOOBIG:
673 		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
674 		   the standard destination cache. It seems only an "advanced"
675 		   destination cache will allow this problem to be solved
676 		   --ANK (980726)
677 		 */
678 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
679 			goto discard_it;
680 		hdr = (struct icmp6hdr *) skb->h.raw;
681 		orig_hdr = (struct ipv6hdr *) (hdr + 1);
682 		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
683 				   ntohl(hdr->icmp6_mtu));
684 
685 		/*
686 		 *	Drop through to notify
687 		 */
688 
689 	case ICMPV6_DEST_UNREACH:
690 	case ICMPV6_TIME_EXCEED:
691 	case ICMPV6_PARAMPROB:
692 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
693 		break;
694 
695 	case NDISC_ROUTER_SOLICITATION:
696 	case NDISC_ROUTER_ADVERTISEMENT:
697 	case NDISC_NEIGHBOUR_SOLICITATION:
698 	case NDISC_NEIGHBOUR_ADVERTISEMENT:
699 	case NDISC_REDIRECT:
700 		ndisc_rcv(skb);
701 		break;
702 
703 	case ICMPV6_MGM_QUERY:
704 		igmp6_event_query(skb);
705 		break;
706 
707 	case ICMPV6_MGM_REPORT:
708 		igmp6_event_report(skb);
709 		break;
710 
711 	case ICMPV6_MGM_REDUCTION:
712 	case ICMPV6_NI_QUERY:
713 	case ICMPV6_NI_REPLY:
714 	case ICMPV6_MLD2_REPORT:
715 	case ICMPV6_DHAAD_REQUEST:
716 	case ICMPV6_DHAAD_REPLY:
717 	case ICMPV6_MOBILE_PREFIX_SOL:
718 	case ICMPV6_MOBILE_PREFIX_ADV:
719 		break;
720 
721 	default:
722 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
723 
724 		/* informational */
725 		if (type & ICMPV6_INFOMSG_MASK)
726 			break;
727 
728 		/*
729 		 * error of unknown type.
730 		 * must pass to upper level
731 		 */
732 
733 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
734 	}
735 	kfree_skb(skb);
736 	return 0;
737 
738 discard_it:
739 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
740 	kfree_skb(skb);
741 	return 0;
742 }
743 
744 /*
745  * Special lock-class for __icmpv6_socket:
746  */
747 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
748 
749 int __init icmpv6_init(struct net_proto_family *ops)
750 {
751 	struct sock *sk;
752 	int err, i, j;
753 
754 	for_each_possible_cpu(i) {
755 		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
756 				       &per_cpu(__icmpv6_socket, i));
757 		if (err < 0) {
758 			printk(KERN_ERR
759 			       "Failed to initialize the ICMP6 control socket "
760 			       "(err %d).\n",
761 			       err);
762 			goto fail;
763 		}
764 
765 		sk = per_cpu(__icmpv6_socket, i)->sk;
766 		sk->sk_allocation = GFP_ATOMIC;
767 		/*
768 		 * Split off their lock-class, because sk->sk_dst_lock
769 		 * gets used from softirqs, which is safe for
770 		 * __icmpv6_socket (because those never get directly used
771 		 * via userspace syscalls), but unsafe for normal sockets.
772 		 */
773 		lockdep_set_class(&sk->sk_dst_lock,
774 				  &icmpv6_socket_sk_dst_lock_key);
775 
776 		/* Enough space for 2 64K ICMP packets, including
777 		 * sk_buff struct overhead.
778 		 */
779 		sk->sk_sndbuf =
780 			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
781 
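		/*
		 * Unhash the socket so it never matches incoming packets; it
		 * exists purely as a transmit context for icmpv6_send() and
		 * icmpv6_echo_reply().
		 */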
782 		sk->sk_prot->unhash(sk);
783 	}
784 
785 
786 	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
787 		printk(KERN_ERR "Failed to register ICMP6 protocol\n");
788 		err = -EAGAIN;
789 		goto fail;
790 	}
791 
792 	return 0;
793 
794  fail:
795 	for (j = 0; j < i; j++) {
796 		if (!cpu_possible(j))
797 			continue;
798 		sock_release(per_cpu(__icmpv6_socket, j));
799 	}
800 
801 	return err;
802 }
803 
804 void icmpv6_cleanup(void)
805 {
806 	int i;
807 
808 	for_each_possible_cpu(i) {
809 		sock_release(per_cpu(__icmpv6_socket, i));
810 	}
811 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
812 }
813 
814 static const struct icmp6_err {
815 	int err;
816 	int fatal;
817 } tab_unreach[] = {
818 	{	/* NOROUTE */
819 		.err	= ENETUNREACH,
820 		.fatal	= 0,
821 	},
822 	{	/* ADM_PROHIBITED */
823 		.err	= EACCES,
824 		.fatal	= 1,
825 	},
826 	{	/* Was NOT_NEIGHBOUR, now reserved */
827 		.err	= EHOSTUNREACH,
828 		.fatal	= 0,
829 	},
830 	{	/* ADDR_UNREACH	*/
831 		.err	= EHOSTUNREACH,
832 		.fatal	= 0,
833 	},
834 	{	/* PORT_UNREACH	*/
835 		.err	= ECONNREFUSED,
836 		.fatal	= 1,
837 	},
838 };
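/*
 * tab_unreach[] is indexed by the ICMPv6 Destination Unreachable code
 * (0 .. ICMPV6_PORT_UNREACH); codes beyond that range keep the default
 * EPROTO set in icmpv6_err_convert() below and are treated as fatal.
 */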
839 
840 int icmpv6_err_convert(int type, int code, int *err)
841 {
842 	int fatal = 0;
843 
844 	*err = EPROTO;
845 
846 	switch (type) {
847 	case ICMPV6_DEST_UNREACH:
848 		fatal = 1;
849 		if (code <= ICMPV6_PORT_UNREACH) {
850 			*err  = tab_unreach[code].err;
851 			fatal = tab_unreach[code].fatal;
852 		}
853 		break;
854 
855 	case ICMPV6_PKT_TOOBIG:
856 		*err = EMSGSIZE;
857 		break;
858 
859 	case ICMPV6_PARAMPROB:
860 		*err = EPROTO;
861 		fatal = 1;
862 		break;
863 
864 	case ICMPV6_TIME_EXCEED:
865 		*err = EHOSTUNREACH;
866 		break;
867 	}
868 
869 	return fatal;
870 }
871 
872 EXPORT_SYMBOL(icmpv6_err_convert);
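/*
 * A protocol error handler would typically use this along the following
 * lines (an illustrative sketch only -- the real handling lives in each
 * protocol's err_handler, e.g. tcp_v6_err()):
 *
 *	int err;
 *	int fatal = icmpv6_err_convert(type, code, &err);
 *
 *	sk->sk_err = err;
 *	if (fatal)
 *		... tear the connection down ...
 */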
873 
874 #ifdef CONFIG_SYSCTL
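/*
 * Exposed as net.ipv6.icmp.ratelimit; with proc_dointvec the value is read
 * and written in raw jiffies, and sysctl_icmpv6_time defaults to 1*HZ (one
 * second) before the prefix-based scaling done in icmpv6_xrlim_allow().
 */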
875 ctl_table ipv6_icmp_table[] = {
876 	{
877 		.ctl_name	= NET_IPV6_ICMP_RATELIMIT,
878 		.procname	= "ratelimit",
879 		.data		= &sysctl_icmpv6_time,
880 		.maxlen		= sizeof(int),
881 		.mode		= 0644,
882 		.proc_handler	= &proc_dointvec
883 	},
884 	{ .ctl_name = 0 },
885 };
886 #endif
887 
888