xref: /openbmc/linux/net/ipv6/icmp.c (revision 670c02c2)
1 /*
2  *	Internet Control Message Protocol (ICMPv6)
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	$Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
9  *
10  *	Based on net/ipv4/icmp.c
11  *
12  *	RFC 1885
13  *
14  *	This program is free software; you can redistribute it and/or
15  *      modify it under the terms of the GNU General Public License
16  *      as published by the Free Software Foundation; either version
17  *      2 of the License, or (at your option) any later version.
18  */
19 
20 /*
21  *	Changes:
22  *
23  *	Andi Kleen		:	exception handling
24  *	Andi Kleen		:	add rate limits; never reply to an ICMP error;
25  *					add more length checks and other fixes.
26  *	yoshfuji		:	ensure to send parameter problem for
27  *					fragments.
28  *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
29  *	Randy Dunlap and
30  *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
31  *	Kazunori MIYAZAWA @USAGI:       change output process to use ip6_append_data
32  */
33 
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
38 #include <linux/in.h>
39 #include <linux/kernel.h>
40 #include <linux/sched.h>
41 #include <linux/sockios.h>
42 #include <linux/net.h>
43 #include <linux/skbuff.h>
44 #include <linux/init.h>
45 
46 #ifdef CONFIG_SYSCTL
47 #include <linux/sysctl.h>
48 #endif
49 
50 #include <linux/inet.h>
51 #include <linux/netdevice.h>
52 #include <linux/icmpv6.h>
53 
54 #include <net/ip.h>
55 #include <net/sock.h>
56 
57 #include <net/ipv6.h>
58 #include <net/ip6_checksum.h>
59 #include <net/protocol.h>
60 #include <net/raw.h>
61 #include <net/rawv6.h>
62 #include <net/transp_v6.h>
63 #include <net/ip6_route.h>
64 #include <net/addrconf.h>
65 #include <net/icmp.h>
66 
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
69 
70 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71 
72 /*
73  *	The ICMP socket(s). This is the most convenient way to flow control
74  *	our ICMP output as well as maintain a clean interface throughout
75  *	all layers. All Socketless IP sends will soon be gone.
76  *
77  *	On SMP we have one ICMP socket per-cpu.
78  */
79 static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
80 #define icmpv6_socket	__get_cpu_var(__icmpv6_socket)
81 
82 static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp);
83 
84 static struct inet6_protocol icmpv6_protocol = {
85 	.handler	=	icmpv6_rcv,
86 	.flags		=	INET6_PROTO_FINAL,
87 };
88 
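/*
 * Grab the per-CPU ICMPv6 socket for transmission.  The trylock can
 * fail when the socket is already locked on this CPU, which happens if
 * an outgoing ICMPv6 packet itself triggers dst_link_failure() in the
 * output path; in that case the caller must simply give up.
 */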
89 static __inline__ int icmpv6_xmit_lock(void)
90 {
91 	local_bh_disable();
92 
93 	if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
94 		/* This can happen if the output path (e.g. a SIT or
95 		 * ip6ip6 tunnel) signals dst_link_failure() for an
96 		 * outgoing ICMP6 packet.
97 		 */
98 		local_bh_enable();
99 		return 1;
100 	}
101 	return 0;
102 }
103 
104 static __inline__ void icmpv6_xmit_unlock(void)
105 {
106 	spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
107 }
108 
109 /*
110  * Slightly more convenient version of icmpv6_send.
111  */
112 void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
113 {
114 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
115 	kfree_skb(skb);
116 }
117 
118 /*
119  * Figure out whether we may reply to this packet with an ICMP error.
120  *
121  * We do not reply if:
122  *	- it was an ICMP error message.
123  *	- it is truncated, so that it is known that the protocol is ICMPv6
124  *	  (i.e. it is cut in the middle of some exthdr)
125  *
126  *	--ANK (980726)
127  */
128 
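/* Returns 1 if no ICMPv6 error may be sent in reply to this packet. */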
129 static int is_ineligible(struct sk_buff *skb)
130 {
131 	int ptr = (u8*)(skb->nh.ipv6h+1) - skb->data;
132 	int len = skb->len - ptr;
133 	__u8 nexthdr = skb->nh.ipv6h->nexthdr;
134 
135 	if (len < 0)
136 		return 1;
137 
138 	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
139 	if (ptr < 0)
140 		return 0;
141 	if (nexthdr == IPPROTO_ICMPV6) {
142 		u8 _type, *tp;
143 		tp = skb_header_pointer(skb,
144 			ptr+offsetof(struct icmp6hdr, icmp6_type),
145 			sizeof(_type), &_type);
146 		if (tp == NULL ||
147 		    !(*tp & ICMPV6_INFOMSG_MASK))
148 			return 1;
149 	}
150 	return 0;
151 }
152 
153 static int sysctl_icmpv6_time = 1*HZ;
154 
155 /*
156  * Check the ICMP output rate limit
157  */
158 static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
159 				     struct flowi *fl)
160 {
161 	struct dst_entry *dst;
162 	int res = 0;
163 
164 	/* Informational messages are not limited. */
165 	if (type & ICMPV6_INFOMSG_MASK)
166 		return 1;
167 
168 	/* Do not limit pmtu discovery, it would break it. */
169 	if (type == ICMPV6_PKT_TOOBIG)
170 		return 1;
171 
172 	/*
173 	 * Look up the output route.
174 	 * XXX: perhaps the expiry time for routing entries cloned by
175 	 * this lookup should be more aggressive (not longer than timeout).
176 	 */
177 	dst = ip6_route_output(sk, fl);
178 	if (dst->error) {
179 		IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
180 	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
181 		res = 1;
182 	} else {
183 		struct rt6_info *rt = (struct rt6_info *)dst;
184 		int tmo = sysctl_icmpv6_time;
185 
186 		/* Give more bandwidth to wider prefixes. */
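		/*
		 * E.g. a /0 route gets sysctl_icmpv6_time/16, a /32 route
		 * 1/8, a /64 route 1/4 and a /96 route 1/2 of the timeout,
		 * while prefixes longer than /96 keep the full interval.
		 */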
187 		if (rt->rt6i_dst.plen < 128)
188 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
189 
190 		res = xrlim_allow(dst, tmo);
191 	}
192 	dst_release(dst);
193 	return res;
194 }
195 
196 /*
197  *	An inline helper for the "simple" if statement below;
198  *	it checks whether a parameter problem report is caused by an
199  *	unrecognized IPv6 option whose Option Type has its
200  *	highest-order two bits set to 10.
201  */
202 
203 static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
204 {
205 	u8 _optval, *op;
206 
207 	offset += skb->nh.raw - skb->data;
208 	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
209 	if (op == NULL)
210 		return 1;
211 	return (*op & 0xC0) == 0x80;
212 }
213 
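/*
 * Copy the prepared ICMPv6 header into the first queued fragment,
 * checksum the header together with the payload checksums accumulated
 * on sk_write_queue (one skb or many), fill in icmp6_cksum and hand
 * the queue to ip6_push_pending_frames() for transmission.
 */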
214 static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
215 {
216 	struct sk_buff *skb;
217 	struct icmp6hdr *icmp6h;
218 	int err = 0;
219 
220 	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
221 		goto out;
222 
223 	icmp6h = (struct icmp6hdr*) skb->h.raw;
224 	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
225 	icmp6h->icmp6_cksum = 0;
226 
227 	if (skb_queue_len(&sk->sk_write_queue) == 1) {
228 		skb->csum = csum_partial((char *)icmp6h,
229 					sizeof(struct icmp6hdr), skb->csum);
230 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
231 						      &fl->fl6_dst,
232 						      len, fl->proto,
233 						      skb->csum);
234 	} else {
235 		u32 tmp_csum = 0;
236 
237 		skb_queue_walk(&sk->sk_write_queue, skb) {
238 			tmp_csum = csum_add(tmp_csum, skb->csum);
239 		}
240 
241 		tmp_csum = csum_partial((char *)icmp6h,
242 					sizeof(struct icmp6hdr), tmp_csum);
243 		tmp_csum = csum_ipv6_magic(&fl->fl6_src,
244 					   &fl->fl6_dst,
245 					   len, fl->proto, tmp_csum);
246 		icmp6h->icmp6_cksum = tmp_csum;
247 	}
248 	if (icmp6h->icmp6_cksum == 0)
249 		icmp6h->icmp6_cksum = -1;
250 	ip6_push_pending_frames(sk);
251 out:
252 	return err;
253 }
254 
255 struct icmpv6_msg {
256 	struct sk_buff	*skb;
257 	int		offset;
258 };
259 
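/*
 * getfrag() callback for ip6_append_data(): copy a chunk of the
 * offending packet into the message being built and fold its partial
 * checksum into skb->csum.
 */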
260 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
261 {
262 	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
263 	struct sk_buff *org_skb = msg->skb;
264 	__u32 csum = 0;
265 
266 	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
267 				      to, len, csum);
268 	skb->csum = csum_block_add(skb->csum, csum, odd);
269 	return 0;
270 }
271 
272 /*
273  *	Send an ICMP message in response to a packet in error
274  */
275 void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
276 		 struct net_device *dev)
277 {
278 	struct inet6_dev *idev = NULL;
279 	struct ipv6hdr *hdr = skb->nh.ipv6h;
280 	struct sock *sk;
281 	struct ipv6_pinfo *np;
282 	struct in6_addr *saddr = NULL;
283 	struct dst_entry *dst;
284 	struct icmp6hdr tmp_hdr;
285 	struct flowi fl;
286 	struct icmpv6_msg msg;
287 	int iif = 0;
288 	int addr_type = 0;
289 	int len;
290 	int hlimit, tclass;
291 	int err = 0;
292 
293 	if ((u8*)hdr < skb->head || (u8*)(hdr+1) > skb->tail)
294 		return;
295 
296 	/*
297 	 *	Make sure we respect the rules,
298 	 *	i.e. RFC 1885 2.4(e).
299 	 *	Rule (e.1) is enforced by not using icmpv6_send
300 	 *	in any code that processes ICMP errors.
301 	 */
302 	addr_type = ipv6_addr_type(&hdr->daddr);
303 
304 	if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
305 		saddr = &hdr->daddr;
306 
307 	/*
308 	 *	Dest addr check
309 	 */
310 
311 	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
312 		if (type != ICMPV6_PKT_TOOBIG &&
313 		    !(type == ICMPV6_PARAMPROB &&
314 		      code == ICMPV6_UNK_OPTION &&
315 		      (opt_unrec(skb, info))))
316 			return;
317 
318 		saddr = NULL;
319 	}
320 
321 	addr_type = ipv6_addr_type(&hdr->saddr);
322 
323 	/*
324 	 *	Source addr check
325 	 */
326 
327 	if (addr_type & IPV6_ADDR_LINKLOCAL)
328 		iif = skb->dev->ifindex;
329 
330 	/*
331 	 *	We must also not send if we know that the source is an
332 	 *	anycast address; for now we have no way of knowing that.
333 	 */
334 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
335 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
336 		return;
337 	}
338 
339 	/*
340 	 *	Never reply to an ICMP error packet.
341 	 */
342 	if (is_ineligible(skb)) {
343 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
344 		return;
345 	}
346 
347 	memset(&fl, 0, sizeof(fl));
348 	fl.proto = IPPROTO_ICMPV6;
349 	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
350 	if (saddr)
351 		ipv6_addr_copy(&fl.fl6_src, saddr);
352 	fl.oif = iif;
353 	fl.fl_icmp_type = type;
354 	fl.fl_icmp_code = code;
355 
356 	if (icmpv6_xmit_lock())
357 		return;
358 
359 	sk = icmpv6_socket->sk;
360 	np = inet6_sk(sk);
361 
362 	if (!icmpv6_xrlim_allow(sk, type, &fl))
363 		goto out;
364 
365 	tmp_hdr.icmp6_type = type;
366 	tmp_hdr.icmp6_code = code;
367 	tmp_hdr.icmp6_cksum = 0;
368 	tmp_hdr.icmp6_pointer = htonl(info);
369 
370 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
371 		fl.oif = np->mcast_oif;
372 
373 	err = ip6_dst_lookup(sk, &dst, &fl);
374 	if (err)
375 		goto out;
376 	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
377 		goto out;
378 
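	/*
	 * Choose the hop limit: the socket's multicast or unicast hop
	 * limit if set, otherwise the route's hop-limit metric, otherwise
	 * the hop limit configured for the outgoing device.
	 */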
379 	if (ipv6_addr_is_multicast(&fl.fl6_dst))
380 		hlimit = np->mcast_hops;
381 	else
382 		hlimit = np->hop_limit;
383 	if (hlimit < 0)
384 		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
385 	if (hlimit < 0)
386 		hlimit = ipv6_get_hoplimit(dst->dev);
387 
388 	tclass = np->cork.tclass;
389 	if (tclass < 0)
390 		tclass = 0;
391 
392 	msg.skb = skb;
393 	msg.offset = skb->nh.raw - skb->data;
394 
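	/*
	 * Quote as much of the offending packet as fits: the resulting
	 * error message must not exceed the minimum IPv6 MTU (1280 bytes).
	 */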
395 	len = skb->len - msg.offset;
396 	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
397 	if (len < 0) {
398 		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
399 		goto out_dst_release;
400 	}
401 
402 	idev = in6_dev_get(skb->dev);
403 
404 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
405 			      len + sizeof(struct icmp6hdr),
406 			      sizeof(struct icmp6hdr),
407 			      hlimit, tclass, NULL, &fl, (struct rt6_info*)dst,
408 			      MSG_DONTWAIT);
409 	if (err) {
410 		ip6_flush_pending_frames(sk);
411 		goto out_put;
412 	}
413 	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
414 
415 	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
416 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
417 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
418 
419 out_put:
420 	if (likely(idev != NULL))
421 		in6_dev_put(idev);
422 out_dst_release:
423 	dst_release(dst);
424 out:
425 	icmpv6_xmit_unlock();
426 }
427 
428 static void icmpv6_echo_reply(struct sk_buff *skb)
429 {
430 	struct sock *sk;
431 	struct inet6_dev *idev;
432 	struct ipv6_pinfo *np;
433 	struct in6_addr *saddr = NULL;
434 	struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
435 	struct icmp6hdr tmp_hdr;
436 	struct flowi fl;
437 	struct icmpv6_msg msg;
438 	struct dst_entry *dst;
439 	int err = 0;
440 	int hlimit;
441 	int tclass;
442 
443 	saddr = &skb->nh.ipv6h->daddr;
444 
445 	if (!ipv6_unicast_destination(skb))
446 		saddr = NULL;
447 
448 	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
449 	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
450 
451 	memset(&fl, 0, sizeof(fl));
452 	fl.proto = IPPROTO_ICMPV6;
453 	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
454 	if (saddr)
455 		ipv6_addr_copy(&fl.fl6_src, saddr);
456 	fl.oif = skb->dev->ifindex;
457 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
458 
459 	if (icmpv6_xmit_lock())
460 		return;
461 
462 	sk = icmpv6_socket->sk;
463 	np = inet6_sk(sk);
464 
465 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
466 		fl.oif = np->mcast_oif;
467 
468 	err = ip6_dst_lookup(sk, &dst, &fl);
469 	if (err)
470 		goto out;
471 	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
472 		goto out;
473 
474 	if (ipv6_addr_is_multicast(&fl.fl6_dst))
475 		hlimit = np->mcast_hops;
476 	else
477 		hlimit = np->hop_limit;
478 	if (hlimit < 0)
479 		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
480 	if (hlimit < 0)
481 		hlimit = ipv6_get_hoplimit(dst->dev);
482 
483 	tclass = np->cork.tclass;
484 	if (tclass < 0)
485 		tclass = 0;
486 
487 	idev = in6_dev_get(skb->dev);
488 
489 	msg.skb = skb;
490 	msg.offset = 0;
491 
492 	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
493 				sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
494 				(struct rt6_info*)dst, MSG_DONTWAIT);
495 
496 	if (err) {
497 		ip6_flush_pending_frames(sk);
498 		goto out_put;
499 	}
500 	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
501 
502 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
503 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
504 
505 out_put:
506 	if (likely(idev != NULL))
507 		in6_dev_put(idev);
508 	dst_release(dst);
509 out:
510 	icmpv6_xmit_unlock();
511 }
512 
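/*
 * Pass an incoming ICMPv6 error on to the upper-layer protocol's
 * err_handler and to any raw sockets bound to the embedded protocol.
 */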
513 static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info)
514 {
515 	struct in6_addr *saddr, *daddr;
516 	struct inet6_protocol *ipprot;
517 	struct sock *sk;
518 	int inner_offset;
519 	int hash;
520 	u8 nexthdr;
521 
522 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
523 		return;
524 
525 	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
526 	if (ipv6_ext_hdr(nexthdr)) {
527 		/* now skip over extension headers */
528 		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
529 		if (inner_offset<0)
530 			return;
531 	} else {
532 		inner_offset = sizeof(struct ipv6hdr);
533 	}
534 
535 	/* Check the header, including 8 bytes of the inner protocol header. */
536 	if (!pskb_may_pull(skb, inner_offset+8))
537 		return;
538 
539 	saddr = &skb->nh.ipv6h->saddr;
540 	daddr = &skb->nh.ipv6h->daddr;
541 
542 	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
543 	   Without this we will not be able, e.g., to do source routed
544 	   pmtu discovery.
545 	   The corresponding argument (opt) to the notifiers has already been added.
546 	   --ANK (980726)
547 	 */
548 
549 	hash = nexthdr & (MAX_INET_PROTOS - 1);
550 
551 	rcu_read_lock();
552 	ipprot = rcu_dereference(inet6_protos[hash]);
553 	if (ipprot && ipprot->err_handler)
554 		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
555 	rcu_read_unlock();
556 
557 	read_lock(&raw_v6_lock);
558 	if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
559 		while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
560 					    IP6CB(skb)->iif))) {
561 			rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
562 			sk = sk_next(sk);
563 		}
564 	}
565 	read_unlock(&raw_v6_lock);
566 }
567 
568 /*
569  *	Handle icmp messages
570  */
571 
572 static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
573 {
574 	struct sk_buff *skb = *pskb;
575 	struct net_device *dev = skb->dev;
576 	struct inet6_dev *idev = __in6_dev_get(dev);
577 	struct in6_addr *saddr, *daddr;
578 	struct ipv6hdr *orig_hdr;
579 	struct icmp6hdr *hdr;
580 	int type;
581 
582 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
583 
584 	saddr = &skb->nh.ipv6h->saddr;
585 	daddr = &skb->nh.ipv6h->daddr;
586 
587 	/* Perform checksum. */
588 	if (skb->ip_summed == CHECKSUM_HW) {
589 		skb->ip_summed = CHECKSUM_UNNECESSARY;
590 		if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
591 				    skb->csum)) {
592 			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 hw checksum failed\n");
593 			skb->ip_summed = CHECKSUM_NONE;
594 		}
595 	}
596 	if (skb->ip_summed == CHECKSUM_NONE) {
597 		if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
598 				    skb_checksum(skb, 0, skb->len, 0))) {
599 			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
600 				       NIP6(*saddr), NIP6(*daddr));
601 			goto discard_it;
602 		}
603 	}
604 
605 	if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
606 		goto discard_it;
607 
608 	hdr = (struct icmp6hdr *) skb->h.raw;
609 
610 	type = hdr->icmp6_type;
611 
612 	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
613 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
614 	else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
615 		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
616 
617 	switch (type) {
618 	case ICMPV6_ECHO_REQUEST:
619 		icmpv6_echo_reply(skb);
620 		break;
621 
622 	case ICMPV6_ECHO_REPLY:
623 		/* we couldn't care less */
624 		break;
625 
626 	case ICMPV6_PKT_TOOBIG:
627 		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
628 		   standard destination cache. It seems only an "advanced"
629 		   destination cache will allow us to solve this problem.
630 		   --ANK (980726)
631 		 */
632 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
633 			goto discard_it;
634 		hdr = (struct icmp6hdr *) skb->h.raw;
635 		orig_hdr = (struct ipv6hdr *) (hdr + 1);
636 		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
637 				   ntohl(hdr->icmp6_mtu));
638 
639 		/*
640 		 *	Drop through to notify
641 		 */
642 
643 	case ICMPV6_DEST_UNREACH:
644 	case ICMPV6_TIME_EXCEED:
645 	case ICMPV6_PARAMPROB:
646 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
647 		break;
648 
649 	case NDISC_ROUTER_SOLICITATION:
650 	case NDISC_ROUTER_ADVERTISEMENT:
651 	case NDISC_NEIGHBOUR_SOLICITATION:
652 	case NDISC_NEIGHBOUR_ADVERTISEMENT:
653 	case NDISC_REDIRECT:
654 		ndisc_rcv(skb);
655 		break;
656 
657 	case ICMPV6_MGM_QUERY:
658 		igmp6_event_query(skb);
659 		break;
660 
661 	case ICMPV6_MGM_REPORT:
662 		igmp6_event_report(skb);
663 		break;
664 
665 	case ICMPV6_MGM_REDUCTION:
666 	case ICMPV6_NI_QUERY:
667 	case ICMPV6_NI_REPLY:
668 	case ICMPV6_MLD2_REPORT:
669 	case ICMPV6_DHAAD_REQUEST:
670 	case ICMPV6_DHAAD_REPLY:
671 	case ICMPV6_MOBILE_PREFIX_SOL:
672 	case ICMPV6_MOBILE_PREFIX_ADV:
673 		break;
674 
675 	default:
676 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
677 
678 		/* informational */
679 		if (type & ICMPV6_INFOMSG_MASK)
680 			break;
681 
682 		/*
683 		 * Error of unknown type;
684 		 * must be passed to the upper level.
685 		 */
686 
687 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
688 	}
689 	kfree_skb(skb);
690 	return 0;
691 
692 discard_it:
693 	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
694 	kfree_skb(skb);
695 	return 0;
696 }
697 
698 int __init icmpv6_init(struct net_proto_family *ops)
699 {
700 	struct sock *sk;
701 	int err, i, j;
702 
703 	for_each_cpu(i) {
704 		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
705 				       &per_cpu(__icmpv6_socket, i));
706 		if (err < 0) {
707 			printk(KERN_ERR
708 			       "Failed to initialize the ICMP6 control socket "
709 			       "(err %d).\n",
710 			       err);
711 			goto fail;
712 		}
713 
714 		sk = per_cpu(__icmpv6_socket, i)->sk;
715 		sk->sk_allocation = GFP_ATOMIC;
716 
717 		/* Enough space for 2 64K ICMP packets, including
718 		 * sk_buff struct overhead.
719 		 */
720 		sk->sk_sndbuf =
721 			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
722 
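		/* Unhash the control socket so it is never matched by
		 * incoming raw-socket lookups.
		 */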
723 		sk->sk_prot->unhash(sk);
724 	}
725 
726 
727 	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
728 		printk(KERN_ERR "Failed to register ICMP6 protocol\n");
729 		err = -EAGAIN;
730 		goto fail;
731 	}
732 
733 	return 0;
734 
735  fail:
736 	for (j = 0; j < i; j++) {
737 		if (!cpu_possible(j))
738 			continue;
739 		sock_release(per_cpu(__icmpv6_socket, j));
740 	}
741 
742 	return err;
743 }
744 
745 void icmpv6_cleanup(void)
746 {
747 	int i;
748 
749 	for_each_cpu(i) {
750 		sock_release(per_cpu(__icmpv6_socket, i));
751 	}
752 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
753 }
754 
755 static struct icmp6_err {
756 	int err;
757 	int fatal;
758 } tab_unreach[] = {
759 	{	/* NOROUTE */
760 		.err	= ENETUNREACH,
761 		.fatal	= 0,
762 	},
763 	{	/* ADM_PROHIBITED */
764 		.err	= EACCES,
765 		.fatal	= 1,
766 	},
767 	{	/* Was NOT_NEIGHBOUR, now reserved */
768 		.err	= EHOSTUNREACH,
769 		.fatal	= 0,
770 	},
771 	{	/* ADDR_UNREACH	*/
772 		.err	= EHOSTUNREACH,
773 		.fatal	= 0,
774 	},
775 	{	/* PORT_UNREACH	*/
776 		.err	= ECONNREFUSED,
777 		.fatal	= 1,
778 	},
779 };
780 
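/*
 * Translate an ICMPv6 error type/code into an errno value for the
 * transport layer; the return value is nonzero when the error is
 * fatal for the connection.
 */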
781 int icmpv6_err_convert(int type, int code, int *err)
782 {
783 	int fatal = 0;
784 
785 	*err = EPROTO;
786 
787 	switch (type) {
788 	case ICMPV6_DEST_UNREACH:
789 		fatal = 1;
790 		if (code <= ICMPV6_PORT_UNREACH) {
791 			*err  = tab_unreach[code].err;
792 			fatal = tab_unreach[code].fatal;
793 		}
794 		break;
795 
796 	case ICMPV6_PKT_TOOBIG:
797 		*err = EMSGSIZE;
798 		break;
799 
800 	case ICMPV6_PARAMPROB:
801 		*err = EPROTO;
802 		fatal = 1;
803 		break;
804 
805 	case ICMPV6_TIME_EXCEED:
806 		*err = EHOSTUNREACH;
807 		break;
808 	}
809 
810 	return fatal;
811 }
812 
813 #ifdef CONFIG_SYSCTL
814 ctl_table ipv6_icmp_table[] = {
815 	{
816 		.ctl_name	= NET_IPV6_ICMP_RATELIMIT,
817 		.procname	= "ratelimit",
818 		.data		= &sysctl_icmpv6_time,
819 		.maxlen		= sizeof(int),
820 		.mode		= 0644,
821 		.proc_handler	= &proc_dointvec
822 	},
823 	{ .ctl_name = 0 },
824 };
825 #endif
826 
827