/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *      Horst von Brand Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>

int sysctl_ip6frag_high_thresh = 256*1024;
int sysctl_ip6frag_low_thresh = 192*1024;

int sysctl_ip6frag_time = IPV6_FRAG_TIMEOUT;

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct hlist_node	list;
	struct list_head	lru_list;	/* lru list member	*/

	__u32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	spinlock_t		lock;
	atomic_t		refcnt;
	struct timer_list	timer;		/* expire timer		*/
	struct sk_buff		*fragments;
	int			len;
	int			meat;
	int			iif;
	struct timeval		stamp;
	unsigned int		csum;
	__u8			last_in;	/* has first/last segment arrived? */
#define COMPLETE		4
#define FIRST_IN		2
#define LAST_IN			1
	__u16			nhoffset;
};

/* Hash table. */

#define IP6Q_HASHSZ	64

static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
static DEFINE_RWLOCK(ip6_frag_lock);
static u32 ip6_frag_hash_rnd;
static LIST_HEAD(ip6_frag_lru_list);
int ip6_frag_nqueues = 0;

static __inline__ void __fq_unlink(struct frag_queue *fq)
{
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	ip6_frag_nqueues--;
}

static __inline__ void fq_unlink(struct frag_queue *fq)
{
	write_lock(&ip6_frag_lock);
	__fq_unlink(fq);
	write_unlock(&ip6_frag_lock);
}

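/*
 * Open-coded jhash over the flow identity: mix the source address first,
 * then the destination address and the fragment id.  ip6_frag_hash_rnd
 * perturbs the result so remote senders cannot steer datagrams onto a
 * chosen hash chain.
 */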
static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = saddr->s6_addr32[0];
	b = saddr->s6_addr32[1];
	c = saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += ip6_frag_hash_rnd;
	__jhash_mix(a, b, c);

	a += saddr->s6_addr32[3];
	b += daddr->s6_addr32[0];
	c += daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += daddr->s6_addr32[2];
	b += daddr->s6_addr32[3];
	c += id;
	__jhash_mix(a, b, c);

	return c & (IP6Q_HASHSZ - 1);
}

static struct timer_list ip6_frag_secret_timer;
int sysctl_ip6frag_secret_interval = 10 * 60 * HZ;

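/*
 * Timer callback: pick a fresh hash secret and relink every queue to the
 * chain it now hashes to, limiting how long a given table layout can be
 * observed (or abused) from outside.
 */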
static void ip6_frag_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;
	int i;

	write_lock(&ip6_frag_lock);
	get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
	for (i = 0; i < IP6Q_HASHSZ; i++) {
		struct frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
			unsigned int hval = ip6qhashfn(q->id,
						       &q->saddr,
						       &q->daddr);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list,
					       &ip6_frag_hash[hval]);

			}
		}
	}
	write_unlock(&ip6_frag_lock);

	mod_timer(&ip6_frag_secret_timer, now + sysctl_ip6frag_secret_interval);
}

atomic_t ip6_frag_mem = ATOMIC_INIT(0);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip6_frag_mem);
	kfree_skb(skb);
}

static inline void frag_free_queue(struct frag_queue *fq, int *work)
{
	if (work)
		*work -= sizeof(struct frag_queue);
	atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
	kfree(fq);
}

static inline struct frag_queue *frag_alloc_queue(void)
{
	struct frag_queue *fq = kmalloc(sizeof(struct frag_queue), GFP_ATOMIC);

	if (!fq)
		return NULL;
	atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
	return fq;
}

/* Destruction primitives. */

/* Complete destruction of fq. */
static void ip6_frag_destroy(struct frag_queue *fq, int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(fq->last_in & COMPLETE);
	BUG_TRAP(del_timer(&fq->timer) == 0);

	/* Release all fragment data. */
	fp = fq->fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(fp, work);
		fp = xp;
	}

	frag_free_queue(fq, work);
}

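/*
 * Drop one reference.  The last holder frees the queue; when @work is
 * non-NULL, the freed memory is credited against the evictor's budget.
 */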
static __inline__ void fq_put(struct frag_queue *fq, int *work)
{
	if (atomic_dec_and_test(&fq->refcnt))
		ip6_frag_destroy(fq, work);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & COMPLETE)) {
		fq_unlink(fq);
		atomic_dec(&fq->refcnt);
		fq->last_in |= COMPLETE;
	}
}

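/*
 * Walk the LRU list from the head (the oldest queues) and kill entries
 * until total fragment memory falls back below the low threshold.
 */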
static void ip6_evictor(void)
{
	struct frag_queue *fq;
	struct list_head *tmp;
	int work;

	work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh;
	if (work <= 0)
		return;

	while (work > 0) {
		read_lock(&ip6_frag_lock);
		if (list_empty(&ip6_frag_lru_list)) {
			read_unlock(&ip6_frag_lock);
			return;
		}
		tmp = ip6_frag_lru_list.next;
		fq = list_entry(tmp, struct frag_queue, lru_list);
		atomic_inc(&fq->refcnt);
		read_unlock(&ip6_frag_lock);

		spin_lock(&fq->lock);
		if (!(fq->last_in & COMPLETE))
			fq_kill(fq);
		spin_unlock(&fq->lock);

		fq_put(fq, &work);
		IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	}
}

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq = (struct frag_queue *) data;

	spin_lock(&fq->lock);

	if (fq->last_in & COMPLETE)
		goto out;

	fq_kill(fq);

	IP6_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);

	/* Send error only if the first segment arrived. */
	if (fq->last_in & FIRST_IN && fq->fragments) {
		struct net_device *dev = dev_get_by_index(fq->iif);

		/*
		 * Use the device on which the LAST segment arrived as the
		 * source device.  Do not use a cached device pointer
		 * directly; the device might have disappeared already.
		 */
		if (dev) {
			fq->fragments->dev = dev;
			icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0,
				    dev);
			dev_put(dev);
		}
	}
out:
	spin_unlock(&fq->lock);
	fq_put(fq, NULL);
}

/* Creation primitives. */


static struct frag_queue *ip6_frag_intern(unsigned int hash,
					  struct frag_queue *fq_in)
{
	struct frag_queue *fq;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif

	write_lock(&ip6_frag_lock);
#ifdef CONFIG_SMP
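	/*
	 * Another CPU may have interned an identical queue while this one
	 * was being set up; if so, reuse the existing entry and discard
	 * the new one.
	 */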
	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
		if (fq->id == fq_in->id &&
		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
			atomic_inc(&fq->refcnt);
			write_unlock(&ip6_frag_lock);
			fq_in->last_in |= COMPLETE;
			fq_put(fq_in, NULL);
			return fq;
		}
	}
#endif
	fq = fq_in;

	if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
		atomic_inc(&fq->refcnt);

	atomic_inc(&fq->refcnt);
	hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
	INIT_LIST_HEAD(&fq->lru_list);
	list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
	ip6_frag_nqueues++;
	write_unlock(&ip6_frag_lock);
	return fq;
}


static struct frag_queue *
ip6_frag_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct frag_queue *fq;

	if ((fq = frag_alloc_queue()) == NULL)
		goto oom;

	memset(fq, 0, sizeof(struct frag_queue));

	fq->id = id;
	ipv6_addr_copy(&fq->saddr, src);
	ipv6_addr_copy(&fq->daddr, dst);

	init_timer(&fq->timer);
	fq->timer.function = ip6_frag_expire;
	fq->timer.data = (long) fq;
	spin_lock_init(&fq->lock);
	atomic_set(&fq->refcnt, 1);

	return ip6_frag_intern(hash, fq);

oom:
	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	return NULL;
}

static __inline__ struct frag_queue *
fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct frag_queue *fq;
	struct hlist_node *n;
	unsigned int hash = ip6qhashfn(id, src, dst);

	read_lock(&ip6_frag_lock);
	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
		if (fq->id == id &&
		    ipv6_addr_equal(src, &fq->saddr) &&
		    ipv6_addr_equal(dst, &fq->daddr)) {
			atomic_inc(&fq->refcnt);
			read_unlock(&ip6_frag_lock);
			return fq;
		}
	}
	read_unlock(&ip6_frag_lock);

	return ip6_frag_create(hash, id, src, dst);
}


static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	int offset, end;

	if (fq->last_in & COMPLETE)
		goto err;

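	/*
	 * The fragment offset is carried in 8-byte units; `end' is the
	 * offset just past this fragment's payload, derived from the IPv6
	 * payload length minus the headers preceding the fragment data.
	 */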
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
			((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
		return;
	}

	if (skb->ip_summed == CHECKSUM_HW)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->len ||
		    ((fq->last_in & LAST_IN) && end != fq->len))
			goto err;
		fq->last_in |= LAST_IN;
		fq->len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return;
		}
		if (end > fq->len) {
			/* Some bits beyond end -> corruption. */
			if (fq->last_in & LAST_IN)
				goto err;
			fq->len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->fragments = next;

			fq->meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->fragments = skb;

	if (skb->dev)
		fq->iif = skb->dev->ifindex;
	skb->dev = NULL;
	skb_get_timestamp(skb, &fq->stamp);
	fq->meat += skb->len;
	atomic_add(skb->truesize, &ip6_frag_mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->last_in |= FIRST_IN;
	}
	write_lock(&ip6_frag_lock);
	list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
	write_unlock(&ip6_frag_lock);
	return;

err:
	IP6_INC_STATS(IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
}

/*
 *	Check if this packet is complete.
 *
 *	Returns -1 on failure and 1 on success; on success *skb_in is
 *	updated to point at the reassembled frame, and IP6CB(*skb_in)->nhoff
 *	holds the offset of its nexthdr field.
 *
 *	It is called with the fq locked, and the caller must check that the
 *	queue is eligible for reassembly, i.e. it is not COMPLETE, both the
 *	first and the last fragments have arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr);
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip6_frag_mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	head->nh.raw[nhoff] = head->h.raw[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac.raw += sizeof(struct frag_hdr);
	head->nh.raw += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	head->h.raw = head->data;
	skb_push(head, head->data - head->nh.raw);
	atomic_sub(head->truesize, &ip6_frag_mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_HW)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip6_frag_mem);
	}

	head->next = NULL;
	head->dev = dev;
	skb_set_timestamp(head, &fq->stamp);
	head->nh.ipv6h->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	*skb_in = head;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_HW)
		head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);

	IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
	fq->fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct net_device *dev = skb->dev;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;

	hdr = skb->nh.ipv6h;

	IP6_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0) {
		IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
		return -1;
	}
	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
		IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
		return -1;
	}

	hdr = skb->nh.ipv6h;
	fhdr = (struct frag_hdr *)skb->h.raw;

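	/*
	 * 0xFFF9 covers the 13-bit fragment offset (0xFFF8) plus the
	 * M flag (0x0001); if all of those bits are zero, this is the
	 * lone fragment and no reassembly is needed.
	 */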
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->h.raw += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw;
		return 1;
	}

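	/*
	 * Over the high threshold: evict the oldest queues down to the
	 * low threshold before taking on this fragment.
	 */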
	if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
		ip6_evictor();

	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) {
		int ret = -1;

		spin_lock(&fq->lock);

		ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

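		/*
		 * Reassemble once both boundary fragments have arrived and
		 * the accumulated payload (meat) covers the full length.
		 */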
		if (fq->last_in == (FIRST_IN|LAST_IN) &&
		    fq->meat == fq->len)
			ret = ip6_frag_reasm(fq, skbp, dev);

		spin_unlock(&fq->lock);
		fq_put(fq, NULL);
		return ret;
	}

	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

static struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

void __init ipv6_frag_init(void)
{
	if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
		printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");

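	/*
	 * Seed the hash secret from boot-time state; ip6_frag_secret_rebuild()
	 * replaces it with get_random_bytes() output every secret_interval.
	 */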
	ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
				   (jiffies ^ (jiffies >> 6)));

	init_timer(&ip6_frag_secret_timer);
	ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
	ip6_frag_secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
	add_timer(&ip6_frag_secret_timer);
}