xref: /openbmc/linux/net/ipv6/reassembly.c (revision f61944ef)
/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *	Horst von Brand	Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>

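/*
 * Reassembly resource limits (tunable via sysctl).  Memory consumed by
 * incomplete datagrams is charged to ip6_frag_mem; once it exceeds
 * high_thresh, ip6_evictor() drops queues from the LRU list until usage
 * falls back below low_thresh.  A queue that does not complete within
 * sysctl_ip6frag_time jiffies is expired by ip6_frag_expire().
 */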
int sysctl_ip6frag_high_thresh __read_mostly = 256*1024;
int sysctl_ip6frag_low_thresh __read_mostly = 192*1024;

int sysctl_ip6frag_time __read_mostly = IPV6_FRAG_TIMEOUT;

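/*
 * Per-fragment state kept in skb->cb while a fragment sits on a queue:
 * the inet6 control block is preserved and the fragment's byte offset
 * within the original datagram is stored next to it.  FRAG6_CB() below
 * is the accessor used throughout this file.
 */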
struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct hlist_node	list;
	struct list_head	lru_list;	/* lru list member	*/

	__be32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	spinlock_t		lock;
	atomic_t		refcnt;
	struct timer_list	timer;		/* expire timer		*/
	struct sk_buff		*fragments;
	int			len;
	int			meat;
	int			iif;
	ktime_t			stamp;
	unsigned int		csum;
	__u8			last_in;	/* has first/last segment arrived? */
#define COMPLETE		4
#define FIRST_IN		2
#define LAST_IN			1
	__u16			nhoffset;
};
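
/*
 * Lifetime of a frag_queue is governed by its refcnt: the hash table and
 * the pending expire timer each hold one reference, and every lookup
 * (fq_find()/ip6_frag_intern()) takes another one for its caller.
 * fq_kill() drops the timer and hash-table references; the queue is
 * finally freed by fq_put() when the last reference goes away.
 */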

/* Hash table. */

#define IP6Q_HASHSZ	64

static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
static DEFINE_RWLOCK(ip6_frag_lock);
static u32 ip6_frag_hash_rnd;
static LIST_HEAD(ip6_frag_lru_list);
int ip6_frag_nqueues = 0;

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

static __inline__ void __fq_unlink(struct frag_queue *fq)
{
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	ip6_frag_nqueues--;
}

static __inline__ void fq_unlink(struct frag_queue *fq)
{
	write_lock(&ip6_frag_lock);
	__fq_unlink(fq);
	write_unlock(&ip6_frag_lock);
}

/*
 * Callers should be careful not to use the hash value outside of the
 * ip6_frag_lock, as doing so could race with ip6_frag_hash_rnd being
 * recalculated.
 */
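/*
 * The hash is an open-coded jhash over the (source address, destination
 * address, fragment id) tuple, perturbed by the periodically regenerated
 * ip6_frag_hash_rnd and folded down to an IP6Q_HASHSZ-sized bucket index.
 */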
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += ip6_frag_hash_rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (IP6Q_HASHSZ - 1);
}

static struct timer_list ip6_frag_secret_timer;
int sysctl_ip6frag_secret_interval __read_mostly = 10 * 60 * HZ;

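/*
 * Periodically pick a new hash secret and rehash every queue that now
 * belongs in a different bucket.  This makes it harder for an attacker
 * to concentrate all reassembly state on a single hash chain.
 */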
static void ip6_frag_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;
	int i;

	write_lock(&ip6_frag_lock);
	get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
	for (i = 0; i < IP6Q_HASHSZ; i++) {
		struct frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
			unsigned int hval = ip6qhashfn(q->id,
						       &q->saddr,
						       &q->daddr);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list,
					       &ip6_frag_hash[hval]);

			}
		}
	}
	write_unlock(&ip6_frag_lock);

	mod_timer(&ip6_frag_secret_timer, now + sysctl_ip6frag_secret_interval);
}

atomic_t ip6_frag_mem = ATOMIC_INIT(0);

/* Memory Tracking Functions. */
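/*
 * Every queued fragment is charged with its skb->truesize, and every
 * queue with sizeof(struct frag_queue).  The optional 'work' argument
 * lets the evictor keep track of how much memory it has released so far.
 */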
static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip6_frag_mem);
	kfree_skb(skb);
}

static inline void frag_free_queue(struct frag_queue *fq, int *work)
{
	if (work)
		*work -= sizeof(struct frag_queue);
	atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
	kfree(fq);
}

static inline struct frag_queue *frag_alloc_queue(void)
{
	struct frag_queue *fq = kzalloc(sizeof(struct frag_queue), GFP_ATOMIC);

	if (!fq)
		return NULL;
	atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
	return fq;
}

/* Destruction primitives. */

/* Complete destruction of fq. */
static void ip6_frag_destroy(struct frag_queue *fq, int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(fq->last_in&COMPLETE);
	BUG_TRAP(del_timer(&fq->timer) == 0);

	/* Release all fragment data. */
	fp = fq->fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(fp, work);
		fp = xp;
	}

	frag_free_queue(fq, work);
}

static __inline__ void fq_put(struct frag_queue *fq, int *work)
{
	if (atomic_dec_and_test(&fq->refcnt))
		ip6_frag_destroy(fq, work);
}

/* Kill the fq entry.  It is not destroyed immediately because the caller
 * (and possibly others) still holds a reference to it.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & COMPLETE)) {
		fq_unlink(fq);
		atomic_dec(&fq->refcnt);
		fq->last_in |= COMPLETE;
	}
}

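/*
 * Evict queues from the head of the LRU list (the least recently touched
 * ones) until enough memory has been released to get back under the low
 * threshold.  Evicted datagrams are counted as reassembly failures.
 */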
static void ip6_evictor(struct inet6_dev *idev)
{
	struct frag_queue *fq;
	struct list_head *tmp;
	int work;

	work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh;
	if (work <= 0)
		return;

	while (work > 0) {
		read_lock(&ip6_frag_lock);
		if (list_empty(&ip6_frag_lru_list)) {
			read_unlock(&ip6_frag_lock);
			return;
		}
		tmp = ip6_frag_lru_list.next;
		fq = list_entry(tmp, struct frag_queue, lru_list);
		atomic_inc(&fq->refcnt);
		read_unlock(&ip6_frag_lock);

		spin_lock(&fq->lock);
		if (!(fq->last_in&COMPLETE))
			fq_kill(fq);
		spin_unlock(&fq->lock);

		fq_put(fq, &work);
		IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
	}
}

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq = (struct frag_queue *) data;
	struct net_device *dev = NULL;

	spin_lock(&fq->lock);

	if (fq->last_in & COMPLETE)
		goto out;

	fq_kill(fq);

	dev = dev_get_by_index(&init_net, fq->iif);
	if (!dev)
		goto out;

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->last_in&FIRST_IN) || !fq->fragments)
		goto out;

	/*
	 * But use the device on which the LAST segment arrived as the
	 * source device.  A device pointer is not kept in fq (it might
	 * already have disappeared), so it is looked up again via fq->iif
	 * above.
	 */
	fq->fragments->dev = dev;
	icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->lock);
	fq_put(fq, NULL);
}

/* Creation primitives. */


static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
{
	struct frag_queue *fq;
	unsigned int hash;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif

	write_lock(&ip6_frag_lock);
	hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
#ifdef CONFIG_SMP
	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
		if (fq->id == fq_in->id &&
		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
			atomic_inc(&fq->refcnt);
			write_unlock(&ip6_frag_lock);
			fq_in->last_in |= COMPLETE;
			fq_put(fq_in, NULL);
			return fq;
		}
	}
#endif
	fq = fq_in;

	if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
		atomic_inc(&fq->refcnt);

	atomic_inc(&fq->refcnt);
	hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
	INIT_LIST_HEAD(&fq->lru_list);
	list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
	ip6_frag_nqueues++;
	write_unlock(&ip6_frag_lock);
	return fq;
}


static struct frag_queue *
ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst,
		struct inet6_dev *idev)
{
	struct frag_queue *fq;

	if ((fq = frag_alloc_queue()) == NULL)
		goto oom;

	fq->id = id;
	ipv6_addr_copy(&fq->saddr, src);
	ipv6_addr_copy(&fq->daddr, dst);

	init_timer(&fq->timer);
	fq->timer.function = ip6_frag_expire;
	fq->timer.data = (long) fq;
	spin_lock_init(&fq->lock);
	atomic_set(&fq->refcnt, 1);

	return ip6_frag_intern(fq);

oom:
	IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
	return NULL;
}

static __inline__ struct frag_queue *
fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
	struct inet6_dev *idev)
{
	struct frag_queue *fq;
	struct hlist_node *n;
	unsigned int hash;

	read_lock(&ip6_frag_lock);
	hash = ip6qhashfn(id, src, dst);
	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
		if (fq->id == id &&
		    ipv6_addr_equal(src, &fq->saddr) &&
		    ipv6_addr_equal(dst, &fq->daddr)) {
			atomic_inc(&fq->refcnt);
			read_unlock(&ip6_frag_lock);
			return fq;
		}
	}
	read_unlock(&ip6_frag_lock);

	return ip6_frag_create(id, src, dst, idev);
}


static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;

	if (fq->last_in & COMPLETE)
		goto err;

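	/* The fragment offset field counts 8-byte units in its upper 13
	 * bits, so masking off the low three bits of the host-order value
	 * yields the byte offset of this fragment.  'end' is that offset
	 * plus the length of the fragmentable data in this packet:
	 * payload_len minus the extension headers that precede the data,
	 * i.e. everything between the fixed IPv6 header and fhdr + 1.
	 */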
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->len ||
		    ((fq->last_in & LAST_IN) && end != fq->len))
			goto err;
		fq->last_in |= LAST_IN;
		fq->len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->len) {
			/* Some bits beyond end -> corruption. */
			if (fq->last_in & LAST_IN)
				goto err;
			fq->len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
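	/* Overlap handling: the fragment that starts at the lower offset
	 * keeps its data.  If the preceding fragment runs past our start,
	 * the head of this fragment is trimmed; the loop further down
	 * trims (or drops) queued fragments that begin inside this one.
	 */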
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely covered by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->fragments = next;

			fq->meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->stamp = skb->tstamp;
	fq->meat += skb->len;
	atomic_add(skb->truesize, &ip6_frag_mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->last_in |= FIRST_IN;
	}

	if (fq->last_in == (FIRST_IN | LAST_IN) && fq->meat == fq->len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frag_lock);
	list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
	write_unlock(&ip6_frag_lock);
	return -1;

err:
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason, and 1 on success, in which
 *	case the reassembled frame is left in the head skb and the offset
 *	of its current nexthdr field is stored in IP6CB(head)->nhoff.
 *
 *	It is called with the fq locked, and the caller must have checked
 *	that the queue is eligible for reassembly, i.e. it is not COMPLETE,
 *	the first and the last fragments have arrived and all the bits are
 *	here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
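	/* 'head' here will be the fragment that has just arrived, i.e. the
	 * very sk_buff our caller passed in.  A clone of it takes its place
	 * in the chain, and skb_morph() turns it into the head fragment, so
	 * the reassembled datagram is built in the caller's skb and
	 * ipv6_frag_rcv() can simply return 1 with *skbp still valid.
	 */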
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->fragments);
		head->next = fq->fragments->next;

		kfree_skb(fq->fragments);
		fq->fragments = head;
	}

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
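	/* New payload length = extension headers kept from the first
	 * fragment (everything between the fixed IPv6 header and the data,
	 * minus the fragment header that is about to be removed) plus
	 * fq->len, the total length of the reassembled fragmentable part.
	 */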
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, split it into two
	 * chunks: the first holding the linear data and paged part, and
	 * the second holding only its frag_list. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip6_frag_mem);
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the remaining headers in order to calculate the ICV
	 * correctly. */
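	/* The Next Header byte that pointed at the fragment header is
	 * rewritten to the payload type taken from the fragment header,
	 * then all headers in front of the fragment header are slid
	 * forward by sizeof(struct frag_hdr) and the mac/network header
	 * offsets are adjusted to match.
	 */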
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &ip6_frag_mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip6_frag_mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
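	/* A zero payload_len together with a fragment header indicates a
	 * Jumbo Payload option (RFC 2675), and jumbograms must not be
	 * fragmented, so this is treated as a header error.
	 */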
	if (hdr->payload_len == 0) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}
	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr)))) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

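	/* htons(0xFFF9) covers the 13-bit fragment offset field (0xFFF8)
	 * plus the M ("more fragments") flag (0x0001); if both are zero,
	 * the fragment header is gratuitous and the packet is a complete
	 * datagram that only needs the header skipped.
	 */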
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
		ip6_evictor(ip6_dst_idev(skb->dst));

	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
			  ip6_dst_idev(skb->dst))) != NULL) {
		int ret;

		spin_lock(&fq->lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->lock);
		fq_put(fq, NULL);
		return ret;
	}

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

static struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

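/*
 * Boot-time setup: register the handler for the Fragment extension header
 * (IPPROTO_FRAGMENT), seed the hash secret from num_physpages and jiffies,
 * and arm the timer that periodically re-keys the hash.
 */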
void __init ipv6_frag_init(void)
{
	if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
		printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");

	ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
				   (jiffies ^ (jiffies >> 6)));

	init_timer(&ip6_frag_secret_timer);
	ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
	ip6_frag_secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
	add_timer(&ip6_frag_secret_timer);
}
809