xref: /openbmc/linux/net/ipv6/reassembly.c (revision abd6523d)
1 /*
2  *	IPv6 fragment reassembly
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
9  *
10  *	Based on: net/ipv4/ip_fragment.c
11  *
12  *	This program is free software; you can redistribute it and/or
13  *      modify it under the terms of the GNU General Public License
14  *      as published by the Free Software Foundation; either version
15  *      2 of the License, or (at your option) any later version.
16  */
17 
18 /*
19  *	Fixes:
20  *	Andi Kleen	Make it work with multiple hosts.
21  *			More RFC compliance.
22  *
23  *      Horst von Brand Add missing #include <linux/string.h>
24  *	Alexey Kuznetsov	SMP races, threading, cleanup.
25  *	Patrick McHardy		LRU queue of frag heads for evictor.
26  *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
27  *	David Stevens and
28  *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
29  *				calculate ICV correctly.
30  */
31 #include <linux/errno.h>
32 #include <linux/types.h>
33 #include <linux/string.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/jiffies.h>
37 #include <linux/net.h>
38 #include <linux/list.h>
39 #include <linux/netdevice.h>
40 #include <linux/in6.h>
41 #include <linux/ipv6.h>
42 #include <linux/icmpv6.h>
43 #include <linux/random.h>
44 #include <linux/jhash.h>
45 #include <linux/skbuff.h>
46 
47 #include <net/sock.h>
48 #include <net/snmp.h>
49 
50 #include <net/ipv6.h>
51 #include <net/ip6_route.h>
52 #include <net/protocol.h>
53 #include <net/transp_v6.h>
54 #include <net/rawv6.h>
55 #include <net/ndisc.h>
56 #include <net/addrconf.h>
57 #include <net/inet_frag.h>
58 
59 struct ip6frag_skb_cb
60 {
61 	struct inet6_skb_parm	h;
62 	int			offset;
63 };
64 
65 #define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))
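/*
 * Each queued fragment records its byte offset within the original
 * datagram in skb->cb (via FRAG6_CB), next to the usual inet6_skb_parm,
 * so no extra per-fragment state needs to be allocated.
 */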
66 
67 
68 /*
69  *	Equivalent of ipv4 struct ipq
70  */
71 
72 struct frag_queue
73 {
74 	struct inet_frag_queue	q;
75 
76 	__be32			id;		/* fragment id		*/
77 	struct in6_addr		saddr;
78 	struct in6_addr		daddr;
79 
80 	int			iif;
81 	unsigned int		csum;
82 	__u16			nhoffset;
83 };
84 
85 struct inet_frags_ctl ip6_frags_ctl __read_mostly = {
86 	.high_thresh 	 = 256 * 1024,
87 	.low_thresh	 = 192 * 1024,
88 	.timeout	 = IPV6_FRAG_TIMEOUT,
89 	.secret_interval = 10 * 60 * HZ,
90 };
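/*
 * A rough sketch of what these knobs mean (the generic machinery lives in
 * net/ipv4/inet_fragment.c): once the memory used by all fragment queues
 * exceeds high_thresh, the evictor drops the oldest incomplete queues until
 * usage falls below low_thresh; timeout bounds how long an incomplete
 * datagram is kept before an ICMPv6 "fragment reassembly time exceeded"
 * error is generated; secret_interval is how often the hash secret
 * (ip6_frags.rnd) is rekeyed to frustrate hash-collision attacks.
 */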
91 
92 static struct inet_frags ip6_frags;
93 
94 int ip6_frag_nqueues(void)
95 {
96 	return ip6_frags.nqueues;
97 }
98 
99 int ip6_frag_mem(void)
100 {
101 	return atomic_read(&ip6_frags.mem);
102 }
103 
104 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
105 			  struct net_device *dev);
106 
107 /*
108  * Callers should be careful not to use the hash value outside ip6_frags.lock,
109  * as doing so could race with ip6_frags.rnd being recalculated.
110  */
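/*
 * The hash below is essentially an open-coded jhash over the nine 32-bit
 * words of interest (four from saddr, four from daddr, plus the fragment
 * id), seeded with the periodically rekeyed random value ip6_frags.rnd and
 * folded down to an INETFRAGS_HASHSZ-sized bucket index.
 */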
111 static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
112 			       struct in6_addr *daddr)
113 {
114 	u32 a, b, c;
115 
116 	a = (__force u32)saddr->s6_addr32[0];
117 	b = (__force u32)saddr->s6_addr32[1];
118 	c = (__force u32)saddr->s6_addr32[2];
119 
120 	a += JHASH_GOLDEN_RATIO;
121 	b += JHASH_GOLDEN_RATIO;
122 	c += ip6_frags.rnd;
123 	__jhash_mix(a, b, c);
124 
125 	a += (__force u32)saddr->s6_addr32[3];
126 	b += (__force u32)daddr->s6_addr32[0];
127 	c += (__force u32)daddr->s6_addr32[1];
128 	__jhash_mix(a, b, c);
129 
130 	a += (__force u32)daddr->s6_addr32[2];
131 	b += (__force u32)daddr->s6_addr32[3];
132 	c += (__force u32)id;
133 	__jhash_mix(a, b, c);
134 
135 	return c & (INETFRAGS_HASHSZ - 1);
136 }
137 
138 static unsigned int ip6_hashfn(struct inet_frag_queue *q)
139 {
140 	struct frag_queue *fq;
141 
142 	fq = container_of(q, struct frag_queue, q);
143 	return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
144 }
145 
146 int ip6_frag_equal(struct inet_frag_queue *q1, struct inet_frag_queue *q2)
147 {
148 	struct frag_queue *fq1, *fq2;
149 
150 	fq1 = container_of(q1, struct frag_queue, q);
151 	fq2 = container_of(q2, struct frag_queue, q);
152 	return (fq1->id == fq2->id &&
153 			ipv6_addr_equal(&fq2->saddr, &fq1->saddr) &&
154 			ipv6_addr_equal(&fq2->daddr, &fq1->daddr));
155 }
156 EXPORT_SYMBOL(ip6_frag_equal);
157 
158 int ip6_frag_match(struct inet_frag_queue *q, void *a)
159 {
160 	struct frag_queue *fq;
161 	struct ip6_create_arg *arg = a;
162 
163 	fq = container_of(q, struct frag_queue, q);
164 	return (fq->id == arg->id &&
165 			ipv6_addr_equal(&fq->saddr, arg->src) &&
166 			ipv6_addr_equal(&fq->daddr, arg->dst));
167 }
168 EXPORT_SYMBOL(ip6_frag_match);
169 
170 /* Memory Tracking Functions. */
171 static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
172 {
173 	if (work)
174 		*work -= skb->truesize;
175 	atomic_sub(skb->truesize, &ip6_frags.mem);
176 	kfree_skb(skb);
177 }
178 
179 void ip6_frag_init(struct inet_frag_queue *q, void *a)
180 {
181 	struct frag_queue *fq = container_of(q, struct frag_queue, q);
182 	struct ip6_create_arg *arg = a;
183 
184 	fq->id = arg->id;
185 	ipv6_addr_copy(&fq->saddr, arg->src);
186 	ipv6_addr_copy(&fq->daddr, arg->dst);
187 }
188 EXPORT_SYMBOL(ip6_frag_init);
189 
190 static void ip6_frag_free(struct inet_frag_queue *fq)
191 {
192 	kfree(container_of(fq, struct frag_queue, q));
193 }
194 
195 /* Destruction primitives. */
196 
197 static __inline__ void fq_put(struct frag_queue *fq)
198 {
199 	inet_frag_put(&fq->q, &ip6_frags);
200 }
201 
202 /* Kill fq entry. It is not destroyed immediately,
203  * because the caller (and possibly others) still holds a reference.
204  */
205 static __inline__ void fq_kill(struct frag_queue *fq)
206 {
207 	inet_frag_kill(&fq->q, &ip6_frags);
208 }
209 
210 static void ip6_evictor(struct inet6_dev *idev)
211 {
212 	int evicted;
213 
214 	evicted = inet_frag_evictor(&ip6_frags);
215 	if (evicted)
216 		IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
217 }
218 
219 static void ip6_frag_expire(unsigned long data)
220 {
221 	struct frag_queue *fq;
222 	struct net_device *dev = NULL;
223 
224 	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
225 
226 	spin_lock(&fq->q.lock);
227 
228 	if (fq->q.last_in & COMPLETE)
229 		goto out;
230 
231 	fq_kill(fq);
232 
233 	dev = dev_get_by_index(&init_net, fq->iif);
234 	if (!dev)
235 		goto out;
236 
237 	rcu_read_lock();
238 	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
239 	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
240 	rcu_read_unlock();
241 
242 	/* Don't send error if the first segment did not arrive. */
243 	if (!(fq->q.last_in&FIRST_IN) || !fq->q.fragments)
244 		goto out;
245 
246 	/*
247 	   But use as source the device on which the LAST ARRIVED
248 	   segment was received. And do not use the fq->dev
249 	   pointer directly; the device might have already disappeared.
250 	 */
251 	fq->q.fragments->dev = dev;
252 	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
253 out:
254 	if (dev)
255 		dev_put(dev);
256 	spin_unlock(&fq->q.lock);
257 	fq_put(fq);
258 }
259 
260 static __inline__ struct frag_queue *
261 fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
262 	struct inet6_dev *idev)
263 {
264 	struct inet_frag_queue *q;
265 	struct ip6_create_arg arg;
266 	unsigned int hash;
267 
268 	arg.id = id;
269 	arg.src = src;
270 	arg.dst = dst;
271 	hash = ip6qhashfn(id, src, dst);
272 
273 	q = inet_frag_find(&ip6_frags, &arg, hash);
274 	if (q == NULL)
275 		goto oom;
276 
277 	return container_of(q, struct frag_queue, q);
278 
279 oom:
280 	IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
281 	return NULL;
282 }
283 
284 static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
285 			   struct frag_hdr *fhdr, int nhoff)
286 {
287 	struct sk_buff *prev, *next;
288 	struct net_device *dev;
289 	int offset, end;
290 
291 	if (fq->q.last_in & COMPLETE)
292 		goto err;
293 
294 	offset = ntohs(fhdr->frag_off) & ~0x7;
295 	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
296 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
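	/*
	 * Worked example (hypothetical numbers): with ntohs(frag_off) ==
	 * 0x0569 the fragment starts at offset 0x0568 = 1384 bytes and has
	 * the MF bit set; if payload_len is 1240 and the fragment header
	 * directly follows the IPv6 header, the data after the fragment
	 * header is 1240 - 8 = 1232 bytes, so end = 1384 + 1232 = 2616.
	 */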
297 
298 	if ((unsigned int)end > IPV6_MAXPLEN) {
299 		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
300 				 IPSTATS_MIB_INHDRERRORS);
301 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
302 				  ((u8 *)&fhdr->frag_off -
303 				   skb_network_header(skb)));
304 		return -1;
305 	}
306 
307 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
308 		const unsigned char *nh = skb_network_header(skb);
309 		skb->csum = csum_sub(skb->csum,
310 				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
311 						  0));
312 	}
313 
314 	/* Is this the final fragment? */
315 	if (!(fhdr->frag_off & htons(IP6_MF))) {
316 		/* If we already have some bits beyond end
317 		 * or have different end, the segment is corrupted.
318 		 */
319 		if (end < fq->q.len ||
320 		    ((fq->q.last_in & LAST_IN) && end != fq->q.len))
321 			goto err;
322 		fq->q.last_in |= LAST_IN;
323 		fq->q.len = end;
324 	} else {
325 		/* Check if the fragment is rounded to 8 bytes.
326 		 * Required by the RFC.
327 		 */
328 		if (end & 0x7) {
329 			/* RFC2460 says always send parameter problem in
330 			 * this case. -DaveM
331 			 */
332 			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
333 					 IPSTATS_MIB_INHDRERRORS);
334 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
335 					  offsetof(struct ipv6hdr, payload_len));
336 			return -1;
337 		}
338 		if (end > fq->q.len) {
339 			/* Some bits beyond end -> corruption. */
340 			if (fq->q.last_in & LAST_IN)
341 				goto err;
342 			fq->q.len = end;
343 		}
344 	}
345 
346 	if (end == offset)
347 		goto err;
348 
349 	/* Point into the IP datagram 'data' part. */
350 	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
351 		goto err;
352 
353 	if (pskb_trim_rcsum(skb, end - offset))
354 		goto err;
355 
356 	/* Find out which fragments are in front and at the back of us
357 	 * in the chain of fragments so far.  We must know where to put
358 	 * this fragment, right?
359 	 */
360 	prev = NULL;
361 	for(next = fq->q.fragments; next != NULL; next = next->next) {
362 		if (FRAG6_CB(next)->offset >= offset)
363 			break;	/* bingo! */
364 		prev = next;
365 	}
366 
367 	/* We found where to put this one.  Check for overlap with
368 	 * preceding fragment, and, if needed, align things so that
369 	 * any overlaps are eliminated.
370 	 */
371 	if (prev) {
372 		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
373 
374 		if (i > 0) {
375 			offset += i;
376 			if (end <= offset)
377 				goto err;
378 			if (!pskb_pull(skb, i))
379 				goto err;
380 			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
381 				skb->ip_summed = CHECKSUM_NONE;
382 		}
383 	}
384 
385 	/* Look for overlap with succeeding segments.
386 	 * If we can merge fragments, do it.
387 	 */
388 	while (next && FRAG6_CB(next)->offset < end) {
389 		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
390 
391 		if (i < next->len) {
392 			/* Eat head of the next overlapped fragment
393 			 * and leave the loop. The next ones cannot overlap.
394 			 */
395 			if (!pskb_pull(next, i))
396 				goto err;
397 			FRAG6_CB(next)->offset += i;	/* next fragment */
398 			fq->q.meat -= i;
399 			if (next->ip_summed != CHECKSUM_UNNECESSARY)
400 				next->ip_summed = CHECKSUM_NONE;
401 			break;
402 		} else {
403 			struct sk_buff *free_it = next;
404 
405 			/* Old fragment is completely overridden by the
406 			 * new one; drop it.
407 			 */
408 			next = next->next;
409 
410 			if (prev)
411 				prev->next = next;
412 			else
413 				fq->q.fragments = next;
414 
415 			fq->q.meat -= free_it->len;
416 			frag_kfree_skb(free_it, NULL);
417 		}
418 	}
419 
420 	FRAG6_CB(skb)->offset = offset;
421 
422 	/* Insert this fragment in the chain of fragments. */
423 	skb->next = next;
424 	if (prev)
425 		prev->next = skb;
426 	else
427 		fq->q.fragments = skb;
428 
429 	dev = skb->dev;
430 	if (dev) {
431 		fq->iif = dev->ifindex;
432 		skb->dev = NULL;
433 	}
434 	fq->q.stamp = skb->tstamp;
435 	fq->q.meat += skb->len;
436 	atomic_add(skb->truesize, &ip6_frags.mem);
437 
438 	/* The first fragment.
439 	 * nhoffset is obtained from the first fragment, of course.
440 	 */
441 	if (offset == 0) {
442 		fq->nhoffset = nhoff;
443 		fq->q.last_in |= FIRST_IN;
444 	}
445 
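	/*
	 * Reassemble only once both the first and the last fragment have
	 * been seen and the bytes collected so far (q.meat) cover the whole
	 * datagram length (q.len).
	 */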
446 	if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
447 		return ip6_frag_reasm(fq, prev, dev);
448 
449 	write_lock(&ip6_frags.lock);
450 	list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list);
451 	write_unlock(&ip6_frags.lock);
452 	return -1;
453 
454 err:
455 	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
456 	kfree_skb(skb);
457 	return -1;
458 }
459 
460 /*
461  *	Check if this packet is complete.
462  *	Returns 1 when reassembly succeeded (IP6CB(head)->nhoff then points
463  *	at the current nexthdr field in the reassembled frame), -1 on failure.
464  *
465  *	It is called with the fq locked, and the caller must have checked that
466  *	the queue is eligible for reassembly, i.e. it is not COMPLETE and both
467  *	the first and the last fragments have arrived and all the bits are here.
468  */
469 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
470 			  struct net_device *dev)
471 {
472 	struct sk_buff *fp, *head = fq->q.fragments;
473 	int    payload_len;
474 	unsigned int nhoff;
475 
476 	fq_kill(fq);
477 
478 	/* Make the one we just received the head. */
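	/*
	 * The idea, roughly: clone the fragment that just arrived and let the
	 * clone take its place in the list, then morph that fragment onto the
	 * old head with skb_morph() and link it at the front of the chain, so
	 * the reassembled datagram ends up being built on the very skb the
	 * caller is currently processing.
	 */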
479 	if (prev) {
480 		head = prev->next;
481 		fp = skb_clone(head, GFP_ATOMIC);
482 
483 		if (!fp)
484 			goto out_oom;
485 
486 		fp->next = head->next;
487 		prev->next = fp;
488 
489 		skb_morph(head, fq->q.fragments);
490 		head->next = fq->q.fragments->next;
491 
492 		kfree_skb(fq->q.fragments);
493 		fq->q.fragments = head;
494 	}
495 
496 	BUG_TRAP(head != NULL);
497 	BUG_TRAP(FRAG6_CB(head)->offset == 0);
498 
499 	/* Unfragmented part is taken from the first segment. */
500 	payload_len = ((head->data - skb_network_header(head)) -
501 		       sizeof(struct ipv6hdr) + fq->q.len -
502 		       sizeof(struct frag_hdr));
503 	if (payload_len > IPV6_MAXPLEN)
504 		goto out_oversize;
505 
506 	/* Head of list must not be cloned. */
507 	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
508 		goto out_oom;
509 
510 	/* If the first fragment is fragmented itself, we split
511 	 * it into two chunks: the first with the data and paged part
512 	 * and the second holding only the fragments. */
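	/*
	 * (That keeps the head linear/paged-only, so the remaining fragment
	 * skbs can simply be chained onto its frag_list further below.)
	 */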
513 	if (skb_shinfo(head)->frag_list) {
514 		struct sk_buff *clone;
515 		int i, plen = 0;
516 
517 		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
518 			goto out_oom;
519 		clone->next = head->next;
520 		head->next = clone;
521 		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
522 		skb_shinfo(head)->frag_list = NULL;
523 		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
524 			plen += skb_shinfo(head)->frags[i].size;
525 		clone->len = clone->data_len = head->data_len - plen;
526 		head->data_len -= clone->len;
527 		head->len -= clone->len;
528 		clone->csum = 0;
529 		clone->ip_summed = head->ip_summed;
530 		atomic_add(clone->truesize, &ip6_frags.mem);
531 	}
532 
533 	/* We have to remove the fragment header from the datagram and to
534 	 * relocate the headers in front of it so the ICV is calculated correctly. */
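	/*
	 * A sketch of the relocation: the byte that used to say "next header
	 * = fragment (44)" is overwritten with the fragment header's own
	 * nexthdr value, and then everything in front of the payload
	 * (headroom, link-layer and IPv6 headers) is slid forward by
	 * sizeof(struct frag_hdr) so it ends where the fragment header
	 * used to end.
	 */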
535 	nhoff = fq->nhoffset;
536 	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
537 	memmove(head->head + sizeof(struct frag_hdr), head->head,
538 		(head->data - head->head) - sizeof(struct frag_hdr));
539 	head->mac_header += sizeof(struct frag_hdr);
540 	head->network_header += sizeof(struct frag_hdr);
541 
542 	skb_shinfo(head)->frag_list = head->next;
543 	skb_reset_transport_header(head);
544 	skb_push(head, head->data - skb_network_header(head));
545 	atomic_sub(head->truesize, &ip6_frags.mem);
546 
547 	for (fp=head->next; fp; fp = fp->next) {
548 		head->data_len += fp->len;
549 		head->len += fp->len;
550 		if (head->ip_summed != fp->ip_summed)
551 			head->ip_summed = CHECKSUM_NONE;
552 		else if (head->ip_summed == CHECKSUM_COMPLETE)
553 			head->csum = csum_add(head->csum, fp->csum);
554 		head->truesize += fp->truesize;
555 		atomic_sub(fp->truesize, &ip6_frags.mem);
556 	}
557 
558 	head->next = NULL;
559 	head->dev = dev;
560 	head->tstamp = fq->q.stamp;
561 	ipv6_hdr(head)->payload_len = htons(payload_len);
562 	IP6CB(head)->nhoff = nhoff;
563 
564 	/* Yes, and fold redundant checksum back. 8) */
565 	if (head->ip_summed == CHECKSUM_COMPLETE)
566 		head->csum = csum_partial(skb_network_header(head),
567 					  skb_network_header_len(head),
568 					  head->csum);
569 
570 	rcu_read_lock();
571 	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
572 	rcu_read_unlock();
573 	fq->q.fragments = NULL;
574 	return 1;
575 
576 out_oversize:
577 	if (net_ratelimit())
578 		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
579 	goto out_fail;
580 out_oom:
581 	if (net_ratelimit())
582 		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
583 out_fail:
584 	rcu_read_lock();
585 	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
586 	rcu_read_unlock();
587 	return -1;
588 }
589 
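/*
 * Fragment header receive handler. Per the ip6_input() convention, a
 * positive return value makes the caller resubmit the packet and continue
 * parsing at IP6CB(skb)->nhoff; -1 means the skb was consumed here
 * (queued for reassembly or dropped).
 */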
590 static int ipv6_frag_rcv(struct sk_buff *skb)
591 {
592 	struct frag_hdr *fhdr;
593 	struct frag_queue *fq;
594 	struct ipv6hdr *hdr = ipv6_hdr(skb);
595 
596 	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);
597 
598 	/* Jumbo payload inhibits frag. header */
599 	if (hdr->payload_len==0) {
600 		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
601 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
602 				  skb_network_header_len(skb));
603 		return -1;
604 	}
605 	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
606 				 sizeof(struct frag_hdr)))) {
607 		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
608 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
609 				  skb_network_header_len(skb));
610 		return -1;
611 	}
612 
613 	hdr = ipv6_hdr(skb);
614 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
615 
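	/*
	 * 0xFFF9 covers the 13-bit fragment offset (0xFFF8) plus the
	 * more-fragments bit (0x0001); if all of those are zero, this
	 * "fragment" actually carries the complete datagram.
	 */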
616 	if (!(fhdr->frag_off & htons(0xFFF9))) {
617 		/* It is not a fragmented frame */
618 		skb->transport_header += sizeof(struct frag_hdr);
619 		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);
620 
621 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
622 		return 1;
623 	}
624 
625 	if (atomic_read(&ip6_frags.mem) > ip6_frags_ctl.high_thresh)
626 		ip6_evictor(ip6_dst_idev(skb->dst));
627 
628 	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
629 			  ip6_dst_idev(skb->dst))) != NULL) {
630 		int ret;
631 
632 		spin_lock(&fq->q.lock);
633 
634 		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
635 
636 		spin_unlock(&fq->q.lock);
637 		fq_put(fq);
638 		return ret;
639 	}
640 
641 	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
642 	kfree_skb(skb);
643 	return -1;
644 }
645 
646 static struct inet6_protocol frag_protocol =
647 {
648 	.handler	=	ipv6_frag_rcv,
649 	.flags		=	INET6_PROTO_NOPOLICY,
650 };
651 
652 void __init ipv6_frag_init(void)
653 {
654 	if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
655 		printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");
656 
657 	ip6_frags.ctl = &ip6_frags_ctl;
658 	ip6_frags.hashfn = ip6_hashfn;
659 	ip6_frags.constructor = ip6_frag_init;
660 	ip6_frags.destructor = ip6_frag_free;
661 	ip6_frags.skb_free = NULL;
662 	ip6_frags.qsize = sizeof(struct frag_queue);
663 	ip6_frags.match = ip6_frag_match;
664 	ip6_frags.equal = ip6_frag_equal;
665 	ip6_frags.frag_expire = ip6_frag_expire;
666 	inet_frags_init(&ip6_frags);
667 }
668