/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request,
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	len = seqhilen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
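
/*
 * Illustrative layout of the buffer returned above, assuming ESN and an
 * 8-byte IV (e.g. a GCM transform):
 *
 *   [ seq_hi: 4 ][ pad ][ IV: 8 ][ pad to ctx alignment ]
 *   [ aead_request + reqsize ][ pad ][ sg[0] .. sg[nfrags - 1] ]
 *
 * esp_tmp_seqhi(), esp_tmp_iv(), esp_tmp_req() and esp_req_sg() below
 * recover the individual pieces with the same alignment arithmetic.
 */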

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	void *tmp;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct xfrm_offload *xo = xfrm_offload(skb);

		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		if (xo)
			esph->seq_no = htonl(xo->seq.hi);
		else
			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}
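
/*
 * Sketch of the ESN shuffle above (illustrative): with the real header
 * [spi][seq_lo] at the transport offset, the function rewrites the four
 * bytes in front of it so the AAD seen by the cipher is
 * [spi][seq_hi][seq_lo]; the overwritten bytes are parked in *seqhi and
 * esp_output_restore_header() puts everything back after encryption.
 */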

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}
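
/*
 * Example (illustrative): with tfclen = 0, plen = 4 and proto = IPPROTO_TCP,
 * the trailer written above is the RFC 4303 self-describing padding
 * 01 02, followed by pad_len = 02 and next_header = 06.
 */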
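/*
 * Sketch of the strategies below (descriptive only): if the skb is not
 * cloned and has enough tailroom, the trailer is written in place; if it
 * still has frag slots free, the trailer goes into a page fragment taken
 * from the per-state x->xfrag pool; otherwise skb_cow_data() copies the
 * data so the trailer can be appended linearly.
 */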
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);
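	/* Build the explicit IV as the zero-padded big-endian sequence
	 * number: only the low min(ivlen, 8) bytes of esp->seqno are
	 * copied into the tail of the IV.
	 */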
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;
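
	/* Worked example (illustrative): skb->len = 100, tfclen = 0 and a
	 * 16-byte cipher block give clen = ALIGN(102, 16) = 112, hence
	 * plen = 12 pad bytes and tailen = 12 + alen trailer bytes.
	 */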

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
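
/*
 * Illustrative layout for the input ESN shuffle above: the received
 * header [spi][seq_lo] becomes [spi][seq_hi][seq_lo] after the 4-byte
 * push, so the AAD again covers spi + seq_hi + seq_lo;
 * esp_input_restore_header() undoes this after decryption.
 */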

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
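
/*
 * Worked example (illustrative): transport mode with a 16-byte block
 * cipher, header_len = 24 (8-byte ESP header + 16-byte IV), a 12-byte
 * ICV and mtu = 1500 gives ((1500 - 24 - 12 - 40) & ~15) + 40 - 2 =
 * 1462 bytes of payload per packet.
 */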

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;
	u32 mask = 0;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	if (x->xso.offload_handle)
		mask |= CRYPTO_ALG_ASYNC;

	aead = crypto_alloc_aead(aead_name, 0, mask);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
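
/*
 * Illustrative sketch, assuming x->geniv = "seqiv" and an AEAD algorithm
 * named "rfc4106(gcm(aes))": the template name built above would then be
 * "seqiv(rfc4106(gcm(aes)))".  The exact geniv string comes from the
 * xfrm algorithm table, so treat these names as examples only.
 */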

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;
	u32 mask = 0;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	if (x->xso.offload_handle)
		mask |= CRYPTO_ALG_ASYNC;

	aead = crypto_alloc_aead(authenc_name, 0, mask);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));
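
	/* The authenc key blob assembled here has the layout expected by
	 * crypto_authenc_extractkeys() (illustrative):
	 *
	 *   [ rtattr: CRYPTO_AUTHENC_KEYA_PARAM, enckeylen ]
	 *   [ authentication key ][ encryption key ]
	 */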

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
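
/*
 * Worked example (illustrative): cbc(aes) in tunnel mode gives
 * header_len = 8 (ESP header) + 16 (IV) + 40 (outer IPv6 header) = 64,
 * and with a 12-byte ICV trailer_len = 16 + 1 + 12 = 29, the worst-case
 * per-packet overhead reserved by the stack.
 */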

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);