/* net/ipv6/esp6.c (revision d003d772) */
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

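/* Per-skb scratch state stashed in skb->cb: the generic xfrm control block
 * plus a pointer to the temporary crypto buffer allocated by esp_alloc_tmp().
 */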
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

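/* The helpers below carve the esp_alloc_tmp() buffer into its parts.
 * An illustrative layout (alignment padding between regions omitted):
 *
 *   [ seqhi (4 bytes, ESN only) ][ IV ][ aead_request + tfm ctx ][ SG entries ]
 */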
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

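/* Completion callback for async encryption.  Frees the temporary buffer and
 * resumes output either through the device offload path (XFRM_DEV_RESUME) or
 * through the normal xfrm_output_resume() path.
 */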
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

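/* For ESN the full 64-bit sequence number must be covered by the ICV, but
 * only the low 32 bits travel in the ESP header.  The trick used here: shift
 * the header up by 4 bytes so that seq_no carries the high bits during
 * encryption, then esp_output_restore_header() undoes the shift afterwards.
 */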
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct xfrm_offload *xo = xfrm_offload(skb);

		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		if (xo)
			esph->seq_no = htonl(xo->seq.hi);
		else
			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

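/* Build the ESP trailer: optional TFC padding (zeros), the self-describing
 * pad bytes 1, 2, 3, ..., then the pad length and the next-header byte
 * (RFC 4303).  Illustrative example: plen = 4, proto = 6 (TCP) yields the
 * four trailer bytes 01 02 02 06.
 */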
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	int i;

	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	for (i = 0; i < plen - 2; i++)
		tail[i] = i + 1;
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

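	/* The per-packet IV is the (zero-padded) low-order bytes of the
	 * 64-bit sequence number, taken big-endian from esp->seqno.
	 */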
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

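/* Transform skb into an ESP packet.  On the wire the result is laid out as
 *
 *   [ ESP hdr: SPI, seq_no ][ IV ][ payload ][ pad ][ pad len ][ next hdr ][ ICV ]
 *
 * where the trailer is filled in by esp6_output_head() and the ICV is
 * appended by the AEAD encryption in esp6_output_tail().
 */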
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

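/* Illustrative example (assuming rfc4106(gcm(aes)): 8-byte IV so
 * header_len = 16, a 16-byte ICV, and a blocksize rounded up to 4) in
 * transport mode (net_adj = 40 for the IPv6 header):
 *
 *   esp6_get_mtu(x, 1500) = ((1500 - 16 - 16 - 40) & ~3) + 40 - 2 = 1466
 */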
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

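/* Instantiate a real AEAD wrapped in its IV generator.  For example, a
 * GCM SA would typically produce a constructed name such as
 * "seqiv(rfc4106(gcm(aes)))" (name shown for illustration only).
 */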
static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

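/* Build an authenc()/authencesn() AEAD from separate auth and encryption
 * algorithms.  The combined key blob handed to crypto_aead_setkey() is
 *
 *   [ rtattr: CRYPTO_AUTHENC_KEYA_PARAM (enckeylen) ][ auth key ][ enc key ]
 */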
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);