/* net/ipv6/esp6.c (revision 83268fa6) */
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence
 * number are placed at the front, if present, followed by the IV, the
 * request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
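
/*
 * A sketch of the resulting tmp buffer layout (assuming ESN is in use,
 * so a 4-byte seqhi sits at the front):
 *
 *	tmp
 *	 |-seqhi-|-IV (ivsize)-|-aead_request + reqsize-|-SG[nfrags]-|
 *
 * The esp_tmp_seqhi(), esp_tmp_iv(), esp_tmp_req() and esp_req_sg()
 * helpers below recompute these offsets using the same alignment
 * rules that sized the allocation above.
 */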

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME))
		x = skb->sp->xvec[skb->sp->len - 1];
	else
		x = skb_dst(skb)->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct xfrm_offload *xo = xfrm_offload(skb);

		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		if (xo)
			esph->seq_no = htonl(xo->seq.hi);
		else
			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}
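
/*
 * What the ESN shuffle above does, sketched on the packet bytes: the
 * AEAD must authenticate the 32 high bits of the 64-bit sequence
 * number even though they are never transmitted, so during the crypto
 * operation the header is temporarily rewritten from the on-wire form
 *
 *	[ spi ][ seq_lo ]
 * to
 *	[ spi ][ seq_hi ][ seq_lo ]	(spi shifted 4 bytes down)
 *
 * with all twelve bytes covered as associated data.
 * esp_restore_header() undoes this once the operation completes.
 */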

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}
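
/*
 * Trailer bytes as laid out above, for instance with tfclen = 0,
 * plen = 4 and proto = IPPROTO_TCP (hypothetical values):
 *
 *	tail[0] = 1	RFC 4303 self-describing padding (1, 2, ...)
 *	tail[1] = 2
 *	tail[2] = 2	pad length
 *	tail[3] = 6	next header (TCP)
 *
 * Any TFC padding requested by the policy is zero-filled in front of
 * this block.
 */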

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
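
/*
 * In short, esp6_output_head() appends the ESP trailer via one of
 * three paths: straight into existing tailroom, into a freshly
 * refilled page fragment chained onto the skb (the path above that
 * clears esp->inplace), or after skb_cow_data() when the skb is
 * cloned or otherwise unsuitable.  It returns the number of
 * scatterlist entries the caller must reserve, or a negative errno.
 */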

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
		           (unsigned char *)esph - skb->data,
		           assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
			           (unsigned char *)esph - skb->data,
			           assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

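	/* Build the IV from the 64-bit sequence number: the low-order
	 * min(ivlen, 8) bytes of esp->seqno land in the tail of the IV,
	 * and any leading IV bytes (ciphers with ivlen > 8) stay zero.
	 */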
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}
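
/*
 * Trailer parsing above, sketched on a decrypted packet: the two
 * bytes immediately in front of the alen-byte ICV are
 *
 *	nexthdr[0] = pad length
 *	nexthdr[1] = next header (the inner protocol)
 *
 * so trimlen = ICV + padding + those two bytes, and the function
 * returns the next header value (or a negative errno for a bogus
 * pad length).
 */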

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
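
/*
 * A worked example of the MTU arithmetic above, with hypothetical SA
 * parameters: transport mode (net_adj = 40), header_len = 16 (8-byte
 * ESP header plus 8-byte IV), a 12-byte ICV and blksize = 16.  For a
 * link MTU of 1500:
 *
 *	(1500 - 16 - 12 - 40)	= 1432
 *	1432 & ~15		= 1424	(round down to the block size)
 *	1424 + 40 - 2		= 1462	(re-add the IPv6 header, drop
 *					 the pad-length/next-header
 *					 bytes)
 */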

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
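
/*
 * For instance (a hypothetical but typical SA), x->geniv = "seqiv"
 * and x->aead->alg_name = "rfc4106(gcm(aes))" yield the template name
 * "seqiv(rfc4106(gcm(aes)))" for crypto_alloc_aead() above.
 */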

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
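
/*
 * For instance (again hypothetical values), x->geniv = "echainiv",
 * x->aalg->alg_name = "hmac(sha256)" and x->ealg->alg_name =
 * "cbc(aes)" produce "echainiv(authenc(hmac(sha256),cbc(aes)))", or
 * the authencesn() variant when ESN is enabled.  The key blob handed
 * to crypto_aead_setkey() is the rtattr-encoded enckeylen parameter
 * followed by the raw authentication key and then the encryption key.
 */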

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
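
/*
 * Sizing example for the bounds computed above (hypothetical
 * numbers): AES-CBC in tunnel mode with an 8-byte ESP header, 16-byte
 * IV and 12-byte ICV gives header_len = 8 + 16 + 40 = 64 and
 * trailer_len = 16 + 1 + 12 = 29, i.e. the worst-case block padding
 * plus the pad-length/next-header bytes plus the ICV.
 */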

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);